code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# -*- coding: utf-8 -*-
# Author:w k
import nonebot as rcnb
__plugin_name = 'Bilibili'
Bilibili = rcnb.CommandGroup('Bilibili', only_to_me=False)
from . import get_cover
from . import live_subscription
| [
"nonebot.CommandGroup"
] | [((102, 149), 'nonebot.CommandGroup', 'rcnb.CommandGroup', (['"""Bilibili"""'], {'only_to_me': '(False)'}), "('Bilibili', only_to_me=False)\n", (119, 149), True, 'import nonebot as rcnb\n')] |
import math
import numpy as np
import random
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 96)
self.fc2 = nn.Linear(96, 96)
self.fc3 = nn.Linear(96, 96)
self.fc4 = nn.Linear(96, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.linear(self.fc4(x))
return x
class RlConfig:
def __init__(self,
pods_min,
pods_max,
resource_cost,
violation_cost,
autoscaling_period,
learning_rate,
discount_factor,
epsilon):
self.pods_min = pods_min
self.pods_max = pods_max
self.resource_cost = resource_cost
self.violation_cost = violation_cost
self.autoscaling_period = autoscaling_period
self.alpha = learning_rate
self.gamma = discount_factor
self.epsilon = epsilon
class HPA_Q_Learning:
def __init__(self, rl_config):
self.pods_min = rl_config.pods_min
self.pods_max = rl_config.pods_max
self.a_history = []
self.s_history = []
self.r_history = []
# (utilization, # of pods, actions)
#(2,10)
self.Q = Net()
self.criterion = nn.CrossEntropyLoss()
self.optimizer = optim.SGD(Q.parameters(), lr=0.01)
self.action_space = list(range(self.pods_min, self.pods_max+1))
self.alpha = rl_config.alpha
self.gamma = rl_config.gamma
self.epsilon = rl_config.epsilon
# m4.xlarge => 4 vCPU => 0.2 USD / hour
# 1 vCPU => 0.05 USD / hour
# pod => 0.2 core => 0.01 USD
# error => 0.0005 USD
self.resource_cost = rl_config.resource_cost
self.violation_cost = rl_config.violation_cost
self.autoscaling_period = rl_config.autoscaling_period
def convert_obs(self, obs):
u = int(float(obs.U) // 0.1)
c = int(obs.C[-1])
c_avg = sum(obs.C) / len(obs.C)
e = sum(obs.E)
reward = -1 * self.resource_cost * c_avg * self.autoscaling_period + -1 * self.violation_cost * e
state = (u, c)
self.s_history.append(state)
self.r_history.append(reward)
return state, reward
def epsilon_decay(self):
self.epsilon = self.epsilon * 0.9
def get_action(self, state):
max_q = float('-inf')
max_a = []
if np.random.rand() < self.epsilon:
return random.choice(self.action_space)
for i in range(self.pods_min, self.pods_max+1):
if max_q < self.Q[state[0], state[1], i]:
max_q = self.Q[state[0], state[1], i]
max_a = [i]
elif max_q == self.Q[state[0], state[1], i]:
max_a.append(i)
desired_c = random.choice(max_a)
self.a_history.append(desired_c)
return desired_c
def update(self, s, a, s_next, r_next):
self.Q[s[0], s[1], a] = self.Q[s[0], s[1], a] + self.alpha * (r_next + self.gamma * np.nanmax(self.Q[s_next[0], s_next[1],: ]) - self.Q[s[0], s[1], a])
| [
"random.choice",
"numpy.random.rand",
"torch.nn.CrossEntropyLoss",
"numpy.nanmax",
"torch.nn.Linear"
] | [((278, 294), 'torch.nn.Linear', 'nn.Linear', (['(2)', '(96)'], {}), '(2, 96)\n', (287, 294), True, 'import torch.nn as nn\n'), ((314, 331), 'torch.nn.Linear', 'nn.Linear', (['(96)', '(96)'], {}), '(96, 96)\n', (323, 331), True, 'import torch.nn as nn\n'), ((351, 368), 'torch.nn.Linear', 'nn.Linear', (['(96)', '(96)'], {}), '(96, 96)\n', (360, 368), True, 'import torch.nn as nn\n'), ((388, 405), 'torch.nn.Linear', 'nn.Linear', (['(96)', '(10)'], {}), '(96, 10)\n', (397, 405), True, 'import torch.nn as nn\n'), ((1452, 1473), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1471, 1473), True, 'import torch.nn as nn\n'), ((3004, 3024), 'random.choice', 'random.choice', (['max_a'], {}), '(max_a)\n', (3017, 3024), False, 'import random\n'), ((2608, 2624), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2622, 2624), True, 'import numpy as np\n'), ((2660, 2692), 'random.choice', 'random.choice', (['self.action_space'], {}), '(self.action_space)\n', (2673, 2692), False, 'import random\n'), ((3229, 3271), 'numpy.nanmax', 'np.nanmax', (['self.Q[s_next[0], s_next[1], :]'], {}), '(self.Q[s_next[0], s_next[1], :])\n', (3238, 3271), True, 'import numpy as np\n')] |
import numpy as np
import cv2 as cv
from abc import ABC
class ins_pos_kalman_filter(ABC):
def __init__(self, F, Q, H, R, initial_state_mean, initial_state_covariance):
"""
abstract initialization of kalman filter for INS data fusion for position estimation
Matrix notation matches that provided by https://en.wikipedia.org/wiki/Kalman_filter
:param F: state transition model matrix
:param Q: process noise covariance matrix
:param H: observation model matrix
:param R: observation noise covariance matrix
"""
if type(F) is not np.ndarray:
raise TypeError('F matrix must by np.ndarray')
if type(Q) is not np.ndarray:
raise TypeError('Q matrix must by np.ndarray')
if type(H) is not np.ndarray:
raise TypeError('H matrix must by np.ndarray')
if type(R) is not np.ndarray:
raise TypeError('R matrix must by np.ndarray')
if F.shape[1] != H.shape[1]:
raise RuntimeError('F and H must have same number of columns')
if Q.shape[1] != R.shape[1]:
raise RuntimeError('Q and R must have same number of columns')
self._kf = cv.KalmanFilter(F.shape[1], Q.shape[1])
self._kf.transitionMatrix = F
self._kf.processNoiseCov = Q
self._kf.measurementMatrix = H
self._kf.measurementNoiseCov = R
self._kf.statePost = initial_state_mean
self._kf.errorCovPost = initial_state_covariance
def estimate(self, measurement):
"""
incorporates measurment into kalman filter to update estimate and returns the current estimate provided by the
kalman filter
:param measurement: the measurement from sensors
:return: the estimate state
:return: the estimate state covariance
"""
self._kf.predict()
self._kf.correct(measurement)
return self._kf.statePost, self._kf.errorCovPost
class linear_gpsimu_pos_kalman_filter(ins_pos_kalman_filter):
def __init__(self, T, x0_mean, x0_cov):
"""
initializes linear kalman filter that fuses GPS and IMU sensors with linear transition matrices
:param T: time step in between estimations
"""
if type(T) not in [int, float]:
raise TypeError('T must be a number')
I3 = np.eye(3)
O3 = np.zeros((3, 3))
B = np.array([[1.], [1.], [1.]])
F = np.block([[I3, T * I3, T ** 2 / 2 * I3],
[O3, I3, T * I3],
[O3, O3, I3]])
Q = np.diag(np.hstack([T ** 3 / 6 * B.T, T ** 2 / 2 * B.T, T * B.T]).flatten())
H = np.block([[I3, O3, O3],
[O3, O3, O3],
[O3, O3, I3]])
R = np.eye(9) # THIS IS A PLACE HOLDER, REPLACE WITH NOISE COV OF GPS AND IMU SENSORS
super().__init__(F, Q, H, R, x0_mean, x0_cov)
| [
"numpy.block",
"numpy.eye",
"numpy.hstack",
"numpy.array",
"numpy.zeros",
"cv2.KalmanFilter"
] | [((1212, 1251), 'cv2.KalmanFilter', 'cv.KalmanFilter', (['F.shape[1]', 'Q.shape[1]'], {}), '(F.shape[1], Q.shape[1])\n', (1227, 1251), True, 'import cv2 as cv\n'), ((2369, 2378), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2375, 2378), True, 'import numpy as np\n'), ((2392, 2408), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2400, 2408), True, 'import numpy as np\n'), ((2421, 2452), 'numpy.array', 'np.array', (['[[1.0], [1.0], [1.0]]'], {}), '([[1.0], [1.0], [1.0]])\n', (2429, 2452), True, 'import numpy as np\n'), ((2463, 2536), 'numpy.block', 'np.block', (['[[I3, T * I3, T ** 2 / 2 * I3], [O3, I3, T * I3], [O3, O3, I3]]'], {}), '([[I3, T * I3, T ** 2 / 2 * I3], [O3, I3, T * I3], [O3, O3, I3]])\n', (2471, 2536), True, 'import numpy as np\n'), ((2681, 2733), 'numpy.block', 'np.block', (['[[I3, O3, O3], [O3, O3, O3], [O3, O3, I3]]'], {}), '([[I3, O3, O3], [O3, O3, O3], [O3, O3, I3]])\n', (2689, 2733), True, 'import numpy as np\n'), ((2790, 2799), 'numpy.eye', 'np.eye', (['(9)'], {}), '(9)\n', (2796, 2799), True, 'import numpy as np\n'), ((2601, 2657), 'numpy.hstack', 'np.hstack', (['[T ** 3 / 6 * B.T, T ** 2 / 2 * B.T, T * B.T]'], {}), '([T ** 3 / 6 * B.T, T ** 2 / 2 * B.T, T * B.T])\n', (2610, 2657), True, 'import numpy as np\n')] |
import pandas as pd
import os
import random
class MetaData:
def __init__(self, db):
self.db = db
self.ljs_path = '/data2/sungjaecho/data_tts/LJSpeech-1.1'
self.emovdb_path = '/data2/sungjaecho/data_tts/EmoV-DB/EmoV-DB'
self.metadata_path = 'metadata'
self.df = None
def get_df(self, split=None):
if split is None:
return self.df
elif split == 'train':
return self.df[self.df.split == 'train']
elif split == 'val':
return self.df[self.df.split == 'val']
elif split == 'test':
return self.df[self.df.split == 'test']
def load_original_db(self):
if self.db == "ljspeech":
csv_path = os.path.join(self.ljs_path, 'metadata.csv')
self.df = pd.read_csv(csv_path, sep='|', header=None, encoding='utf-8')
self.df = self.df.rename(columns={0:"id", 1:"text_raw", 2:"text"})
if self.db == "emovdb":
csv_path = os.path.join(self.emovdb_path, 'emov_db.csv')
self.df = pd.read_csv(csv_path, sep=',', encoding='utf-8')
self.df = self.df.rename(columns={
'sentence_path':'wav_path',
'transcription':'text'})
self.df.speaker = self.df.speaker.apply(self.change_speaker_name)
print("Loaded from {}".format(csv_path))
def add_columns(self, split_ratio):
'''
split_ratio: dict. e.g., {'train':0.8, 'val':0.1, 'test':0.1}
'''
if self.db == "ljspeech":
self.df['database'] = 'LJ-Speech-1.1'
self.df['wav_path'] = self.df.id.apply(self.get_wav_path)
self.df['speaker'] = ['ljs-w'] * len(self.df)
self.df['emotion'] = ['neutral'] * len(self.df)
self.df['sex'] = ['w'] * len(self.df)
self.df['lang'] = 'en'
self.df['split'] = self.get_split_labels(split_ratio)
self.df = self.df[['database','split','id','wav_path','text_raw','text','speaker','sex','emotion','lang']]
if self.db == "emovdb":
self.df['sex'] = self.df.speaker.apply(self.get_sex)
self.df['lang'] = 'en'
self.df['split'] = self.get_split_labels(split_ratio)
self.df = self.df[['database','split','id','wav_path','duration','text','speaker','sex','emotion','lang']]
def get_split_labels(self, split_ratio):
df_len = len(self.df)
i_val_start = int(df_len * split_ratio['train'])
i_test_start = int(df_len * (split_ratio['train'] + split_ratio['val']))
n_train = i_val_start
n_val = i_test_start - i_val_start
n_test = df_len - i_test_start
split_labels = (['train'] * n_train) + (['val'] * n_val) + (['test'] * n_test)
random.seed(3141)
random.shuffle(split_labels)
return split_labels
def get_wav_path(self, id, speaker=None, emotion=None):
if self.db == "ljspeech":
wav_path = os.path.join(self.ljs_path, "{}.wav".format(id))
return wav_path
def get_sex(self, speaker_name):
if self.db == "emovdb":
return speaker_name.split('-')[1]
def change_speaker_name(self, src_speaker_name):
if self.db == "emovdb":
if src_speaker_name == 'bea':
dst_speaker_name = '{}-w-{}'.format(self.db, src_speaker_name)
elif src_speaker_name == 'jenie':
dst_speaker_name = '{}-w-{}'.format(self.db, src_speaker_name)
elif src_speaker_name == 'josh':
dst_speaker_name = '{}-m-{}'.format(self.db, src_speaker_name)
elif src_speaker_name == 'sam':
dst_speaker_name = '{}-m-{}'.format(self.db, src_speaker_name)
return dst_speaker_name
def make_new_db(self, split_ratio):
self.load_original_db()
self.add_columns(split_ratio)
df = self.get_df()
csv_path = os.path.join(self.metadata_path, '{}.csv'.format(self.db))
df.to_csv(csv_path, index=False)
print("Saved! {}".format(csv_path))
splits = ['train', 'val', 'test']
for split in splits:
df = self.get_df(split)
csv_path = os.path.join(self.metadata_path, '{}_{}.csv'.format(self.db, split))
df.to_csv(csv_path, index=False)
print("Saved! {}".format(csv_path))
def print_data_stat(self):
csv_path = os.path.join(self.metadata_path, '{}.csv'.format(self.db))
df = pd.read_csv(csv_path)
print(self.db)
print(df.groupby(['split']).size().to_frame('size'))
csv_path = os.path.join(self.metadata_path, '{}_size_groupby_split.csv'.format(self.db))
df.groupby(['split']).size().to_frame('size').to_csv(csv_path)
print(df.groupby(['split', 'speaker', 'emotion']).size().to_frame('size'))
csv_path = os.path.join(self.metadata_path, '{}_size_groupby_split_speaker_emotion.csv'.format(self.db))
df.groupby(['split', 'speaker', 'emotion']).size().to_frame('size').to_csv(csv_path)
def save_csv_db():
db = "ljspeech"
split_ratio = {'train':0.99, 'val':0.005, 'test':0.005}
md = MetaData(db)
md.make_new_db(split_ratio)
db = "emovdb"
split_ratio = {'train':0.95, 'val':0.025, 'test':0.025}
md = MetaData(db)
md.make_new_db(split_ratio)
def print_data_stat():
db = "ljspeech"
md = MetaData(db)
md.print_data_stat()
db = "emovdb"
md = MetaData(db)
md.print_data_stat()
def main():
save_csv_db()
print_data_stat()
if __name__ == "__main__":
main()
| [
"os.path.join",
"random.shuffle",
"random.seed",
"pandas.read_csv"
] | [((2865, 2882), 'random.seed', 'random.seed', (['(3141)'], {}), '(3141)\n', (2876, 2882), False, 'import random\n'), ((2892, 2920), 'random.shuffle', 'random.shuffle', (['split_labels'], {}), '(split_labels)\n', (2906, 2920), False, 'import random\n'), ((4645, 4666), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (4656, 4666), True, 'import pandas as pd\n'), ((759, 802), 'os.path.join', 'os.path.join', (['self.ljs_path', '"""metadata.csv"""'], {}), "(self.ljs_path, 'metadata.csv')\n", (771, 802), False, 'import os\n'), ((826, 887), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {'sep': '"""|"""', 'header': 'None', 'encoding': '"""utf-8"""'}), "(csv_path, sep='|', header=None, encoding='utf-8')\n", (837, 887), True, 'import pandas as pd\n'), ((1027, 1072), 'os.path.join', 'os.path.join', (['self.emovdb_path', '"""emov_db.csv"""'], {}), "(self.emovdb_path, 'emov_db.csv')\n", (1039, 1072), False, 'import os\n'), ((1096, 1144), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {'sep': '""","""', 'encoding': '"""utf-8"""'}), "(csv_path, sep=',', encoding='utf-8')\n", (1107, 1144), True, 'import pandas as pd\n')] |
from RestrictedPython import compile_restricted
from RestrictedPython import Eval
from RestrictedPython import Guards
from RestrictedPython import safe_globals
from RestrictedPython import utility_builtins
from RestrictedPython.PrintCollector import PrintCollector
from multiprocessing import Process
from multiprocessing import Manager
import local_libs.ProblemFileHandler as Handler
import time
class PyOJAgent:
def __init__(self, memory_limit=1048576, time_limit=5):
self.name = 'default_agent'
self.memory_limit = memory_limit
self.time_limit = time_limit
self.submission_result = []
self.problem_dict = {}
self.compile_error_flag = False
self.compile_error_info = ''
self.problem_file_handler = Handler.ProblemFileHandler()
def load_problem_file(self, problem_file):
self.problem_dict = self.problem_file_handler.load_problem_file(problem_file)
if self.problem_dict:
return True
else:
return False
def test_submission(self, submission_code_str):
self.submission_result = []
self.compile_error_flag = False
if not self.problem_dict:
return
else:
pass
try:
compile_restricted(submission_code_str, '<inline>', 'exec')
except Exception as e:
self.compile_error_flag = True
self.compile_error_info = repr(e)
return
for test_case in self.problem_dict['test_cases']:
print('testing test case:', test_case, sep='\n')
suffix = '\noutput = main_function' + str(tuple(test_case[0]))
try:
manager = Manager()
py_code = submission_code_str + suffix
ret_dict = manager.dict()
p = Process(target=target_function, args=(py_code, ret_dict))
p.start()
time.sleep(self.time_limit)
p.terminate()
p.join()
if not ret_dict:
self.submission_result.append('服务器资源不足!')
return
else:
print('submission result: ', ret_dict['output'])
if ret_dict['RE_flag']:
self.submission_result.append('Runtime Error! ' + ret_dict['RE_info'])
elif ret_dict['TLE_flag']:
self.submission_result.append('Time Limit Exceeded! ')
elif ret_dict['output'] == test_case[1]:
self.submission_result.append('Accepted! ')
else:
self.submission_result.append('Wrong Answer! ') # add error types here maybe
except Exception as e:
print(repr(e))
def report_submission_result(self):
if self.compile_error_flag:
return "Compile Error!\n" + self.compile_error_info
elif not self.problem_dict:
return '未加载题目!'
elif not self.submission_result:
return 'No Report Available!'
else:
ret = ''
n = len(self.submission_result)
ret += '{0}组数据已测试,结果如下:\n'.format(n)
for i in range(n):
ret += '测试点{0}/{1}:'.format(i + 1, n)
ret += self.submission_result[i]
ret += '\n'
return ret
def describe_problem(self):
if not self.problem_dict:
return '未加载题目!'
else:
ret = '题目描述:\n'
ret += self.problem_dict['text']
ret += '\n========\n'
ret += '附加信息:\n'
ret += '本次测试时间限制:{0} s,内存限制:{1} KB\n'.format(self.time_limit, self.memory_limit)
return ret
def reset(self):
self.submission_result = []
self.problem_dict = {}
# this function has to be defined outside the PyOJAgent class for multiprocessing to pickle
def target_function(py_code, ret_dict):
policy_globals = generate_restricted_environment_policy()
policy_globals['output'] = None
ret_dict['RE_flag'] = False
ret_dict['RE_info'] = ''
ret_dict['TLE_flag'] = True
ret_dict['output'] = None
try:
byte_code = compile_restricted(py_code, '<inline>', 'exec')
exec(byte_code, policy_globals)
ret_dict['TLE_flag'] = False
ret_dict['output'] = policy_globals['output']
except Exception as e:
print(repr(e))
ret_dict['RE_flag'] = True # if RE, TLE flag would also be True
ret_dict['RE_info'] = repr(e)
finally:
pass
# print('finally')
def generate_restricted_environment_policy():
policy_globals = {**safe_globals, **utility_builtins}
policy_globals['__builtins__']['__metaclass__'] = type
policy_globals['__builtins__']['__name__'] = type
policy_globals['_getattr_'] = Guards.safer_getattr
policy_globals['_write_'] = Guards.full_write_guard
policy_globals['_getiter_'] = Eval.default_guarded_getiter
policy_globals['_getitem_'] = Eval.default_guarded_getitem
policy_globals['_print_'] = PrintCollector
policy_globals['_iter_unpack_sequence_'] = Guards.guarded_iter_unpack_sequence
return policy_globals | [
"local_libs.ProblemFileHandler.ProblemFileHandler",
"multiprocessing.Process",
"time.sleep",
"multiprocessing.Manager",
"RestrictedPython.compile_restricted"
] | [((791, 819), 'local_libs.ProblemFileHandler.ProblemFileHandler', 'Handler.ProblemFileHandler', ([], {}), '()\n', (817, 819), True, 'import local_libs.ProblemFileHandler as Handler\n'), ((4387, 4434), 'RestrictedPython.compile_restricted', 'compile_restricted', (['py_code', '"""<inline>"""', '"""exec"""'], {}), "(py_code, '<inline>', 'exec')\n", (4405, 4434), False, 'from RestrictedPython import compile_restricted\n'), ((1308, 1367), 'RestrictedPython.compile_restricted', 'compile_restricted', (['submission_code_str', '"""<inline>"""', '"""exec"""'], {}), "(submission_code_str, '<inline>', 'exec')\n", (1326, 1367), False, 'from RestrictedPython import compile_restricted\n'), ((1759, 1768), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (1766, 1768), False, 'from multiprocessing import Manager\n'), ((1889, 1946), 'multiprocessing.Process', 'Process', ([], {'target': 'target_function', 'args': '(py_code, ret_dict)'}), '(target=target_function, args=(py_code, ret_dict))\n', (1896, 1946), False, 'from multiprocessing import Process\n'), ((1991, 2018), 'time.sleep', 'time.sleep', (['self.time_limit'], {}), '(self.time_limit)\n', (2001, 2018), False, 'import time\n')] |
"""MimicDB Key subclass wrapper
"""
import re
from boto.s3.key import Key as BotoKey
import mimicdb
from ..backends import tpl
class Key(BotoKey):
def __init__(self, *args, **kwargs):
"""Add the key to the bucket set if the key name is set and metadata is
available for it, otherwise wait until uploaded or downloaded.
"""
bucket = kwargs.get('bucket', args[0] if args else None)
name = kwargs.get('name', args[1] if len(args) > 1 else None)
self._name = name
if name and bucket:
meta = mimicdb.backend.hgetall(tpl.key % (bucket.name, name))
if meta:
mimicdb.backend.sadd(tpl.bucket % bucket.name, name)
self._load_meta(meta['size'], meta['md5'])
super(Key, self).__init__(*args, **kwargs)
def _load_meta(self, size, md5):
"""Set key attributes to retrived metadata. Might be extended in the
future to support more attributes.
"""
if not hasattr(self, 'local_hashes'):
self.local_hashes = {}
self.size = int(size)
if (re.match('^[a-fA-F0-9]{32}$', md5)):
self.md5 = md5
@property
def name(self):
return self._name
@name.setter
def name(self, value):
"""Key name can be set by Key.key or Key.name. Key.key sets Key.name
internally, so just handle this property. When changing the key
name, try to load it's metadata from MimicDB. If it's not available,
the key hasn't been uploaded, downloaded or synced so don't add it to
the bucket set (it also might have just been deleted,
see boto.s3.bucket.py#785)
"""
self._name = value
if value:
meta = mimicdb.backend.hgetall(tpl.key % (self.bucket.name, value))
if meta:
mimicdb.backend.sadd(tpl.bucket % self.bucket.name, value)
self._load_meta(meta['size'], meta['md5'])
def _send_file_internal(self, *args, **kwargs):
"""Called internally for any type of upload. After upload finishes,
make sure the key is in the bucket set and save the metadata.
"""
super(Key, self)._send_file_internal(*args, **kwargs)
mimicdb.backend.sadd(tpl.bucket % self.bucket.name, self.name)
mimicdb.backend.hmset(tpl.key % (self.bucket.name, self.name),
dict(size=self.size, md5=self.md5))
def _get_file_internal(self, *args, **kwargs):
"""Called internally for any type of download. After download finishes,
make sure the key is in the bucket set and save the metadata.
"""
super(Key, self)._get_file_internal(*args, **kwargs)
mimicdb.backend.sadd(tpl.bucket % self.bucket.name, self.name)
mimicdb.backend.hmset(tpl.key % (self.bucket.name, self.name),
dict(size=self.size, md5=self.md5))
| [
"re.match",
"mimicdb.backend.hgetall",
"mimicdb.backend.sadd"
] | [((1119, 1153), 're.match', 're.match', (['"""^[a-fA-F0-9]{32}$"""', 'md5'], {}), "('^[a-fA-F0-9]{32}$', md5)\n", (1127, 1153), False, 'import re\n'), ((2266, 2328), 'mimicdb.backend.sadd', 'mimicdb.backend.sadd', (['(tpl.bucket % self.bucket.name)', 'self.name'], {}), '(tpl.bucket % self.bucket.name, self.name)\n', (2286, 2328), False, 'import mimicdb\n'), ((2748, 2810), 'mimicdb.backend.sadd', 'mimicdb.backend.sadd', (['(tpl.bucket % self.bucket.name)', 'self.name'], {}), '(tpl.bucket % self.bucket.name, self.name)\n', (2768, 2810), False, 'import mimicdb\n'), ((567, 621), 'mimicdb.backend.hgetall', 'mimicdb.backend.hgetall', (['(tpl.key % (bucket.name, name))'], {}), '(tpl.key % (bucket.name, name))\n', (590, 621), False, 'import mimicdb\n'), ((1767, 1827), 'mimicdb.backend.hgetall', 'mimicdb.backend.hgetall', (['(tpl.key % (self.bucket.name, value))'], {}), '(tpl.key % (self.bucket.name, value))\n', (1790, 1827), False, 'import mimicdb\n'), ((660, 712), 'mimicdb.backend.sadd', 'mimicdb.backend.sadd', (['(tpl.bucket % bucket.name)', 'name'], {}), '(tpl.bucket % bucket.name, name)\n', (680, 712), False, 'import mimicdb\n'), ((1866, 1924), 'mimicdb.backend.sadd', 'mimicdb.backend.sadd', (['(tpl.bucket % self.bucket.name)', 'value'], {}), '(tpl.bucket % self.bucket.name, value)\n', (1886, 1924), False, 'import mimicdb\n')] |
from definitions import SYSTEM, System, GameStatus
import os
import asyncio
import logging as log
from consts import UBISOFT_REGISTRY_LAUNCHER_INSTALLS
if SYSTEM == System.WINDOWS:
import winreg
def _get_registry_value_from_path(top_key, registry_path, key):
with winreg.OpenKey(top_key, registry_path, 0, winreg.KEY_READ) as winkey:
return winreg.QueryValueEx(winkey, key)[0]
def _return_local_game_path_from_special_registry(special_registry_path):
if not special_registry_path:
return GameStatus.NotInstalled
try:
install_location = _get_registry_value_from_path(winreg.HKEY_LOCAL_MACHINE, special_registry_path,
"InstallLocation")
return install_location
except WindowsError:
# Entry doesn't exist, game is not installed.
return ""
except Exception as e:
log.warning(f"Unable to read special registry status for {special_registry_path}: {repr(e)}")
return ""
def _return_local_game_path(launch_id):
installs_path = UBISOFT_REGISTRY_LAUNCHER_INSTALLS
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, installs_path):
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, installs_path + f'\\{launch_id}') as lkey:
game_path, _ = winreg.QueryValueEx(lkey, 'InstallDir')
return os.path.normcase(os.path.normpath(game_path))
except OSError:
return "" # end of iteration
except WindowsError:
return "" # Game not installed / during installation
def get_local_game_path(special_registry_path, launch_id):
local_game_path = _return_local_game_path(launch_id)
if not local_game_path and special_registry_path:
local_game_path = _return_local_game_path_from_special_registry(special_registry_path)
return local_game_path
async def get_size_at_path(start_path):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
if not os.path.islink(fp):
total_size += os.path.getsize(fp)
await asyncio.sleep(0)
return total_size
def _is_file_at_path(path, file):
if os.path.isdir(path):
file_location = os.path.join(path, file)
if os.path.isfile(file_location):
return True
return False
else:
return False
def _read_status_from_state_file(game_path):
try:
if os.path.exists(os.path.join(game_path, 'uplay_install.state')):
with open(os.path.join(game_path, 'uplay_install.state'), 'rb') as f:
if f.read()[0] == 0x0A:
return GameStatus.Installed
else:
return GameStatus.NotInstalled
# State file doesn't exit
else:
return GameStatus.NotInstalled
except Exception as e:
log.warning(f"Issue reading install state file for {game_path}: {repr(e)}")
return GameStatus.NotInstalled
def get_game_installed_status(path, exe=None, special_registry_path=None):
status = GameStatus.NotInstalled
try:
if path and os.access(path, os.F_OK):
status = _read_status_from_state_file(path)
# Fallback for old games
if status == GameStatus.NotInstalled and exe and special_registry_path:
if _is_file_at_path(path, exe):
status = GameStatus.Installed
except Exception as e:
log.error(f"Error reading game installed status at {path}: {repr(e)}")
finally:
return status | [
"os.path.getsize",
"winreg.QueryValueEx",
"winreg.OpenKey",
"os.access",
"os.path.join",
"os.path.isfile",
"os.path.normpath",
"os.path.isdir",
"asyncio.sleep",
"os.path.islink",
"os.walk"
] | [((2072, 2091), 'os.walk', 'os.walk', (['start_path'], {}), '(start_path)\n', (2079, 2091), False, 'import os\n'), ((2366, 2385), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2379, 2385), False, 'import os\n'), ((288, 346), 'winreg.OpenKey', 'winreg.OpenKey', (['top_key', 'registry_path', '(0)', 'winreg.KEY_READ'], {}), '(top_key, registry_path, 0, winreg.KEY_READ)\n', (302, 346), False, 'import winreg\n'), ((2412, 2436), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (2424, 2436), False, 'import os\n'), ((2449, 2478), 'os.path.isfile', 'os.path.isfile', (['file_location'], {}), '(file_location)\n', (2463, 2478), False, 'import os\n'), ((374, 406), 'winreg.QueryValueEx', 'winreg.QueryValueEx', (['winkey', 'key'], {}), '(winkey, key)\n', (393, 406), False, 'import winreg\n'), ((1167, 1223), 'winreg.OpenKey', 'winreg.OpenKey', (['winreg.HKEY_LOCAL_MACHINE', 'installs_path'], {}), '(winreg.HKEY_LOCAL_MACHINE, installs_path)\n', (1181, 1223), False, 'import winreg\n'), ((2140, 2164), 'os.path.join', 'os.path.join', (['dirpath', 'f'], {}), '(dirpath, f)\n', (2152, 2164), False, 'import os\n'), ((2647, 2693), 'os.path.join', 'os.path.join', (['game_path', '"""uplay_install.state"""'], {}), "(game_path, 'uplay_install.state')\n", (2659, 2693), False, 'import os\n'), ((3340, 3364), 'os.access', 'os.access', (['path', 'os.F_OK'], {}), '(path, os.F_OK)\n', (3349, 3364), False, 'import os\n'), ((2185, 2203), 'os.path.islink', 'os.path.islink', (['fp'], {}), '(fp)\n', (2199, 2203), False, 'import os\n'), ((2236, 2255), 'os.path.getsize', 'os.path.getsize', (['fp'], {}), '(fp)\n', (2251, 2255), False, 'import os\n'), ((1265, 1340), 'winreg.OpenKey', 'winreg.OpenKey', (['winreg.HKEY_LOCAL_MACHINE', "(installs_path + f'\\\\{launch_id}')"], {}), "(winreg.HKEY_LOCAL_MACHINE, installs_path + f'\\\\{launch_id}')\n", (1279, 1340), False, 'import winreg\n'), ((1386, 1425), 'winreg.QueryValueEx', 'winreg.QueryValueEx', (['lkey', '"""InstallDir"""'], {}), "(lkey, 'InstallDir')\n", (1405, 1425), False, 'import winreg\n'), ((2279, 2295), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (2292, 2295), False, 'import asyncio\n'), ((2719, 2765), 'os.path.join', 'os.path.join', (['game_path', '"""uplay_install.state"""'], {}), "(game_path, 'uplay_install.state')\n", (2731, 2765), False, 'import os\n'), ((1471, 1498), 'os.path.normpath', 'os.path.normpath', (['game_path'], {}), '(game_path)\n', (1487, 1498), False, 'import os\n')] |
"""
This module contains all that methods that determine if user provided details
are correct.
"""
from AllDBFields import BaseFields
from AllDBFields import AuthenticationFields
import CryptKeeper
import DatabaseLayer
import re
import cherrypy
def is_login_taken(login):
"""
checks the database to determine if an email address has already been used.
args:
login:
this should be an email address
returns:
a boolean. true if email is already used. false if not.
"""
collection = DatabaseLayer.get_table(AuthenticationFields.COLLECTION_NAME)
if collection.find({AuthenticationFields.USER_LOGIN:login.lower()})\
.count() > 0:
return True
else:
return False
def get_user(login):
"""
gets the user data from the database
args:
this should be an email address
returns:
a dict containing, the user email address in forced lower case,
the users encrypted password, and the user's email address in whatever
case they saved it as.
"""
collection = DatabaseLayer.get_table(AuthenticationFields.COLLECTION_NAME)
return collection.find_one({AuthenticationFields.USER_LOGIN:login.lower()})
def insert_new_user(login,pw,shipName=""):
"""
This used during the create new user process
saves a new user to the database
args:
login:
unique email address supplied by the user
pw:
password supplied by the user.
shipName:
The space ship name is supplied by the user.
Not required but lame if not there.
returns:
a tuple containing the primary key of the newly inserted user from the
User table, the primary key of the account created for the new user,
and the primary key of the hero created for the new user.
"""
from Account import Account
from Hero import Hero
if is_login_taken(login):
raise FileExistsError("That email is already taken")
loginPk = safe_insert_new_user(login,pw)
accountId = Account.create_new_account_in_db(loginPk)
heroId = Hero.construct_new_hero_in_db(accountId,shipName)
return (loginPk,accountId,heroId)
def safe_insert_new_user(login,pw):
"""
This used during the create new user process.
this should be called when doing the actual inserting of a new user.
This encrypts the password before saving.
args:
login:
unique email address supplied by the user
pw:
the unencrypted password supplied by the user.
returns:
the primary key returned from the Database upon insertion of
the new user
"""
safePw = CryptKeeper.encrypt_str(pw)
collection = DatabaseLayer.get_table(AuthenticationFields.COLLECTION_NAME)
id = collection.insert_one({AuthenticationFields.USER_LOGIN:login.lower(),
AuthenticationFields.USER_PASSWORD:safePw,
AuthenticationFields.USER_DESC: login}).inserted_id
return id
def authenticate_user(login,pw):
"""
This is used during the login process.
Determines if the user is trying to log on with a valid login and
also determines if the user is trying to log on with a correct password
args:
login:
email address supplied by the user
pw:
the un<PASSWORD> supplied by the user.
returns:
a dict with two keys, a boolean: 'success' and list: 'errors.'
'success' tells the caller whether or not the login attempt was successful.
If it was 'success' is true, then 'errors' should be an empty list.
If 'success' is false, then 'errors' will have between one
and two elements. Each of them will be id-css selectors for jquery
to use.
"""
user = get_user(login)
resultDict = {'messages':[],'success':False}
if not user:
resultDict['messages'].append("#bad_login")
return resultDict
if not CryptKeeper.password_is_right(\
pw,user[AuthenticationFields.USER_PASSWORD]):
resultDict['messages'].append("#bad_login_pw")
return resultDict
resultDict['success'] = True
return resultDict
def get_loginPk_by_login(validLogin):
"""
args:
validLogin:
I'm gonna assume that this login has already been vetted earlier
in the program.
return:
an objectId to the users collection
"""
collection = DatabaseLayer.get_table(AuthenticationFields.COLLECTION_NAME)
login = collection.find_one({AuthenticationFields.USER_LOGIN: validLogin})
return login[BaseFields.PK_KEY]
def get_accountPk_by_loginPk(loginPk):
"""
args:
loginPk:
an fk to the user collection
return:
an objectId to the account collection
"""
from AllDBFields import AccountDbFields
collection = DatabaseLayer.get_table(AccountDbFields.COLLECTION_NAME)
account = collection.find_one({AccountDbFields.LOGIN_PK_KEY:loginPk})
return account[AccountDbFields.PK_KEY]
def get_heroPk_by_accountPk(accountPk):
"""
args:
userId:
an fk to the account collection
return:
an objectId to the hero collection
"""
from AllDBFields import HeroDbFields
collection = DatabaseLayer.get_table(HeroDbFields.COLLECTION_NAME)
hero = collection.find_one({HeroDbFields.ACCOUNT_PK_KEY:accountPk})
return hero[HeroDbFields.PK_KEY]
def validate_email(email):
"""
This used during the create new user process.
determines if the email supplied is formatted correctly and doesn't
already exist.
args:
email:
An email address supplied by the user
returns:
a dict with two keys, a boolean: 'success' and list: 'errors.'
'success' tells the caller whether or not the login attempt was successful.
If it was 'success' is true, then 'errors' should be an empty list.
If 'success' is false, then 'errors' will have between one
and two elements. Each of them will be id-css selectors for jquery
to use.
"""
if not re.match(r"[^@]+@[^@]+\.[^@]+",email):
return {'success': False,'messages':["#bad_email"]}
if is_login_taken(email):
return {'success': False,'messages':["#taken_email"]}
return {'success': True,'messages':["#good_email"]}
def check_all_validations_for_new_login(email1,email2,pw1,pw2,shipName):
"""
This used during the create new user process.
This method calls other validation methods and baically determines
if any of the info that the user entered was illegal.
args:
all args should be strings and less than 256 characters else
this will return invalid.
email1:
this should be input that will also pass the
validate_email test
email2:
this should match email1.
pw1:
this only needs to pass any password complexity requirements
that have been added to the method. Currently the only
requirement is that the password must be at least 6
characters.
pw2:
this should match pw1:
returns:
a list of violations that either the user's email or password
commits. An empty list implies that everything is dandy.
"""
flags = []
if len(email1) <= 256:
emailValidationResult = validate_email(email1)
if not emailValidationResult['success']:
flags.extend(emailValidationResult['messages'])
if email1 != email2:
flags.append("#mismatched_email")
else:
if len(email1) > 256:
flags.append("#email1_too_long")
if len(email2) > 256:
flags.append("#email2_too_long")
if len(pw1) <= 256:
if len(pw1) < 6:
flags.append("#short_pw")
if pw1 != pw2:
flags.append("#mismatched_pw")
else:
if len(pw1) > 256:
flags.append("pw1_too_long")
if len(pw2) > 256:
flags.append("pw2_too_long")
if len(shipName) > 256:
flags.append("#shipname_too_long")
return flags
#disableAuthenticationRedirects should only ever be used in testing.
#Never in production
disableAuthenticationRedirects = False
def redirect_unauthenticated():
"""
a cherrypy decororator. Place the decorator infront of controller
methods that return parts of the website which the user needs to be
logged in to see. If they are not logged in, redirect them to the
login page.
"""
if disableAuthenticationRedirects:
return
username = cherrypy.session.get(BaseFields.SESSION_KEY)
if not username:
raise cherrypy.HTTPRedirect("/login")
def redirect_authenticated():
"""
a cherrypy decororator. Place the decorator infront of controller
methods that return the login part of the website.
If they are already logged in, redirect them to the main page.
"""
if disableAuthenticationRedirects:
return
username = cherrypy.session.get(BaseFields.SESSION_KEY)
if username:
raise cherrypy.HTTPRedirect("/")
#These are useable as soon as I import AuthenticationLayer
cherrypy.tools.redirect_unauthenticated = cherrypy.Tool("before_handler",redirect_unauthenticated)
cherrypy.tools.redirect_authenticated = cherrypy.Tool("before_handler",redirect_authenticated)
| [
"cherrypy.session.get",
"CryptKeeper.password_is_right",
"CryptKeeper.encrypt_str",
"cherrypy.Tool",
"re.match",
"DatabaseLayer.get_table",
"Account.Account.create_new_account_in_db",
"Hero.Hero.construct_new_hero_in_db",
"cherrypy.HTTPRedirect"
] | [((8882, 8939), 'cherrypy.Tool', 'cherrypy.Tool', (['"""before_handler"""', 'redirect_unauthenticated'], {}), "('before_handler', redirect_unauthenticated)\n", (8895, 8939), False, 'import cherrypy\n'), ((8979, 9034), 'cherrypy.Tool', 'cherrypy.Tool', (['"""before_handler"""', 'redirect_authenticated'], {}), "('before_handler', redirect_authenticated)\n", (8992, 9034), False, 'import cherrypy\n'), ((530, 591), 'DatabaseLayer.get_table', 'DatabaseLayer.get_table', (['AuthenticationFields.COLLECTION_NAME'], {}), '(AuthenticationFields.COLLECTION_NAME)\n', (553, 591), False, 'import DatabaseLayer\n'), ((1053, 1114), 'DatabaseLayer.get_table', 'DatabaseLayer.get_table', (['AuthenticationFields.COLLECTION_NAME'], {}), '(AuthenticationFields.COLLECTION_NAME)\n', (1076, 1114), False, 'import DatabaseLayer\n'), ((2007, 2048), 'Account.Account.create_new_account_in_db', 'Account.create_new_account_in_db', (['loginPk'], {}), '(loginPk)\n', (2039, 2048), False, 'from Account import Account\n'), ((2060, 2110), 'Hero.Hero.construct_new_hero_in_db', 'Hero.construct_new_hero_in_db', (['accountId', 'shipName'], {}), '(accountId, shipName)\n', (2089, 2110), False, 'from Hero import Hero\n'), ((2618, 2645), 'CryptKeeper.encrypt_str', 'CryptKeeper.encrypt_str', (['pw'], {}), '(pw)\n', (2641, 2645), False, 'import CryptKeeper\n'), ((2661, 2722), 'DatabaseLayer.get_table', 'DatabaseLayer.get_table', (['AuthenticationFields.COLLECTION_NAME'], {}), '(AuthenticationFields.COLLECTION_NAME)\n', (2684, 2722), False, 'import DatabaseLayer\n'), ((4294, 4355), 'DatabaseLayer.get_table', 'DatabaseLayer.get_table', (['AuthenticationFields.COLLECTION_NAME'], {}), '(AuthenticationFields.COLLECTION_NAME)\n', (4317, 4355), False, 'import DatabaseLayer\n'), ((4701, 4757), 'DatabaseLayer.get_table', 'DatabaseLayer.get_table', (['AccountDbFields.COLLECTION_NAME'], {}), '(AccountDbFields.COLLECTION_NAME)\n', (4724, 4757), False, 'import DatabaseLayer\n'), ((5099, 5152), 'DatabaseLayer.get_table', 'DatabaseLayer.get_table', (['HeroDbFields.COLLECTION_NAME'], {}), '(HeroDbFields.COLLECTION_NAME)\n', (5122, 5152), False, 'import DatabaseLayer\n'), ((8281, 8325), 'cherrypy.session.get', 'cherrypy.session.get', (['BaseFields.SESSION_KEY'], {}), '(BaseFields.SESSION_KEY)\n', (8301, 8325), False, 'import cherrypy\n'), ((8683, 8727), 'cherrypy.session.get', 'cherrypy.session.get', (['BaseFields.SESSION_KEY'], {}), '(BaseFields.SESSION_KEY)\n', (8703, 8727), False, 'import cherrypy\n'), ((3835, 3910), 'CryptKeeper.password_is_right', 'CryptKeeper.password_is_right', (['pw', 'user[AuthenticationFields.USER_PASSWORD]'], {}), '(pw, user[AuthenticationFields.USER_PASSWORD])\n', (3864, 3910), False, 'import CryptKeeper\n'), ((5912, 5950), 're.match', 're.match', (['"""[^@]+@[^@]+\\\\.[^@]+"""', 'email'], {}), "('[^@]+@[^@]+\\\\.[^@]+', email)\n", (5920, 5950), False, 'import re\n'), ((8355, 8386), 'cherrypy.HTTPRedirect', 'cherrypy.HTTPRedirect', (['"""/login"""'], {}), "('/login')\n", (8376, 8386), False, 'import cherrypy\n'), ((8753, 8779), 'cherrypy.HTTPRedirect', 'cherrypy.HTTPRedirect', (['"""/"""'], {}), "('/')\n", (8774, 8779), False, 'import cherrypy\n')] |
from setuptools import setup, find_packages
setup(
name="torchsummary",
version="1.5.1.1",
description="(Advanced Pytorch-Summary)Model summary in PyTorch similar to `model.summary()` in Keras",
url="https://github.com/sksq96/pytorch-summary",
author="(Modified)skyguidance - (Original)<NAME> @sksq96",
author_email="<EMAIL>",
packages=["torchsummary"],
)
| [
"setuptools.setup"
] | [((45, 373), 'setuptools.setup', 'setup', ([], {'name': '"""torchsummary"""', 'version': '"""1.5.1.1"""', 'description': '"""(Advanced Pytorch-Summary)Model summary in PyTorch similar to `model.summary()` in Keras"""', 'url': '"""https://github.com/sksq96/pytorch-summary"""', 'author': '"""(Modified)skyguidance - (Original)<NAME> @sksq96"""', 'author_email': '"""<EMAIL>"""', 'packages': "['torchsummary']"}), "(name='torchsummary', version='1.5.1.1', description=\n '(Advanced Pytorch-Summary)Model summary in PyTorch similar to `model.summary()` in Keras'\n , url='https://github.com/sksq96/pytorch-summary', author=\n '(Modified)skyguidance - (Original)<NAME> @sksq96', author_email=\n '<EMAIL>', packages=['torchsummary'])\n", (50, 373), False, 'from setuptools import setup, find_packages\n')] |
#! /usr/bin/env python
import rospy
import time
import actionlib
from basics.msg import TimerAction, TimerGoal, TimerResult, TimerFeedback
# Callback funtion, invoked when a new goal is received
def do_timer(goal):
start_time = time.time() # Save current time
update_count = 0 # Initialize update counter
# The maximum time is 60 seconds. Abort if given goal is greater.
if goal.time_to_wait.to_sec() > 60.0:
result = TimerResult()
result.time_elapsed = rospy.Duration.from_sec(time.time() - start_time)
result.updates_sent = update_count
server.set_aborted(result, "Timer aborted due to too long wait")
return
# Go over the goal time in 1.0 s increments, testing for preemption and
# sending updates in between pauses
while (time.time() - start_time) < goal.time_to_wait.to_sec():
# Check for preemption and abort if true
if server.is_preempt_requested():
result = TimerResult()
result.time_elapsed = rospy.Duration.from_sec(time.time() - start_time)
result.updates_sent = update_count
server.set_preempted(result, "Timer preempted")
return
# Send feedback
feedback = TimerFeedback()
feedback.time_elapsed = rospy.Duration.from_sec(time.time() - start_time)
feedback.time_remaining = goal.time_to_wait - feedback.time_elapsed
server.publish_feedback(feedback)
update_count += 1 # Increment update counter
# Sleep for 1.0 sec
time.sleep(1.0)
# If the loop is concluded, the timer succeeded. Send the final result.
result = TimerResult()
result.time_elapsed = rospy.Duration.from_sec(time.time() - start_time)
result.updates_sent = update_count
server.set_succeeded(result, "Timer completed successfully")
# Initialize the node
rospy.init_node('fancy_action_server')
# Create a SimpleActionServer (server_name, action_type, callback, autostart)
server = actionlib.SimpleActionServer('timer', TimerAction, do_timer, False)
# Start the server
server.start()
# Pass control to ROS and wait for goal messages
rospy.spin()
| [
"rospy.init_node",
"actionlib.SimpleActionServer",
"time.sleep",
"basics.msg.TimerResult",
"rospy.spin",
"basics.msg.TimerFeedback",
"time.time"
] | [((1707, 1745), 'rospy.init_node', 'rospy.init_node', (['"""fancy_action_server"""'], {}), "('fancy_action_server')\n", (1722, 1745), False, 'import rospy\n'), ((1834, 1901), 'actionlib.SimpleActionServer', 'actionlib.SimpleActionServer', (['"""timer"""', 'TimerAction', 'do_timer', '(False)'], {}), "('timer', TimerAction, do_timer, False)\n", (1862, 1901), False, 'import actionlib\n'), ((1987, 1999), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1997, 1999), False, 'import rospy\n'), ((231, 242), 'time.time', 'time.time', ([], {}), '()\n', (240, 242), False, 'import time\n'), ((1499, 1512), 'basics.msg.TimerResult', 'TimerResult', ([], {}), '()\n', (1510, 1512), False, 'from basics.msg import TimerAction, TimerGoal, TimerResult, TimerFeedback\n'), ((432, 445), 'basics.msg.TimerResult', 'TimerResult', ([], {}), '()\n', (443, 445), False, 'from basics.msg import TimerAction, TimerGoal, TimerResult, TimerFeedback\n'), ((1125, 1140), 'basics.msg.TimerFeedback', 'TimerFeedback', ([], {}), '()\n', (1138, 1140), False, 'from basics.msg import TimerAction, TimerGoal, TimerResult, TimerFeedback\n'), ((1398, 1413), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (1408, 1413), False, 'import time\n'), ((753, 764), 'time.time', 'time.time', ([], {}), '()\n', (762, 764), False, 'import time\n'), ((903, 916), 'basics.msg.TimerResult', 'TimerResult', ([], {}), '()\n', (914, 916), False, 'from basics.msg import TimerAction, TimerGoal, TimerResult, TimerFeedback\n'), ((1560, 1571), 'time.time', 'time.time', ([], {}), '()\n', (1569, 1571), False, 'import time\n'), ((494, 505), 'time.time', 'time.time', ([], {}), '()\n', (503, 505), False, 'import time\n'), ((1191, 1202), 'time.time', 'time.time', ([], {}), '()\n', (1200, 1202), False, 'import time\n'), ((966, 977), 'time.time', 'time.time', ([], {}), '()\n', (975, 977), False, 'import time\n')] |
from typing import Callable, List, Optional, OrderedDict # for OrderedMeta
from enum import Enum
class OrderedMeta(type):
"""Replaces the inheriting object's dict of attributes with an OrderedDict that preserves enumeration order
Reference: https://stackoverflow.com/questions/11296010/iterate-through-class-members-in-order-of-their-declaration
Usage:
# Set the metaclass property of your custom class to OrderedMeta
class Person(metaclass=OrderedMeta):
name = None
date_of_birth = None
nationality = None
gender = None
address = None
comment = None
# Can then enumerate members while preserving order
for member in Person._orderedKeys:
if not getattr(Person, member):
print(member)
"""
@classmethod
def __prepare__(metacls, name, bases):
return OrderedDict()
def __new__(cls, name, bases, clsdict):
c = type.__new__(cls, name, bases, clsdict)
c._orderedKeys = clsdict.keys()
return c
def inspect_callable_arguments(a_callable: Callable):
""" Not yet validated/implemented
Progress:
import inspect
from neuropy.plotting.ratemaps import plot_ratemap_1D, plot_ratemap_2D
fn_spec = inspect.getfullargspec(plot_ratemap_2D)
fn_sig = inspect.signature(plot_ratemap_2D)
?fn_sig
# fn_sig
dict(fn_sig.parameters)
# fn_sig.parameters.values()
fn_sig.parameters['plot_mode']
# fn_sig.parameters
fn_spec.args # all kwarg arguments: ['x', 'y', 'num_bins', 'debug_print']
fn_spec.defaults[-2].__class__.__name__ # a tuple of default values corresponding to each argument in args; ((64, 64), False)
"""
import inspect
fn_spec = inspect.getfullargspec(a_callable)
# fn_sig = inspect.signature(compute_position_grid_bin_size)
return fn_spec
# def get_arguments_as_passthrough(**kwargs):
def get_arguments_as_optional_dict(**kwargs):
""" Easily converts your existing argument-list style default values into a dict:
Defines a simple function that takes only **kwargs as its inputs and prints the values it recieves. Paste your values as arguments to the function call. The dictionary will be output to the console, so you can easily copy and paste.
Usage:
>>> get_arguments_as_optional_dict(point_size=8, font_size=10, name='build_center_labels_test', shape_opacity=0.8, show_points=False)
Output: ", **({'point_size': 8, 'font_size': 10, 'name': 'build_center_labels_test', 'shape_opacity': 0.8, 'show_points': False} | kwargs)"
"""
print(', **(' + f'{kwargs}' + ' | kwargs)')
# Enum for size units
class SIZE_UNIT(Enum):
BYTES = 1
KB = 2
MB = 3
GB = 4
def convert_unit(size_in_bytes, unit):
""" Convert the size from bytes to other units like KB, MB or GB"""
if unit == SIZE_UNIT.KB:
return size_in_bytes/1024
elif unit == SIZE_UNIT.MB:
return size_in_bytes/(1024*1024)
elif unit == SIZE_UNIT.GB:
return size_in_bytes/(1024*1024*1024)
else:
return size_in_bytes
| [
"typing.OrderedDict",
"inspect.getfullargspec"
] | [((1840, 1874), 'inspect.getfullargspec', 'inspect.getfullargspec', (['a_callable'], {}), '(a_callable)\n', (1862, 1874), False, 'import inspect\n'), ((917, 930), 'typing.OrderedDict', 'OrderedDict', ([], {}), '()\n', (928, 930), False, 'from typing import Callable, List, Optional, OrderedDict\n')] |
from lxml import html
import requests
import os
import csv
#Deleting the csv file if it exists
csvfile = "US_State_Measures.csv"
try:
os.remove(csvfile)
except OSError:
pass
htmlfiles = []
for root, dirs, files in os.walk("."):
for file in files:
if file.endswith(".html"):
htmlfiles.append(os.path.join(root, file))
#print ("File List : ", htmlfiles)
#website scrape code
#page = requests.get('https://covid19.healthdata.org/united-states-of-america/virginia')
#tree = html.fromstring(page.content)
#measure = tree.xpath('//*[@id="root"]/div/main/div[3]/div[1]/div[2]/div[1]/div[1]/text()')
#value = tree.xpath('//*[@id="root"]/div/main/div[3]/div[1]/div[2]/div[1]/div[2]/text()')
for htmlfile in htmlfiles:
with open(htmlfile, "r") as f:
tree = html.fromstring(f.read())
lmeasure = tree.xpath('//div[@class="_3xMrzF3nxII5ysvl1_7Ncx _1K95TivjKGl4X5qplZyPFT"]/text()')
lvalue = tree.xpath('//div[@class="fOHfNYVUtJdcPK7UXSNn4"]/text()')
state = tree.xpath('//span[@class="ant-select-selection-item"]/text()')
for _ in lmeasure:
state.extend(state)
#Combine the measure and date values
lall = [lmeasure,lvalue]
#map the result for measure with corresponding dates
lcsv = list(map(list, zip(*lall)))
lrows = [[x] + y for x, y in zip(state, lcsv)]
#print ("List of Measures: ", lmeasure)
#print ("List of Values: ", lvalue)
print ("Combined list: ", lrows)
#print ("State: ", state[0])
with open(csvfile, 'a', newline='') as cf:
writer = csv.writer(cf)
writer.writerows(lrows)
| [
"csv.writer",
"os.path.join",
"os.walk",
"os.remove"
] | [((224, 236), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (231, 236), False, 'import os\n'), ((139, 157), 'os.remove', 'os.remove', (['csvfile'], {}), '(csvfile)\n', (148, 157), False, 'import os\n'), ((1632, 1646), 'csv.writer', 'csv.writer', (['cf'], {}), '(cf)\n', (1642, 1646), False, 'import csv\n'), ((325, 349), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (337, 349), False, 'import os\n')] |
##############################################################################
# Copyright (c) 2018 <NAME>, <NAME>, and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from django.http import HttpResponse
from django.shortcuts import render
from account.models import Lab
import uuid
from workflow.workflow_manager import ManagerTracker, SessionManager
import logging
logger = logging.getLogger(__name__)
def attempt_auth(request):
try:
manager = ManagerTracker.managers[request.session['manager_session']]
return manager
except KeyError:
return None
def remove_workflow(request):
manager = attempt_auth(request)
if not manager:
return no_workflow(request)
has_more_workflows, result = manager.pop_workflow(discard=True)
if not has_more_workflows: # this was the last workflow, so delete the reference to it in the tracker
del ManagerTracker.managers[request.session['manager_session']]
return manager.render(request)
def add_workflow(request):
manager = attempt_auth(request)
if not manager:
return no_workflow(request)
try:
workflow_type = int(request.POST.get('workflow_type'))
except ValueError:
return HttpResponse(status=400)
manager.add_workflow(workflow_type=workflow_type)
return manager.render(request) # do we want this?
def manager_view(request):
manager = attempt_auth(request)
if not manager:
return no_workflow(request)
return manager.handle_request(request)
def viewport_view(request):
if not request.user.is_authenticated:
return login(request)
manager = attempt_auth(request)
if manager is None:
return no_workflow(request)
if request.method != 'GET':
return HttpResponse(status=405)
context = {
'contact_email': Lab.objects.get(name="UNH_IOL").contact_email
}
return render(request, 'workflow/viewport-base.html', context)
def create_workflow(request):
if request.method != 'POST':
return HttpResponse(status=405)
workflow_type = request.POST.get('workflow_type')
try:
workflow_type = int(workflow_type)
except Exception:
return HttpResponse(status=400)
mgr_uuid = create_session(workflow_type, request=request,)
request.session['manager_session'] = mgr_uuid
return HttpResponse()
def create_session(wf_type, request):
smgr = SessionManager(request=request)
smgr.add_workflow(workflow_type=wf_type, target_id=request.POST.get("target"))
manager_uuid = uuid.uuid4().hex
ManagerTracker.getInstance().managers[manager_uuid] = smgr
return manager_uuid
def no_workflow(request):
return render(request, 'workflow/no_workflow.html', {'title': "Not Found"}, status=404)
def login(request):
return render(request, "dashboard/login.html", {'title': 'Authentication Required'})
| [
"logging.getLogger",
"django.shortcuts.render",
"workflow.workflow_manager.SessionManager",
"account.models.Lab.objects.get",
"django.http.HttpResponse",
"uuid.uuid4",
"workflow.workflow_manager.ManagerTracker.getInstance"
] | [((666, 693), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (683, 693), False, 'import logging\n'), ((2194, 2249), 'django.shortcuts.render', 'render', (['request', '"""workflow/viewport-base.html"""', 'context'], {}), "(request, 'workflow/viewport-base.html', context)\n", (2200, 2249), False, 'from django.shortcuts import render\n'), ((2647, 2661), 'django.http.HttpResponse', 'HttpResponse', ([], {}), '()\n', (2659, 2661), False, 'from django.http import HttpResponse\n'), ((2713, 2744), 'workflow.workflow_manager.SessionManager', 'SessionManager', ([], {'request': 'request'}), '(request=request)\n', (2727, 2744), False, 'from workflow.workflow_manager import ManagerTracker, SessionManager\n'), ((2991, 3076), 'django.shortcuts.render', 'render', (['request', '"""workflow/no_workflow.html"""', "{'title': 'Not Found'}"], {'status': '(404)'}), "(request, 'workflow/no_workflow.html', {'title': 'Not Found'}, status=404\n )\n", (2997, 3076), False, 'from django.shortcuts import render\n'), ((3105, 3182), 'django.shortcuts.render', 'render', (['request', '"""dashboard/login.html"""', "{'title': 'Authentication Required'}"], {}), "(request, 'dashboard/login.html', {'title': 'Authentication Required'})\n", (3111, 3182), False, 'from django.shortcuts import render\n'), ((2063, 2087), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(405)'}), '(status=405)\n', (2075, 2087), False, 'from django.http import HttpResponse\n'), ((2330, 2354), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(405)'}), '(status=405)\n', (2342, 2354), False, 'from django.http import HttpResponse\n'), ((2847, 2859), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2857, 2859), False, 'import uuid\n'), ((1516, 1540), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(400)'}), '(status=400)\n', (1528, 1540), False, 'from django.http import HttpResponse\n'), ((2130, 2161), 'account.models.Lab.objects.get', 'Lab.objects.get', ([], {'name': '"""UNH_IOL"""'}), "(name='UNH_IOL')\n", (2145, 2161), False, 'from account.models import Lab\n'), ((2498, 2522), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(400)'}), '(status=400)\n', (2510, 2522), False, 'from django.http import HttpResponse\n'), ((2868, 2896), 'workflow.workflow_manager.ManagerTracker.getInstance', 'ManagerTracker.getInstance', ([], {}), '()\n', (2894, 2896), False, 'from workflow.workflow_manager import ManagerTracker, SessionManager\n')] |
import code.book_plots as bp
import code.gh_internal as gh
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np;
from filterpy.discrete_bayes import normalize
def scaled_update (hall, belief, z, prob):
scale_ = prob/(1-prob)
belief[hall==1] *=scale_
normalize(belief)
belief = np.array([0.1]*10)
hallway = np.array([1, 1, 0, 0, 0, 0, 0, 0, 1, 0])
reading = 1
scaled_update(hallway, belief, reading, prob=0.75)
belief /= sum(belief);
print("belief:", belief)
print ("sum = ", sum(belief))
plt.figure()
bp.bar_plot(belief).show()
| [
"filterpy.discrete_bayes.normalize",
"numpy.array",
"matplotlib.pyplot.figure",
"code.book_plots.bar_plot"
] | [((332, 352), 'numpy.array', 'np.array', (['([0.1] * 10)'], {}), '([0.1] * 10)\n', (340, 352), True, 'import numpy as np\n'), ((361, 401), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 0, 0, 0, 0, 1, 0]'], {}), '([1, 1, 0, 0, 0, 0, 0, 0, 1, 0])\n', (369, 401), True, 'import numpy as np\n'), ((545, 557), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (555, 557), True, 'import matplotlib.pyplot as plt\n'), ((295, 312), 'filterpy.discrete_bayes.normalize', 'normalize', (['belief'], {}), '(belief)\n', (304, 312), False, 'from filterpy.discrete_bayes import normalize\n'), ((558, 577), 'code.book_plots.bar_plot', 'bp.bar_plot', (['belief'], {}), '(belief)\n', (569, 577), True, 'import code.book_plots as bp\n')] |
import pytest
from pytest_django.fixtures import _django_db_fixture_helper
@pytest.fixture(scope="session", autouse=True)
def db_session(request, django_db_setup, django_db_blocker):
"""
Changed scope to 'session'
"""
if "django_db_reset_sequences" in request.funcargnames:
request.getfixturevalue("django_db_reset_sequences")
if (
"transactional_db" in request.funcargnames
or "live_server" in request.funcargnames
):
request.getfixturevalue("transactional_db")
else:
_django_db_fixture_helper(request, django_db_blocker, transactional=False)
| [
"pytest.fixture",
"pytest_django.fixtures._django_db_fixture_helper"
] | [((79, 124), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (93, 124), False, 'import pytest\n'), ((540, 614), 'pytest_django.fixtures._django_db_fixture_helper', '_django_db_fixture_helper', (['request', 'django_db_blocker'], {'transactional': '(False)'}), '(request, django_db_blocker, transactional=False)\n', (565, 614), False, 'from pytest_django.fixtures import _django_db_fixture_helper\n')] |
import os
import json
def load_data(filepath="data", filename="arrays.json"):
full_file_path=os.path.join(filepath, filename)
with open(full_file_path, 'r') as f:
lst = json.load(f)
return lst
def check_data(data):
error = "Data Error: {}"
passed = True
if not data:
passed = False
error = error.format("no data given")
elif len(data['array'])!=500:
passed = False
error = error.format("you did not submit a list of 500 numbers")
else:
try:
arr = [float(i) for i in data['array']]
except:
passed = False
error = error.format("array contains non numerical information")
if passed:
error = None
return [passed, error]
def add_data(data=None, filepath="data", filename="arrays.json"):
full_file_path = os.path.join(filepath, filename)
lst = load_data(filepath=filepath, filename=filename)
res = check_data(data)
if res[0]:
lst.append(data)
with open(full_file_path, 'w') as f:
lst = json.dump(lst, f)
return None
else:
return (404, res)
def get_recent_array(filepath="data", filename="arrays.json"):
full_file_path = os.path.join(filepath, filename)
with open(full_file_path, 'r') as f:
lst = json.load(f)
data = lst[-1]
data['array'] = sorted(data['array'])
return data
if __name__=="__main__":
arr = get_recent_array()
print(arr) | [
"json.load",
"os.path.join",
"json.dump"
] | [((98, 130), 'os.path.join', 'os.path.join', (['filepath', 'filename'], {}), '(filepath, filename)\n', (110, 130), False, 'import os\n'), ((849, 881), 'os.path.join', 'os.path.join', (['filepath', 'filename'], {}), '(filepath, filename)\n', (861, 881), False, 'import os\n'), ((1230, 1262), 'os.path.join', 'os.path.join', (['filepath', 'filename'], {}), '(filepath, filename)\n', (1242, 1262), False, 'import os\n'), ((186, 198), 'json.load', 'json.load', (['f'], {}), '(f)\n', (195, 198), False, 'import json\n'), ((1318, 1330), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1327, 1330), False, 'import json\n'), ((1070, 1087), 'json.dump', 'json.dump', (['lst', 'f'], {}), '(lst, f)\n', (1079, 1087), False, 'import json\n')] |
from django.shortcuts import render
from django.db.models import Avg
from register.models import Project
from projects.models import Task
from projects.forms import TaskRegistrationForm
from projects.forms import ProjectRegistrationForm
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
# Create your views here.
def projects(request):
print("HERE")
print(request.POST.get('task_id'))
print(request.is_ajax())
if request.method == 'POST':
print("POST")
print(request.is_ajax())
task_id = request.POST.get('task_id')
myUser = User.objects.get(pk=request.user.id)
myTask = Task.objects.get(pk = task_id)
myTask.claimed.add(myUser) #add the user to the task
return JsonResponse({'status': 'ok'})
projects = Project.objects.all()
tasks = Task.objects.all()
open_tasks = tasks.filter(status='Created')
my_tasks = tasks.filter(claimed__in = [request.user.id]).values_list('id', flat = True)
proj_dict = {}
context = {
'projects' : projects,
'tasks' : tasks,
'open_tasks' : open_tasks,
'my_tasks': my_tasks,
}
print(my_tasks)
return render(request, 'projects/projects.html', context)
def newTask(request):
if request.method == 'POST':
form = TaskRegistrationForm(request.POST)
context = {'form': form}
if form.is_valid():
instance = form.save(commit = False)
instance.status = "Created"
instance.save()
created = True
context = {
'created': created,
'form': form,
}
return render(request, 'projects/new_task.html', context)
else:
return render(request, 'projects/new_task.html', context)
else:
form = TaskRegistrationForm()
context = {
'form': form,
}
return render(request,'projects/new_task.html', context)
def newProject(request):
if request.method == 'POST':
form = ProjectRegistrationForm(request.POST)
context = {'form': form}
if form.is_valid():
instance = form.save(commit = False)
instance.declared = User.objects.get(pk=request.user.id)
instance.status = "Created"
instance.save()
created = True
form = ProjectRegistrationForm()
context = {
'created': created,
'form': form,
}
return render(request, 'projects/new_project.html', context)
else:
print("bad here")
return render(request, 'projects/new_project.html', context)
else:
form = ProjectRegistrationForm()
context = {
'form': form,
}
return render(request,'projects/new_project.html', context) | [
"django.shortcuts.render",
"django.http.JsonResponse",
"projects.forms.ProjectRegistrationForm",
"projects.models.Task.objects.all",
"projects.models.Task.objects.get",
"register.models.Project.objects.all",
"django.contrib.auth.models.User.objects.get",
"projects.forms.TaskRegistrationForm"
] | [((908, 929), 'register.models.Project.objects.all', 'Project.objects.all', ([], {}), '()\n', (927, 929), False, 'from register.models import Project\n'), ((942, 960), 'projects.models.Task.objects.all', 'Task.objects.all', ([], {}), '()\n', (958, 960), False, 'from projects.models import Task\n'), ((1294, 1344), 'django.shortcuts.render', 'render', (['request', '"""projects/projects.html"""', 'context'], {}), "(request, 'projects/projects.html', context)\n", (1300, 1344), False, 'from django.shortcuts import render\n'), ((701, 737), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'pk': 'request.user.id'}), '(pk=request.user.id)\n', (717, 737), False, 'from django.contrib.auth.models import User\n'), ((755, 783), 'projects.models.Task.objects.get', 'Task.objects.get', ([], {'pk': 'task_id'}), '(pk=task_id)\n', (771, 783), False, 'from projects.models import Task\n'), ((862, 892), 'django.http.JsonResponse', 'JsonResponse', (["{'status': 'ok'}"], {}), "({'status': 'ok'})\n", (874, 892), False, 'from django.http import JsonResponse\n'), ((1416, 1450), 'projects.forms.TaskRegistrationForm', 'TaskRegistrationForm', (['request.POST'], {}), '(request.POST)\n', (1436, 1450), False, 'from projects.forms import TaskRegistrationForm\n'), ((1939, 1961), 'projects.forms.TaskRegistrationForm', 'TaskRegistrationForm', ([], {}), '()\n', (1959, 1961), False, 'from projects.forms import TaskRegistrationForm\n'), ((2033, 2083), 'django.shortcuts.render', 'render', (['request', '"""projects/new_task.html"""', 'context'], {}), "(request, 'projects/new_task.html', context)\n", (2039, 2083), False, 'from django.shortcuts import render\n'), ((2157, 2194), 'projects.forms.ProjectRegistrationForm', 'ProjectRegistrationForm', (['request.POST'], {}), '(request.POST)\n', (2180, 2194), False, 'from projects.forms import ProjectRegistrationForm\n'), ((2833, 2858), 'projects.forms.ProjectRegistrationForm', 'ProjectRegistrationForm', ([], {}), '()\n', (2856, 2858), False, 'from projects.forms import ProjectRegistrationForm\n'), ((2930, 2983), 'django.shortcuts.render', 'render', (['request', '"""projects/new_project.html"""', 'context'], {}), "(request, 'projects/new_project.html', context)\n", (2936, 2983), False, 'from django.shortcuts import render\n'), ((1779, 1829), 'django.shortcuts.render', 'render', (['request', '"""projects/new_task.html"""', 'context'], {}), "(request, 'projects/new_task.html', context)\n", (1785, 1829), False, 'from django.shortcuts import render\n'), ((1863, 1913), 'django.shortcuts.render', 'render', (['request', '"""projects/new_task.html"""', 'context'], {}), "(request, 'projects/new_task.html', context)\n", (1869, 1913), False, 'from django.shortcuts import render\n'), ((2337, 2373), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'pk': 'request.user.id'}), '(pk=request.user.id)\n', (2353, 2373), False, 'from django.contrib.auth.models import User\n'), ((2488, 2513), 'projects.forms.ProjectRegistrationForm', 'ProjectRegistrationForm', ([], {}), '()\n', (2511, 2513), False, 'from projects.forms import ProjectRegistrationForm\n'), ((2637, 2690), 'django.shortcuts.render', 'render', (['request', '"""projects/new_project.html"""', 'context'], {}), "(request, 'projects/new_project.html', context)\n", (2643, 2690), False, 'from django.shortcuts import render\n'), ((2754, 2807), 'django.shortcuts.render', 'render', (['request', '"""projects/new_project.html"""', 'context'], {}), "(request, 'projects/new_project.html', context)\n", (2760, 2807), False, 'from django.shortcuts import render\n')] |
import unittest
from test.test_utils import get_repository
from unittest.mock import Mock
from autopr.database import Database
class DatabaseTest(unittest.TestCase):
def test_needs_pulling_empty(self):
db = Database()
self.assertTrue(db.needs_pulling())
def test_needs_pulling_not_empty(self):
db = Database(user=Mock())
self.assertFalse(db.needs_pulling())
def test_reset_empty(self):
db = Database(user=Mock(), repositories=[])
db.reset()
self.assertEqual(0, len(db.repositories))
def test_reset_non_empty(self):
repo_first = get_repository("first")
repo_first.done = True
repo_second = get_repository("second")
db = Database(
user=Mock(),
repositories=[
repo_first,
repo_second,
],
)
self.assertTrue(db.repositories[0].done)
self.assertFalse(db.repositories[1].done)
db.reset()
self.assertFalse(db.repositories[0].done)
self.assertFalse(db.repositories[1].done)
def test_merge_into(self):
db_first = Database(
user=Mock(),
repositories=[
get_repository("first"),
get_repository("second"),
],
)
db_second = Database(
user=Mock(),
repositories=[
get_repository("third"),
get_repository("fourth"),
],
)
db_first.merge_into(db_second)
self.assertEqual(4, len(db_first.repositories))
self.assertEqual("first", db_first.repositories[0].name)
self.assertEqual("fourth", db_first.repositories[3].name)
def test_repositories_to_process(self):
db = Database(
user=Mock(),
repositories=[
get_repository("removed", removed=True),
get_repository("done", done=True),
get_repository("non-removed"),
],
)
repositories = db.repositories_to_process()
self.assertEqual(1, len(repositories))
self.assertEqual("non-removed", repositories[0].name)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"autopr.database.Database",
"unittest.mock.Mock",
"test.test_utils.get_repository"
] | [((2230, 2245), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2243, 2245), False, 'import unittest\n'), ((222, 232), 'autopr.database.Database', 'Database', ([], {}), '()\n', (230, 232), False, 'from autopr.database import Database\n'), ((614, 637), 'test.test_utils.get_repository', 'get_repository', (['"""first"""'], {}), "('first')\n", (628, 637), False, 'from test.test_utils import get_repository\n'), ((691, 715), 'test.test_utils.get_repository', 'get_repository', (['"""second"""'], {}), "('second')\n", (705, 715), False, 'from test.test_utils import get_repository\n'), ((349, 355), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (353, 355), False, 'from unittest.mock import Mock\n'), ((462, 468), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (466, 468), False, 'from unittest.mock import Mock\n'), ((757, 763), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (761, 763), False, 'from unittest.mock import Mock\n'), ((1173, 1179), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1177, 1179), False, 'from unittest.mock import Mock\n'), ((1364, 1370), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1368, 1370), False, 'from unittest.mock import Mock\n'), ((1820, 1826), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1824, 1826), False, 'from unittest.mock import Mock\n'), ((1224, 1247), 'test.test_utils.get_repository', 'get_repository', (['"""first"""'], {}), "('first')\n", (1238, 1247), False, 'from test.test_utils import get_repository\n'), ((1265, 1289), 'test.test_utils.get_repository', 'get_repository', (['"""second"""'], {}), "('second')\n", (1279, 1289), False, 'from test.test_utils import get_repository\n'), ((1415, 1438), 'test.test_utils.get_repository', 'get_repository', (['"""third"""'], {}), "('third')\n", (1429, 1438), False, 'from test.test_utils import get_repository\n'), ((1456, 1480), 'test.test_utils.get_repository', 'get_repository', (['"""fourth"""'], {}), "('fourth')\n", (1470, 1480), False, 'from test.test_utils import get_repository\n'), ((1871, 1910), 'test.test_utils.get_repository', 'get_repository', (['"""removed"""'], {'removed': '(True)'}), "('removed', removed=True)\n", (1885, 1910), False, 'from test.test_utils import get_repository\n'), ((1928, 1961), 'test.test_utils.get_repository', 'get_repository', (['"""done"""'], {'done': '(True)'}), "('done', done=True)\n", (1942, 1961), False, 'from test.test_utils import get_repository\n'), ((1979, 2008), 'test.test_utils.get_repository', 'get_repository', (['"""non-removed"""'], {}), "('non-removed')\n", (1993, 2008), False, 'from test.test_utils import get_repository\n')] |
import math
import collections
import intergalactic
'''
gas_H = diffuse gas in the galactic Halo
gas_D = diffuse gas in the galactic Disc
cloud_H = molecular gas in the halo
cloud_D = molecular gas in the disc
f = infall rate
Stars are divided in two groups:
1) Low/intermediate mass stars (m <= 4 Msun)
2) Massive stars (m > 4 Msun)
So the 1 and 2 subscripts refer to this groups.
The d and h subscripts correspond to disc and halo.
Kh = proportionality factor of the SF in the halo
Kc = proportionality factor of the SF in the cloud formation
Ks = proportionality factor of the SF in the cloud-cloud collision
Ka = proportionality factor of the SF in the cloud-massive stars interactions
Since stars are divided in two groups, the parameters
involving Star Formation are divided in two groups too:
Kh = Kh1 + Kh2
Kc = Kc1 + Kc2
Ks = Ks1 + Ks2 + Ks'
Ka = Ka1 + Ka2 + Ka'
where Ks' and Ka' refer to the restitution of diffuse gas
due to the collision and interaction processes
D1 = death rates for low/intermediate mass stars
D2 = death rates for massive stars
Wd = Restitution rate in the disc
Wh = Restitution rate in the halo
'''
# Constants
region_width_kpc = 1 # width of the region in kiloparsecs
region_galactocentric_radio_kpc = 8 # distance of the region to the galactic center in kiloparsecs
halo_radio_kpc = 260.996 # radio of the halo in kiloparsecs
disk_height_kpc = 0.2 # height of the disk in kiloparsecs
G = 0.44985 # Gravitational constant in Kpc^3/(10^9Msun * 10^7yrs)
virial_mass = 1e12 # Virial mass of the dark matter halo
disc_barionic_mass = 1e11 # Mass of the barionic disc
def model():
# Initial values of the system (y)
initial_values = collections.defaultdict(float)
initial_values['gas_halo'] = virial_mass
initial_values['gas_disk'] = disc_barionic_mass
gas_H = initial_values['gas_halo']
gas_D = initial_values['gas_disk']
n = 1.5
gas_H_n = gas_H ** n
gas_D_n = gas_D ** n
molecular_gas_H = initial_values['molecular_gas_halo']
molecular_gas_D = initial_values['molecular_gas_disk']
S1h = initial_values['s_low_halo']
S2h = initial_values['s_massive_halo']
S1d = initial_values['s_low_disk']
S2d = initial_values['s_massive_disk']
Kh1, Kh2 = star_formation_factor_halo()
Kc = star_formation_factor_cloud()
Ka1, Ka2, Ka_rest = star_formation_cloud_massive_stars_factor()
Ks1, Ks2, Ks_rest = star_formation_cloud_collisions_factor()
f = 1
Wd = 0
Wh = 0
D1d = 0
D2d = 0
D1h = 0
D2h = 0
c = molecular_gas_D
c2= c ** 2
# Derivatives (ẏ)
equations = {'halo': {}, 'disk': {}}
equations['disk']['gas'] = (-Kc * gas_D_n) + (Ka_rest * c * S2d) + (Ks_rest * c2) + (f * gas_H) + Wd
equations['disk']['cloud'] = (Kc * gas_D_n) - ((Ka1 + Ka2 + Ka_rest) * c * S2d) - ((Ks1 + Ks2 + Ks_rest) * c2)
equations['disk']['stars_low'] = (Ks1 * c2) + (Ka1 * c * S2d) - D1d
equations['disk']['stars_massive'] = (Ks2 * c2) + (Ka2 * c * S2d) - D2d
equations['disk']['remnants'] = D1d + D2d - Wd
equations['halo']['gas'] = -((Kh1 + Kh2) * gas_H_n) - (f * gas_H) + Wh
equations['halo']['cloud'] = 0.0
equations['halo']['stars_low'] = (Kh1 * gas_H_n) - D1h
equations['halo']['stars_massive'] = (Kh2 * gas_H_n) - D2h
equations['halo']['remnants'] = D1h + D2h - Wh
return equations
def integrator(state):
pass
def star_formation_factor_halo():
efficiency = 2.173 # epsilon_h computed for a best value K_h = 9e-3 able to reproduce SFR and abundances of MWG halo
factor = efficiency * (G / volume_halo())**0.5
return [factor*0.5, factor*0.5]
def star_formation_factor_cloud():
return 1
def star_formation_cloud_massive_stars_factor():
return [1, 1, 0.1]
def star_formation_cloud_collisions_factor():
return [1, 1, 0.1]
def volume_halo(region_shape='square'):
h = math.sqrt((halo_radio_kpc ** 2) - (region_galactocentric_radio_kpc ** 2))
if region_shape == 'square':
square_area = region_width_kpc * region_width_kpc
return square_area * 2 * h
elif region_shape == 'ring':
half_ring_width = 0.5 * region_width_kpc
ring_area = math.pi * (
(region_galactocentric_radio_kpc + half_ring_width) ** 2 -
(region_galactocentric_radio_kpc - half_ring_width) ** 2)
return ring_area * 2 * h
else:
raise Exception("Wrong region shape. Allowed options: [square, ring]")
def volume_disk(region_shape='square'):
if region_shape == 'square':
square_area = region_width_kpc * region_width_kpc
return square_area * disk_height_kpc
elif region_shape == 'ring':
half_ring_width = 0.5 * region_width_kpc
ring_area = math.pi * (
(region_galactocentric_radio_kpc + half_ring_width) ** 2 -
(region_galactocentric_radio_kpc - half_ring_width) ** 2)
return ring_area * disk_height_kpc
else:
raise Exception("Wrong region shape. Allowed options: [square, ring]")
| [
"math.sqrt",
"collections.defaultdict"
] | [((1749, 1779), 'collections.defaultdict', 'collections.defaultdict', (['float'], {}), '(float)\n', (1772, 1779), False, 'import collections\n'), ((3944, 4013), 'math.sqrt', 'math.sqrt', (['(halo_radio_kpc ** 2 - region_galactocentric_radio_kpc ** 2)'], {}), '(halo_radio_kpc ** 2 - region_galactocentric_radio_kpc ** 2)\n', (3953, 4013), False, 'import math\n')] |
"""demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView
from api.video import views
from rest_framework.authtoken.views import obtain_auth_token
from rest_framework_swagger.views import get_swagger_view
from rest_framework_swagger.renderers import OpenAPIRenderer, SwaggerUIRenderer
from rest_framework.schemas import get_schema_view
from api.video.views import VideoList, VideoARList, VideoRecordsView
from api.video.views import CustomAuthToken
schema_view = get_swagger_view(title='Pastebin API')
urlpatterns = [
path('index/', views.index),
path('admin/', admin.site.urls),
path('openapi', get_schema_view(
title="Your Project",
description="API for all things …"
), name='openapi-schema'),
path('swagger-ui/', TemplateView.as_view(
template_name='swagger-ui.html',
extra_context={'schema_url':'openapi-schema'}
), name='swagger-ui'),
path('redoc/', TemplateView.as_view(
template_name='redoc.html',
extra_context={'schema_url':'openapi-schema'}
), name='redoc'),
path('uuids/<str:uuid>/', VideoList.as_view()),
path('aspects/<str:aspect_ratio>/', VideoARList.as_view()),
path('videos/', VideoRecordsView.as_view()),
path('docapi', schema_view),
path('api-token-auth/', CustomAuthToken.as_view()),
]
| [
"django.views.generic.TemplateView.as_view",
"api.video.views.VideoList.as_view",
"rest_framework_swagger.views.get_swagger_view",
"rest_framework.schemas.get_schema_view",
"api.video.views.VideoRecordsView.as_view",
"api.video.views.CustomAuthToken.as_view",
"django.urls.path",
"api.video.views.Video... | [((1151, 1189), 'rest_framework_swagger.views.get_swagger_view', 'get_swagger_view', ([], {'title': '"""Pastebin API"""'}), "(title='Pastebin API')\n", (1167, 1189), False, 'from rest_framework_swagger.views import get_swagger_view\n'), ((1213, 1240), 'django.urls.path', 'path', (['"""index/"""', 'views.index'], {}), "('index/', views.index)\n", (1217, 1240), False, 'from django.urls import path, include\n'), ((1246, 1277), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (1250, 1277), False, 'from django.urls import path, include\n'), ((1910, 1937), 'django.urls.path', 'path', (['"""docapi"""', 'schema_view'], {}), "('docapi', schema_view)\n", (1914, 1937), False, 'from django.urls import path, include\n'), ((1299, 1372), 'rest_framework.schemas.get_schema_view', 'get_schema_view', ([], {'title': '"""Your Project"""', 'description': '"""API for all things …"""'}), "(title='Your Project', description='API for all things …')\n", (1314, 1372), False, 'from rest_framework.schemas import get_schema_view\n'), ((1444, 1550), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""swagger-ui.html"""', 'extra_context': "{'schema_url': 'openapi-schema'}"}), "(template_name='swagger-ui.html', extra_context={\n 'schema_url': 'openapi-schema'})\n", (1464, 1550), False, 'from django.views.generic import TemplateView\n'), ((1607, 1708), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""redoc.html"""', 'extra_context': "{'schema_url': 'openapi-schema'}"}), "(template_name='redoc.html', extra_context={\n 'schema_url': 'openapi-schema'})\n", (1627, 1708), False, 'from django.views.generic import TemplateView\n'), ((1771, 1790), 'api.video.views.VideoList.as_view', 'VideoList.as_view', ([], {}), '()\n', (1788, 1790), False, 'from api.video.views import VideoList, VideoARList, VideoRecordsView\n'), ((1833, 1854), 'api.video.views.VideoARList.as_view', 'VideoARList.as_view', ([], {}), '()\n', (1852, 1854), False, 'from api.video.views import VideoList, VideoARList, VideoRecordsView\n'), ((1877, 1903), 'api.video.views.VideoRecordsView.as_view', 'VideoRecordsView.as_view', ([], {}), '()\n', (1901, 1903), False, 'from api.video.views import VideoList, VideoARList, VideoRecordsView\n'), ((1967, 1992), 'api.video.views.CustomAuthToken.as_view', 'CustomAuthToken.as_view', ([], {}), '()\n', (1990, 1992), False, 'from api.video.views import CustomAuthToken\n')] |
# mfield / mfield.py
import numpy as np
try:
import matlab
import matlab.engine
except ImportError:
pass
import time
import io
import os
class MField(object):
'''
Implementation of FIELD II using the MATLAB engine for python.
'''
def __init__(self, path=None):
# set default path to location of m-files (where this module is)
if path is None:
path = os.path.dirname(os.path.abspath(__file__))
# try, for at most 1 minute, to start MATLAB engine
for i in range(6):
try:
self._mateng = matlab.engine.start_matlab()
break
except (matlab.engine.EngineError, TypeError):
time.sleep(10)
# set MATLAB engine path to location of m-files
self._mateng.cd(str(os.path.normpath(path)), nargout=0)
def __del__(self):
# self.field_end() # end FIELD II
self._mateng.quit() # shutdown MATLAB engine
def _numpy_to_mat(self, array, orient='row'):
if array.ndim == 1:
if orient.lower() == 'row':
sz = (1, array.size)
elif orient.lower() in ('col', 'column'):
sz = (array.size, 1)
else:
sz = None
ret = matlab.double(initializer=array.tolist(), size=sz)
return ret
def _mat_to_numpy(self, array):
return np.array(array).squeeze()
## FIELD FUNCTIONS ##
def field_init(self, suppress=-1):
self._mateng.field_init(suppress, nargout=0)
def field_end(self):
self._mateng.field_end(nargout=0, stdout=io.StringIO())
def set_field(self, option_name, value):
self._mateng.set_field(option_name, value, nargout=0)
def field_info(self):
self._mateng.field_info(nargout=0)
## CALC FUNCTIONS ##
def calc_scat(self, Th1, Th2, points, amplitudes):
points_mat = self._numpy_to_mat(points, orient='row')
amplitudes_mat = self._numpy_to_mat(amplitudes, orient='col')
ret = self._mateng.calc_scat(Th1, Th2, points_mat, amplitudes_mat,
nargout=2, stdout=io.StringIO())
scat = self._mat_to_numpy(ret[0])
t0 = ret[1]
return scat, t0
def calc_scat_all(self, Th1, Th2, points, amplitudes, dec_factor):
points_mat = self._numpy_to_mat(points, orient='row')
amplitudes_mat = self._numpy_to_mat(amplitudes, orient='col')
ret = self._mateng.calc_scat_all(Th1, Th2, points_mat, amplitudes_mat,
dec_factor, nargout=2, stdout=io.StringIO())
scat = self._mat_to_numpy(ret[0])
t0 = ret[1]
return scat, t0
def calc_scat_multi(self, Th1, Th2, points, amplitudes):
points_mat = self._numpy_to_mat(points, orient='row')
amplitudes_mat = self._numpy_to_mat(amplitudes, orient='col')
ret = self._mateng.calc_scat_multi(Th1, Th2, points_mat, amplitudes_mat,
nargout=2, stdout=io.StringIO())
scat = self._mat_to_numpy(ret[0])
t0 = ret[1]
return scat, t0
def calc_h(self, Th, points):
points_mat = self._numpy_to_mat(points, orient='row')
ret = self._mateng.calc_h(Th, points_mat, nargout=2, stdout=io.StringIO())
h = self._mat_to_numpy(ret[0])
t0 = ret[1]
return h, t0
def calc_hp(self, Th, points):
points_mat = self._numpy_to_mat(points, orient='row')
ret = self._mateng.calc_hp(Th, points_mat, nargout=2, stdout=io.StringIO())
hp = self._mat_to_numpy(ret[0])
t0 = ret[1]
return hp, t0
def calc_hhp(self, Th1, Th2, points):
points_mat = self._numpy_to_mat(points, orient='row')
ret = self._mateng.calc_hhp(Th1, Th2, points_mat, nargout=2, stdout=io.StringIO())
hhp = self._mat_to_numpy(ret[0])
t0 = ret[1]
return hhp, t0
## XDC FUNCTIONS ##
def xdc_impulse(self, Th, pulse):
pulse_mat = self._numpy_to_mat(pulse, orient='row')
self._mateng.xdc_impulse(Th, pulse_mat, nargout=0)
def xdc_excitation(self, Th, pulse):
pulse_mat = self._numpy_to_mat(pulse, orient='row')
self._mateng.xdc_excitation(Th, pulse_mat, nargout=0)
def xdc_linear_array(self, no_elements, width, height, kerf, no_sub_x,
no_sub_y, focus):
focus_mat = self._numpy_to_mat(focus, orient='row')
ret = self._mateng.xdc_linear_array(no_elements, width, height, kerf,
no_sub_x, no_sub_y, focus_mat, nargout=1)
return ret
def xdc_show(self, Th, info_type='all'):
self._mateng.xdc_show(Th, info_type, nargout=0)
def xdc_focus(self, Th, times, points):
times_mat = self._numpy_to_mat(times, orient='col')
points_mat = self._numpy_to_mat(points, orient='row')
self._mateng.xdc_focus(Th, times_mat, points_mat, nargout=0)
def xdc_focus_times(self, Th, times, delays):
times_mat = self._numpy_to_mat(times, orient='col')
delays_mat = self._numpy_to_mat(delays, orient='row')
self._mateng.xdc_focus_times(Th, times_mat, delays_mat, nargout=0)
def xdc_free(self, Th):
self._mateng.xdc_free(Th, nargout=0)
def xdc_get(self, Th, info_type='rect'):
ret = self._mat_to_numpy(self._mateng.xdc_get(Th, info_type, nargout=1))
return ret
def xdc_rectangles(self, rect, center, focus):
rect_mat = self._numpy_to_mat(rect, orient='row')
center_mat = self._numpy_to_mat(center, orient='row')
focus_mat = self._numpy_to_mat(focus, orient='row')
ret = self._mateng.xdc_rectangles(rect_mat, center_mat, focus_mat,
nargout=1)
return ret
def xdc_focused_array(self, no_elements, width, height, kerf, rfocus, no_sub_x, no_sub_y, focus):
focus_mat = self._numpy_to_mat(focus, orient='row')
ret = self._mateng.xdc_focused_array(no_elements, width, height, kerf,
rfocus, no_sub_x, no_sub_y, focus_mat, nargout=1)
return ret
def xdc_piston(self, radius, ele_size):
ret = self._mateng.xdc_piston(radius, ele_size)
return ret
def xdc_apodization(self, Th, times, values):
times_mat = self._numpy_to_mat(times, orient='col')
values_mat = self._numpy_to_mat(values, orient='row')
self._mateng.xdc_apodization(Th, times_mat, values_mat, nargout=0)
def xdc_quantization(self, Th, value):
self._mateng.xdc_quantization(Th, value, nargout=0)
def xdc_2d_array(self):
raise NotImplementedError
def xdc_concave(self, radius, focal_radius, ele_size):
ret = self._mateng.xdc_concave(radius, focal_radius, ele_size)
return ret
def xdc_convex_array(self):
raise NotImplementedError
def xdc_convex_focused_array(self):
raise NotImplementedError
## ELE FUNCTIONS ##
def ele_apodization(self, Th, element_no, apo):
element_no_mat = self._numpy_to_mat(element_no, orient='col')
apo_mat = self._numpy_to_mat(apo, orient='row')
self._mateng.ele_apodization(Th, element_no_mat, apo_mat, nargout=0)
def ele_delay(self, Th, element_no, delays):
element_no_mat = self._numpy_to_mat(element_no, orient='col')
delays_mat = self._numpy_to_mat(delays, orient='row')
self._mateng.ele_delay(Th, element_no_mat, delays_mat, nargout=0)
## TEST ##
if __name__ == '__main__':
# from scipy.signal import gausspulse
from .. simulations import sim_functions as sim
field = MField()
field.field_init()
field.set_field('c', 1500)
field.set_field('fs', 100e6)
field.set_field('att', 0)
field.set_field('freq_att', 10e6)
field.set_field('att_f0', 0)
field.set_field('use_att', 1)
fc = 10e6
fbw = 1.0
fs = 100e6
pulse, t = sim.gausspulse(fc, fbw, fs)
tx = field.xdc_linear_array(64, 0.0002, 0.001, 300e-6, 1, 2, np.array([0, 0, 0.03]))
field.xdc_impulse(tx, pulse)
field.xdc_excitation(tx, np.array([1]))
field.field_info()
# field.xdc_show(tx)
scat, t0 = field.calc_scat_multi(tx, tx, np.array([0, 0, 0.03]), np.array([1]))
field.field_end()
field.close()
| [
"matlab.engine.start_matlab",
"time.sleep",
"os.path.normpath",
"numpy.array",
"os.path.abspath",
"io.StringIO"
] | [((7976, 7998), 'numpy.array', 'np.array', (['[0, 0, 0.03]'], {}), '([0, 0, 0.03])\n', (7984, 7998), True, 'import numpy as np\n'), ((8062, 8075), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (8070, 8075), True, 'import numpy as np\n'), ((8172, 8194), 'numpy.array', 'np.array', (['[0, 0, 0.03]'], {}), '([0, 0, 0.03])\n', (8180, 8194), True, 'import numpy as np\n'), ((8196, 8209), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (8204, 8209), True, 'import numpy as np\n'), ((428, 453), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (443, 453), False, 'import os\n'), ((591, 619), 'matlab.engine.start_matlab', 'matlab.engine.start_matlab', ([], {}), '()\n', (617, 619), False, 'import matlab\n'), ((817, 839), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (833, 839), False, 'import os\n'), ((1395, 1410), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (1403, 1410), True, 'import numpy as np\n'), ((1617, 1630), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1628, 1630), False, 'import io\n'), ((2131, 2144), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2142, 2144), False, 'import io\n'), ((2561, 2574), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2572, 2574), False, 'import io\n'), ((2971, 2984), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2982, 2984), False, 'import io\n'), ((3241, 3254), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3252, 3254), False, 'import io\n'), ((3507, 3520), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3518, 3520), False, 'import io\n'), ((3789, 3802), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3800, 3802), False, 'import io\n'), ((717, 731), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (727, 731), False, 'import time\n')] |
from past.builtins import basestring
from builtins import object
from itertools import chain
import re
# from copy import copy
import operator
from collections import OrderedDict
from nineml.exceptions import (
NineMLUsageError, NineMLNameError, NineMLInvalidElementTypeException)
from .visitors.cloner import Cloner
from .visitors.queriers import ObjectFinder
from .visitors.equality import EqualityChecker, Hasher, MismatchFinder
from functools import reduce
def sort_key(elem):
return elem.sort_key
def hash_non_str(key):
if not isinstance(key, basestring):
key = hash(key)
return key
camel_caps_re = re.compile(r'([a-z])([A-Z])')
class BaseNineMLObject(object):
"""
Base class for all 9ML-type classes
"""
nineml_type_v1 = None
nineml_attr = ()
nineml_child = {}
nineml_children = ()
# Used to distinguish between permanent objects and those that are created
# on the fly, such as the ones used to duck-type MultiDynamics objects with
# Dynamics objects
temporary = False
# Specifies whether a serialized object has a "body" (i.e. in XML)
has_serial_body = False
@classmethod
def _sorted_values(self, container):
if isinstance(container, dict):
container = iter(dict.values())
return sorted(container)
def __repr__(self):
return "{}(name='{}')".format(self.nineml_type, self.key)
def __str__(self):
return repr(self)
def __eq__(self, other):
return self.equals(other)
def __hash__(self):
return Hasher().hash(self)
def __ne__(self, other):
return not self == other
@property
def id(self):
"""
An ID used to distinguish two objects from each other.
If a "non-temporary" object (i.e. not a namespace escaped object
generated on the fly to duck type with a base class, such as
_MultiRegime, _NamespaceParameter, etc...) then this method returns
the address of the object in memory.
If a temporary object, this is the ID of its parent combined with
its class type and unique key
"""
if self.temporary:
# Create a unique string from the id of the parent (which should
# be anchored in the memory location of a "non-temporary" object)
# plus the name of the type and its key.
try:
parent_id = hex(self._parent.id)
except TypeError:
parent_id = self._parent.id # Temporary object ID
id_ = parent_id + type(self).__name__ + '_' + str(self.key)
else:
id_ = id(self)
return id_
def equals(self, other, **kwargs):
checker = EqualityChecker(**kwargs)
return checker.check(self, other, **kwargs)
def find_mismatch(self, other, **kwargs):
finder = MismatchFinder(**kwargs)
return finder.find(self, other, **kwargs)
def clone(self, cloner=None, name=None, **kwargs):
"""
General purpose clone operation, which copies the attributes used
to define equality between 9ML objects. Other attributes, such as
the document the 9ML object belongs to are re-initialized. Use this
in favour of Python's copy and deepcopy functions unless you know what
you want (i.e. things are likely to break if you are not careful).
Parameters
----------
cloner : Cloner
A Cloner instance to be used to clone the object with. If None,
a new instance is created using the **kwargs
name : str
A new name for the clone. If none the original name is kept.
exclude_annotations : bool
Flags that annotations should be omitted from the clone
"""
if cloner is None:
cloner = Cloner(**kwargs)
clone = cloner.clone(self, **kwargs)
if name is not None:
clone._name = name
return clone
def find(self, nineml_obj):
"""
Finds the element within the container that equals the given
element
Parameters
----------
nineml_obj : BaseNineMLObject
The object to find within the container
"""
return ObjectFinder(nineml_obj, self).found
def write(self, url, **kwargs):
"""
Serialize and writes the 9ML object to file
Parameters
----------
url : str
A path on the local file system (either absoluate or relative).
The format for the serialization is written in is determined by the
extension of the url.
register : bool
Whether to store the document in the cache after writing
version : str | float | int
The version to serialize the NineML objects to
"""
nineml.write(url, self, **kwargs)
def serialize(self, **kwargs):
"""
Serializes a NineML object into a serialized element
Parameters
----------
format : str
The name of the format (which matches a key format_to_serializer)
version : str | float | int
The version to serialize the NineML objects to
document : Document
The document to write local references to
to_str : bool
To serialize to a string instead of a serial element.
"""
return nineml.serialize(self, **kwargs)
@classmethod
def unserialize(cls, serial_elem, format, version, **kwargs): # @ReservedAssignment @IgnorePep8
"""
Unserializes a serial element to the given NineML class
Parameters
----------
serial_elem : <serial-element>
A serial element in the format given
format : str
The name of the format (which matches a key format_to_serializer)
version : str | float | int
The version to serialize the NineML objects to
document : Document | None
The document to read local references from
url : URL | None
The url to assign to the unserialized object
root : <serial-element>
A serial element of containing the document to read local
references from
"""
return nineml.unserialize(serial_elem, cls, format=format,
version=version, **kwargs)
@property
def key(self):
"""
Key with which to uniquely identify the 9ML object from others in its
container
"""
try:
return self.name
except AttributeError:
assert False, (
"{} class does not have a name and doesn't implement the 'key'"
" property".format(self.__class__.__name__))
@property
def sort_key(self):
"""
Returns a key that can be used to sort the 9ML object with others in
its class. Typically the same as 'key' but in some classes such as
Triggers and OnConditions, which use the condition equation as a key
a string representation needs to be used instead.
"""
return self.key
@classmethod
def _child_accessor_name(cls):
return camel_caps_re.sub(r'\1_\2', cls.nineml_type).lower()
@classmethod
def _children_iter_name(cls):
name = cls._child_accessor_name()
return pluralise(name)
@classmethod
def _children_dict_name(cls):
return '_' + cls._children_iter_name()
@classmethod
def _num_children_name(cls):
return 'num_' + cls._children_iter_name()
@classmethod
def _children_keys_name(cls):
return cls._child_accessor_name() + (
'_names' if hasattr(cls, 'name') else '_keys')
class AnnotatedNineMLObject(BaseNineMLObject):
def __init__(self, annotations=None):
if annotations is None:
annotations = nineml.annotations.Annotations()
else:
assert isinstance(annotations, nineml.annotations.Annotations)
self._annotations = annotations
@property
def annotations(self):
return self._annotations
def annotations_equal(self, other, annotations_ns=[], **kwargs): # @UnusedVariable @IgnorePep8
"""
Check for equality between annotations within specified namespaces of
two 9ML objects.
Parameters
----------
annotations_ns : list(str)
List of annotation namespaces to check for in equality check
Returns
-------
equality : bool
Whether the annotations of the two 9ML objects are equal
"""
if not hasattr(self, 'annotations'):
return True
for name, ns in self.annotations:
if ns in annotations_ns:
try:
if self.annotations[(name, ns)] != other.annotations[(name,
ns)]:
return False
except NineMLNameError:
return False
return True
class DocumentLevelObject(BaseNineMLObject):
def __init__(self):
# _document is set when the object is added to a document
self._document = None
@property
def document(self):
return self._document
@property
def url(self):
if self.document is not None:
url = self.document.url
else:
url = None
return url
@property
def attributes_with_dimension(self):
return [] # To be overridden in derived classes
@property
def attributes_with_units(self):
return [] # To be overridden in derived classes
@property
def all_units(self):
return [a.units for a in self.attributes_with_units]
@property
def all_dimensions(self):
return [a.dimension for a in self.attributes_with_dimension]
def write(self, fname, **kwargs):
"""
Writes the top-level NineML object to file in XML.
"""
nineml.write(fname, self, **kwargs)
class DynamicPortsObject(BaseNineMLObject):
"""
Defines generic iterators and accessors for objects that expose
dynamic ports
"""
@property
def ports(self):
return chain(self.analog_send_ports, self.analog_receive_ports,
self.analog_reduce_ports, self.event_send_ports,
self.event_receive_ports)
def port(self, name):
try:
return self.send_port(name)
except NineMLNameError:
try:
return self.receive_port(name)
except NineMLNameError:
raise NineMLNameError(
"'{}' Dynamics object does not have a port named '{}'"
.format(self.name, name))
@property
def port_names(self):
return (p.name for p in self.ports)
def receive_port(self, name):
try:
return self.event_receive_port(name)
except NineMLNameError:
try:
return self.analog_receive_port(name)
except NineMLNameError:
try:
return self.analog_reduce_port(name)
except NineMLNameError:
raise NineMLNameError(
"'{}' Dynamics object does not have a receive port "
"named '{}'".format(self.name, name))
def send_port(self, name):
try:
return self.event_send_port(name)
except NineMLNameError:
try:
return self.analog_send_port(name)
except NineMLNameError:
raise NineMLNameError(
"'{}' Dynamics object does not have a send port "
"named '{}'".format(self.name, name))
@property
def send_ports(self):
return chain(self.analog_send_ports, self.event_send_ports)
@property
def receive_ports(self):
return chain(self.analog_receive_ports, self.analog_reduce_ports,
self.event_receive_ports)
@property
def send_port_names(self):
return chain(self.analog_send_port_names, self.event_send_port_names)
@property
def receive_port_names(self):
return chain(self.analog_receive_port_names,
self.analog_reduce_port_names,
self.event_receive_port_names)
@property
def num_send_ports(self):
return self.num_analog_send_ports + self.num_event_send_ports
@property
def num_receive_ports(self):
return (self.num_analog_receive_ports + self.num_analog_reduce_ports +
self.num_event_receive_ports)
@property
def num_analog_ports(self):
return (self.num_analog_receive_ports + self.num_analog_send_ports +
self.num_analog_reduce_ports)
@property
def num_event_ports(self):
return (self.num_event_receive_ports + self.num_event_send_ports)
@property
def num_ports(self):
return self.num_send_ports + self.num_receive_ports
@property
def analog_ports(self):
"""Returns an iterator over the local analog port objects"""
return chain(self.analog_send_ports, self.analog_receive_ports,
self.analog_reduce_ports)
@property
def analog_port_names(self):
return (p.name for p in self.analog_ports)
@property
def event_ports(self):
return chain(self.event_send_ports, self.event_receive_ports)
def analog_port(self, name):
try:
return self.analog_send_port(name)
except KeyError:
try:
return self.analog_receive_port(name)
except KeyError:
return self.analog_reduce_port(name)
def event_port(self, name):
try:
return self.event_send_port(name)
except KeyError:
return self.event_receive_port(name)
@property
def event_port_names(self):
return (p.name for p in self.event_ports)
class ContainerObject(BaseNineMLObject):
"""
An abstract base class for handling the manipulation of member objects
(which are stored in dictionaries that can be detected by member type).
Deriving classes are expected to have the 'nineml_children' class
attribute listing the classes of the children in the container.
"""
def __init__(self):
for children_type in self.nineml_children:
setattr(self, children_type._children_dict_name(), OrderedDict())
self._parent = None # Used to link up the the containing document
def add(self, *elements):
add_to_doc_visitor = nineml.document.AddToDocumentVisitor(
self.document)
for element in elements:
dct = self._member_dict(element)
if element.key in dct:
raise NineMLUsageError(
"Could not add '{}' {} to container as it clashes "
"with an existing element with the same key"
.format(element.key, type(element).__name__))
dct[element.key] = element
# Set parent if a property of the child element to add
if hasattr(element, 'parent'):
element._parent = self
# Add nested references to document
if self.document is not None:
add_to_doc_visitor.visit(element)
def remove(self, *elements):
for element in elements:
dct = self._member_dict(element)
try:
del dct[element.key]
except KeyError:
raise NineMLUsageError(
"Could not remove '{}' from container as it was not "
"found in member dictionary (use 'ignore_missing' option "
"to ignore)".format(element.key))
# Remove reference to parent if present
try:
if element.parent is self:
element._parent = None
except AttributeError:
pass
def _update_member_key(self, old_key, new_key):
"""
Updates the member key for a given element_type
"""
for child_type in self.nineml_children:
member_dict = self._member_dict(child_type)
try:
member_dict[new_key] = member_dict.pop(old_key)
except KeyError:
pass
def elements(self, child_types=None):
"""
Iterates through all the core member elements of the container. For
core 9ML objects this will be the same as those iterated by the
__iter__ magic method, where as for 9ML extensions.
"""
if child_types is None:
child_types = self.nineml_children
return chain(*(self._members_iter(child_type)
for child_type in child_types))
def element(self, name, child_types=None, include_send_ports=False):
"""
Looks a member item by "name" (identifying characteristic)
Parameters
----------
name : str
Name of the element to return
nineml_children : dict[str, str]
Mapping from element type to accessor name
include_send_ports:
As send ports will typically mask the name as an alias or
state variable (although not necessarily in MultiDynamics objects)
they are ignored unless this kwarg is set to True, in which case
they will be returned only if no state variable or alias is found.
Returns
-------
elem : NineMLBaseObject
The element corresponding to the provided 'name' argument
"""
if child_types is None:
child_types = self.nineml_children
send_port = None
for child_type in child_types:
try:
elem = self._member_accessor(child_type)(name)
# Ignore send ports as they otherwise mask
# aliases/state variables
if isinstance(elem, SendPortBase):
send_port = elem
else:
return elem # No need to wait to end of loop
except NineMLNameError:
pass
if include_send_ports and send_port is not None:
return send_port
else:
raise NineMLNameError(
"'{}' was not found in '{}' {} object"
.format(name, self.key, self.__class__.__name__))
def num_elements(self, child_types=None):
if child_types is None:
child_types = self.nineml_children
return reduce(operator.add,
(self._num_members(child_type)
for child_type in child_types))
def element_keys(self, child_types=None):
if child_types is None:
child_types = self.nineml_children
all_keys = set()
for child_type in child_types:
# Some of these do not meet the stereotypical *_names format, e.g.
# time_derivative_variables, could change these to *_keys instead
try:
for key in self._member_keys_iter(child_type):
# Because send ports can have the same name as state
# variables and aliases duplicates need to be avoided
all_keys.add(key)
except AttributeError:
pass
return iter(all_keys)
def __iter__(self):
raise TypeError("{} containers are not iterable"
.format(type(self).__name__))
def index_of(self, element):
"""
Returns the index of an element amongst others of its type.
This function can be useful during code-generation from 9ML, where the
name of an element can be replaced with a unique integer value (and
referenced elsewhere in the code).
"""
return list(self._member_keys_iter(element)).index(element.key)
def from_index(self, index, child_type):
"""
The inverse of the index_of method for retrieving an object from its
index
"""
return list(self._members_iter(child_type))[index]
def _member_accessor(self, child_type):
try:
return getattr(self, child_type._child_accessor_name())
except AttributeError:
if child_type not in self.nineml_children:
raise NineMLInvalidElementTypeException(
"{} does not have children of type {}"
.format(self, child_type))
else:
raise
def _members_iter(self, child_type):
try:
return getattr(self, child_type._children_iter_name())
except AttributeError:
if child_type not in self.nineml_children:
raise NineMLInvalidElementTypeException(
"{} does not have children of type {}"
.format(self, child_type))
else:
raise
def _member_keys_iter(self, child_type):
try:
return getattr(self, child_type._children_keys_name())
except AttributeError:
if child_type not in self.nineml_children:
raise NineMLInvalidElementTypeException(
"{} does not have children of type {}"
.format(self, child_type))
else:
raise
def _num_members(self, child_type):
try:
return getattr(self, child_type._num_children_name())
except AttributeError:
if child_type not in self.nineml_children:
raise NineMLInvalidElementTypeException(
"{} does not have children of type {}"
.format(self, child_type))
else:
raise
def _member_dict(self, child_type):
try:
return getattr(self, child_type._children_dict_name())
except AttributeError:
if child_type not in self.nineml_children:
raise NineMLInvalidElementTypeException(
"{} does not have children of type {}"
.format(self, child_type))
else:
raise
@property
def parent(self):
return self._parent
@property
def document(self):
if isinstance(self, DocumentLevelObject):
document = self._document
elif self.parent is not None:
# Otherwise return parent's document if set
document = self.parent.document
else:
document = None
return document
def pluralise(word):
if word.endswith('ies'):
word = word + 's' # Not a proper plural but we can't use an apostrophe
elif word.endswith('s') or word.endswith('h'):
word = word + 'es'
elif word.endswith('y'):
word = word[:-1] + 'ies'
else:
word = word + 's'
return word
class SendPortBase(object):
"""
Dummy class to allow look up via inheritence of SendPort in this module
without causing circular import problems
"""
import nineml # @IgnorePep8
| [
"itertools.chain",
"collections.OrderedDict",
"re.compile",
"nineml.document.AddToDocumentVisitor",
"nineml.annotations.Annotations",
"nineml.unserialize",
"nineml.write",
"nineml.serialize"
] | [((634, 662), 're.compile', 're.compile', (['"""([a-z])([A-Z])"""'], {}), "('([a-z])([A-Z])')\n", (644, 662), False, 'import re\n'), ((4877, 4910), 'nineml.write', 'nineml.write', (['url', 'self'], {}), '(url, self, **kwargs)\n', (4889, 4910), False, 'import nineml\n'), ((5450, 5482), 'nineml.serialize', 'nineml.serialize', (['self'], {}), '(self, **kwargs)\n', (5466, 5482), False, 'import nineml\n'), ((6328, 6406), 'nineml.unserialize', 'nineml.unserialize', (['serial_elem', 'cls'], {'format': 'format', 'version': 'version'}), '(serial_elem, cls, format=format, version=version, **kwargs)\n', (6346, 6406), False, 'import nineml\n'), ((10129, 10164), 'nineml.write', 'nineml.write', (['fname', 'self'], {}), '(fname, self, **kwargs)\n', (10141, 10164), False, 'import nineml\n'), ((10364, 10500), 'itertools.chain', 'chain', (['self.analog_send_ports', 'self.analog_receive_ports', 'self.analog_reduce_ports', 'self.event_send_ports', 'self.event_receive_ports'], {}), '(self.analog_send_ports, self.analog_receive_ports, self.\n analog_reduce_ports, self.event_send_ports, self.event_receive_ports)\n', (10369, 10500), False, 'from itertools import chain\n'), ((11981, 12033), 'itertools.chain', 'chain', (['self.analog_send_ports', 'self.event_send_ports'], {}), '(self.analog_send_ports, self.event_send_ports)\n', (11986, 12033), False, 'from itertools import chain\n'), ((12093, 12182), 'itertools.chain', 'chain', (['self.analog_receive_ports', 'self.analog_reduce_ports', 'self.event_receive_ports'], {}), '(self.analog_receive_ports, self.analog_reduce_ports, self.\n event_receive_ports)\n', (12098, 12182), False, 'from itertools import chain\n'), ((12260, 12322), 'itertools.chain', 'chain', (['self.analog_send_port_names', 'self.event_send_port_names'], {}), '(self.analog_send_port_names, self.event_send_port_names)\n', (12265, 12322), False, 'from itertools import chain\n'), ((12387, 12491), 'itertools.chain', 'chain', (['self.analog_receive_port_names', 'self.analog_reduce_port_names', 'self.event_receive_port_names'], {}), '(self.analog_receive_port_names, self.analog_reduce_port_names, self.\n event_receive_port_names)\n', (12392, 12491), False, 'from itertools import chain\n'), ((13334, 13421), 'itertools.chain', 'chain', (['self.analog_send_ports', 'self.analog_receive_ports', 'self.analog_reduce_ports'], {}), '(self.analog_send_ports, self.analog_receive_ports, self.\n analog_reduce_ports)\n', (13339, 13421), False, 'from itertools import chain\n'), ((13594, 13648), 'itertools.chain', 'chain', (['self.event_send_ports', 'self.event_receive_ports'], {}), '(self.event_send_ports, self.event_receive_ports)\n', (13599, 13648), False, 'from itertools import chain\n'), ((14823, 14874), 'nineml.document.AddToDocumentVisitor', 'nineml.document.AddToDocumentVisitor', (['self.document'], {}), '(self.document)\n', (14859, 14874), False, 'import nineml\n'), ((7964, 7996), 'nineml.annotations.Annotations', 'nineml.annotations.Annotations', ([], {}), '()\n', (7994, 7996), False, 'import nineml\n'), ((14672, 14685), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14683, 14685), False, 'from collections import OrderedDict\n')] |
from pymongo import MongoClient
import gridfs
import os
from dotenv import load_dotenv, find_dotenv
"""
MongoDB setup
"""
# load MongoDB details as environmental variables from .env file
load_dotenv(find_dotenv())
MONGODB_URI = os.environ.get('MONGODB_URI')
DB_NAME = os.environ.get('DB_NAME')
# create MongoDB client, connect to queries collection in given database
client = MongoClient(MONGODB_URI)
db = client[DB_NAME]
fs = gridfs.GridFS(db)
queries = db.queries
"""
Make query JSON serialisable
"""
# make a document object json serialisable
def serialise_id(query):
# extract query create date from ObjectId and store as separate attribute
# (ObjectId contains information including the time of creation)
date = query['_id'].generation_time
query['createtime'] = date
# cnvert ObjectIds into strings
query['_id'] = str(query['_id'])
query['rawtweetsid'] = str(query['rawtweetsid'])
return query
| [
"gridfs.GridFS",
"pymongo.MongoClient",
"dotenv.find_dotenv",
"os.environ.get"
] | [((230, 259), 'os.environ.get', 'os.environ.get', (['"""MONGODB_URI"""'], {}), "('MONGODB_URI')\n", (244, 259), False, 'import os\n'), ((270, 295), 'os.environ.get', 'os.environ.get', (['"""DB_NAME"""'], {}), "('DB_NAME')\n", (284, 295), False, 'import os\n'), ((379, 403), 'pymongo.MongoClient', 'MongoClient', (['MONGODB_URI'], {}), '(MONGODB_URI)\n', (390, 403), False, 'from pymongo import MongoClient\n'), ((430, 447), 'gridfs.GridFS', 'gridfs.GridFS', (['db'], {}), '(db)\n', (443, 447), False, 'import gridfs\n'), ((201, 214), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (212, 214), False, 'from dotenv import load_dotenv, find_dotenv\n')] |
import json
import jieba
import pickle
import csv, h5py
import pandas as pd
import numpy as np
from tqdm import *
import torch
from torch import Tensor
from torch.autograd import Variable
import torch.utils.data as data
from main import Hyperparameters
from collections import Counter
STOP_TAG = "#stop#"
UNK_TAG = "#unk#"
def filter(ret, min_count):
count = pd.Series(ret).value_counts()
count = count[count >= min_count]
char_set = list(count.index)
return char_set
def get_vocab(param):
ret = []
with open(param.train_json_path) as f:
for line in tqdm(f):
line = json.loads(line)
if len(line['answer_docs']) == 0 or len(line['fake_answers']) == 0:
continue
document = line['documents'][line['answer_docs'][0]]
paragraph = document['paragraphs'][document['most_related_para']]
for p in paragraph: ret.append(p)
ret = filter(ret, param.min_count)
ret = sorted(list(ret))
input_set = [STOP_TAG, UNK_TAG]
input_set.extend(list(ret))
input_set_size = len(input_set)
input2idx = dict(zip(input_set, range(input_set_size)))
print('Vacabulary size:', input_set_size, '\n')
return input2idx, input_set_size
def save_vocab(path, input2idx):
print('Saving bocabulary...')
f = open(path,'wb')
pickle.dump(input2idx, f)
f.close()
def load_vocab(path):
print('Loading vocabulary...')
f = open(path, 'rb')
input2idx = pickle.load(f)
input_set = list(input2idx.keys())
input_set_size = len(input_set)
f.close()
print('Vacabulary size:', input_set_size, '\n')
return input2idx, input_set_size
# ------------------ save h5py file --------------------------- #
def load_evidence_and_feats(evidence, feats, input2idx):
evidence_vector = []
feats_vector = []
for e, f in zip(evidence, feats):
if e in input2idx:
evidence_vector.append(input2idx[e])
feats_vector.append(f)
return evidence_vector, feats_vector, len(evidence_vector)
def pad_sequence(seq, seq_size, word2idx):
vector = []
for i in range(seq_size):
if i >= len(seq):
vector.append(word2idx[STOP_TAG])
elif seq[i] not in word2idx:
vector.append(word2idx[UNK_TAG])
else:
vector.append(word2idx[seq[i]])
if len(seq) < seq_size:
length = len(seq)
else:
length = seq_size
return vector, length
def save_data(file, param, data, shape, i):
if i <= param.batch_storage_size:
for key, value in data.items():
if value == []: continue
file.create_dataset(key, data = value, maxshape = shape[key])
else:
old_len = len(file['question'])
new_len = old_len + len(data['question'])
for key, value in data.items():
if value == []: continue
new_shape = [new_len]
for s in shape[key][1:]:
new_shape.append(s)
file[key].resize(new_shape)
file[key][old_len: new_len] = value
print(i)
def get_train_data(param, line):
document = line['documents'][line['answer_docs'][0]]
#paragraph = document['paragraphs'][document['most_related_para']]
segmented_paragraph = document['segmented_paragraphs'][document['most_related_para']]
paragraph = ''.join(segmented_paragraph)
if len(paragraph) > param.paragraph_size:
return [], [], []
paragraph, paragraph_length = pad_sequence(paragraph, param.paragraph_size, param.word2idx)
answer_span = line['answer_spans'][0]
fake_answer = line['fake_answers'][0]
answer_start = len(''.join(segmented_paragraph[:answer_span[0]]))
answer_end = len(''.join(segmented_paragraph[:answer_span[1]+1]))
answer = [answer_start, answer_end]
return paragraph, paragraph_length, answer
def get_val_data(param, line):
paragraphs, paragraph_lengths, answers = [], [], []
documents = line['documents']
question_tokens = line['segmented_question']
for d in documents:
para_infos = []
for para_tokens in d['segmented_paragraphs']:
common_with_question = Counter(para_tokens) & Counter(question_tokens)
correct_preds = sum(common_with_question.values())
if correct_preds == 0:
recall_wrt_question = 0
else:
recall_wrt_question = float(correct_preds) / len(question_tokens)
para_infos.append((para_tokens, recall_wrt_question, len(para_tokens)))
para_infos.sort(key=lambda x: (-x[1], x[2]))
fake_paragraph = ''.join(para_infos[0][0])
if (len(fake_paragraph)) > param.paragraph_size:
continue
fake_paragraph, fake_paragraph_length = pad_sequence(fake_paragraph, param.paragraph_size, param.word2idx)
paragraphs.append(fake_paragraph)
paragraph_lengths.append(fake_paragraph_length)
answers = line['answers']
return paragraphs, paragraph_lengths, answers
def save_h5py_file(param, old_path, new_path):
print('Saving (', new_path, ')...')
file = h5py.File(new_path,'w')
data = {'question_id':[], 'question_type':[], 'question':[], 'question_length':[],
'paragraph':[], 'answer':[], 'paragraph_length':[], 'paragraphs':[], 'paragraph_lengths':[]}
shape = {'question_id':(None,), 'question_type':(None,), 'question':(None, param.question_size), 'question_length':(None,),
'paragraph':(None, param.paragraph_size), 'answer':(None, 2), 'paragraph_length':(None,),
'paragraphs':(None, None, param.paragraph_size), 'paragraph_lengths':(None, None,)}
#evaluate = {}
i = 0
with open(old_path) as f:
for line in tqdm(f):
line = json.loads(line)
documents = line['documents']
question = line['question']
question_id = line['question_id']
question_type = line['question_type']
question_tokens = line['segmented_question']
if len(question) > param.question_size:
continue
# train
if old_path == param.train_json_path:
if len(line['answer_docs']) == 0 or len(line['fake_answers']) == 0:
continue
paragraph, paragraph_length, answer = get_train_data(param, line)
if paragraph == []: continue
data['paragraph'].append(paragraph)
data['paragraph_length'].append(paragraph_length)
data['answer'].append(answer)
# val
elif old_path == param.val_json_path:
paragraphs, paragraph_lengths, answers = get_val_data(param, line)
if paragraphs == []: continue
data['paragraphs'].append(paragraphs)
data['paragraph_lengths'].append(paragraph_lengths)
#data['answers'].append(answers)
data['question_id'].append(question_id)
question, question_length = pad_sequence(question, param.question_size, param.word2idx)
data['question'].append(question)
data['question_length'].append(question_length)
# ---------------------------------
i += 1
if i % param.batch_storage_size == 0:
save_data(file, param, data, shape, i)
data = {'question_id':[], 'question_type':[], 'question':[], 'question_length':[],
'paragraph':[], 'answer':[], 'paragraph_length':[], 'paragraphs':[], 'paragraph_lengths':[]}
if i % param.batch_storage_size != 0:
save_data(file, param, data, shape, i)
file.close()
print('Dataset: ', i)
def get_answer():
with open(param.val_json_path) as f:
for line in tqdm(f):
line = json.loads(line)
question_id = line['question_id']
answers = line['answers']
if __name__ == '__main__':
param = Hyperparameters()
# 5143
#word2idx, word_set_size = get_vocab(param)
#idx2word = dict(zip(word2idx.values(), word2idx.keys()))
#print(word2idx['苏'], idx2word[520])
#save_vocab(param.vocab_path, word2idx)
param.word2idx, param.vocab_size = load_vocab(param.vocab_path)
param.idx2word = dict(zip(param.word2idx.values(), param.word2idx.keys()))
#print(word2idx['苏'], idx2word[520])
#save_h5py_file(param, param.train_json_path, param.train_h5py_path)
save_h5py_file(param, param.val_json_path, param.val_h5py_path)
| [
"pandas.Series",
"json.loads",
"pickle.dump",
"pickle.load",
"h5py.File",
"collections.Counter",
"main.Hyperparameters",
"torch.utils.data.items"
] | [((1240, 1265), 'pickle.dump', 'pickle.dump', (['input2idx', 'f'], {}), '(input2idx, f)\n', (1251, 1265), False, 'import pickle\n'), ((1368, 1382), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1379, 1382), False, 'import pickle\n'), ((4618, 4642), 'h5py.File', 'h5py.File', (['new_path', '"""w"""'], {}), "(new_path, 'w')\n", (4627, 4642), False, 'import csv, h5py\n'), ((7030, 7047), 'main.Hyperparameters', 'Hyperparameters', ([], {}), '()\n', (7045, 7047), False, 'from main import Hyperparameters\n'), ((2354, 2366), 'torch.utils.data.items', 'data.items', ([], {}), '()\n', (2364, 2366), True, 'import torch.utils.data as data\n'), ((2568, 2580), 'torch.utils.data.items', 'data.items', ([], {}), '()\n', (2578, 2580), True, 'import torch.utils.data as data\n'), ((368, 382), 'pandas.Series', 'pd.Series', (['ret'], {}), '(ret)\n', (377, 382), True, 'import pandas as pd\n'), ((586, 602), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (596, 602), False, 'import json\n'), ((5215, 5231), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (5225, 5231), False, 'import json\n'), ((6908, 6924), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (6918, 6924), False, 'import json\n'), ((3788, 3808), 'collections.Counter', 'Counter', (['para_tokens'], {}), '(para_tokens)\n', (3795, 3808), False, 'from collections import Counter\n'), ((3811, 3835), 'collections.Counter', 'Counter', (['question_tokens'], {}), '(question_tokens)\n', (3818, 3835), False, 'from collections import Counter\n')] |
import paramiko
import sys
import datetime
import threading
import logging
"""
Edit this line and add your command
"""
#cmd2run = "for f in $(ioscli lsdev -type adapter | grep fcs | grep 8Gb | awk {'print $1'}); do wwpn=$(ioscli lsdev -dev $f -vpd | grep Network | sed s'/\.//g;s/Network Address//g;s/ //g');echo $f,$wwpn; done"
cmd2run = "echo \"lslpp -l | grep -i bes\" | oem_setup_env"
sys.tracebacklimit = 0
if len(sys.argv) < 1:
logging.error("Not enough arguments")
sys.exit(1)
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy)
current_date = datetime.date.today()
results = []
def run_dsh(ip):
try:
if "vsa" in ip:
ssh.connect(hostname=ip, port=22, username='padmin', timeout=5)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd2run)
output = ssh_stdout.readlines()
for line in output:
if len(line) > 0:
results.append([ip, line])
elif "hmc" in ip:
ssh.connect(hostname=ip, port=22, username='hscroot', password="<PASSWORD>", timeout=5)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd2run)
output = ssh_stdout.readlines()
for line in output:
if len(line) > 0:
results.append([ip, line])
else:
ssh.connect(hostname=ip, port=22, username='ibmadmin', timeout=5)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd2run)
output = ssh_stdout.readlines()
for line in output:
if len(line) > 0:
results.append([ip, line])
except:
print("[+] Unable to get info from " + str(ip)) + str(Exception)
finally:
pass
threads = []
for x in sys.argv[1:]:
if x:
t = threading.Thread(target=run_dsh, args=(x,))
threads.append(t)
for i in threads:
i.start()
i.join()
print("\n------------------------------------------------------\n")
for line in results:
if line:
print(str(line[0]).rstrip('\n') + ": " + str(line[1]).rstrip('\n'))
print("\n------------------------------------------------------\n")
| [
"sys.exit",
"threading.Thread",
"datetime.date.today",
"paramiko.SSHClient",
"logging.error"
] | [((500, 520), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (518, 520), False, 'import paramiko\n'), ((621, 642), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (640, 642), False, 'import datetime\n'), ((439, 476), 'logging.error', 'logging.error', (['"""Not enough arguments"""'], {}), "('Not enough arguments')\n", (452, 476), False, 'import logging\n'), ((481, 492), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (489, 492), False, 'import sys\n'), ((1867, 1910), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_dsh', 'args': '(x,)'}), '(target=run_dsh, args=(x,))\n', (1883, 1910), False, 'import threading\n')] |
import librosa
from numba import jit
import numpy as np
@jit(nopython=True, cache=True)
def __C_to_DE(C: np.ndarray = None,
dn: np.ndarray = np.array([1, 1, 0], np.int64),
dm: np.ndarray = np.array([1, 0, 1], np.int64),
dw: np.ndarray = np.array([1.0, 1.0, 1.0], np.float64),
sub_sequence: bool = False) -> (np.ndarray, np.ndarray):
"""This function computes the accumulated cost matrix D and the step index
matrix E.
Parameters
----------
C : np.ndarray (np.float32 / np.float64) [shape=(N, M)]
Cost matrix
dn : np.ndarray (np.int64) [shape=(1, S)]
Integer array defining valid steps (N direction of C), default: [1, 1, 0]
dm : np.ndarray (np.int64) [shape=(1, S)]
Integer array defining valid steps (M direction of C), default: [1, 0, 1]
dw : np.ndarray (np.float64) [shape=(1, S)]
Double array defining the weight of the each step, default: [1.0, 1.0, 1.0]
sub_sequence : bool
Set `True` for SubSequence DTW, default: False
Returns
-------
D : np.ndarray (np.float64) [shape=(N, M)]
Accumulated cost matrix of type double
E : np.ndarray (np.int64) [shape=(N, M)]
Step index matrix.
E[n, m] holds the index of the step take to determine the value of D[n, m].
If E[n, m] is zero, no valid step was possible.
NaNs in the cost matrix are preserved, invalid fields in the cost matrix are NaNs.
"""
if C is None:
raise ValueError('C must be a 2D numpy array.')
N, M = C.shape
S = dn.size
if S != dm.size or S != dw.size:
raise ValueError('The parameters dn,dm, and dw must be of equal length.')
# calc bounding box size of steps
sbbn = np.max(dn)
sbbm = np.max(dm)
# initialize E
E = np.zeros((N, M), np.int64) - 1
# initialize extended D matrix
D = np.ones((sbbn + N, sbbm + M), np.float64) * np.inf
if sub_sequence:
for m in range(M):
D[sbbn, sbbm + m] = C[0, m]
else:
D[sbbn, sbbm] = C[0, 0]
# accumulate
for m in range(sbbm, M + sbbm):
for n in range(sbbn, N + sbbn):
for s in range(S):
cost = D[n - dn[s], m - dm[s]] + C[n - sbbn, m - sbbm] * dw[s]
if cost < D[n, m]:
D[n, m] = cost
E[n - sbbn, m - sbbm] = s
D = D[sbbn: N + sbbn, sbbm: M + sbbm]
return D, E
@jit(nopython=True, cache=True)
def __E_to_warping_path(E: np.ndarray,
dn: np.ndarray = np.array([1, 1, 0], np.int64),
dm: np.ndarray = np.array([1, 0, 1], np.int64),
sub_sequence: bool = False,
end_index: int = -1) -> np.ndarray:
"""This function computes a warping path based on the provided matrix E
and the allowed steps.
Parameters
----------
E : np.ndarray (np.int64) [shape=(N, M)]
Step index matrix
dn : np.ndarray (np.int64) [shape=(1, S)]
Integer array defining valid steps (N direction of C), default: [1, 1, 0]
dm : np.ndarray (np.int64) [shape=(1, S)]
Integer array defining valid steps (M direction of C), default: [1, 0, 1]
sub_sequence : bool
Set `True` for SubSequence DTW, default: False
end_index : int
In case of SubSequence DTW
Returns
-------
warping_path : np.ndarray (np.int64) [shape=(2, M)]
Resulting optimal warping path
"""
N, M = E.shape
if not sub_sequence and end_index == -1:
end_index = M - 1
m = end_index
n = N - 1
warping_path = np.zeros((2, n + m + 1))
index = 0
def _loop(m, n, index):
warping_path[:, index] = np.array([n, m])
step_index = E[n, m]
m -= dm[step_index]
n -= dn[step_index]
index += 1
return m, n, index
if sub_sequence:
while n > 0:
m, n, index = _loop(m, n, index)
else:
while m > 0 or n > 0:
m, n, index = _loop(m, n, index)
warping_path[:, index] = np.array([n, m])
warping_path = warping_path[:, index::-1]
return warping_path
def compute_warping_path(C: np.ndarray,
step_sizes: np.ndarray = np.array([[1, 0], [0, 1], [1, 1]], np.int64),
step_weights: np.ndarray = np.array([1.0, 1.0, 1.0], np.float64),
implementation: str = 'synctoolbox'):
"""Applies DTW on cost matrix C.
Parameters
----------
C : np.ndarray (np.float32 / np.float64) [shape=(N, M)]
Cost matrix
step_sizes : np.ndarray (np.int64) [shape=(2, S)]
Array of step sizes
step_weights : np.ndarray (np.float64) [shape=(2, S)]
Array of step weights
implementation: str
Choose among ``synctoolbox`` and ``librosa``. (default: ``synctoolbox``)
Returns
-------
D : np.ndarray (np.float64) [shape=(N, M)]
Accumulated cost matrix
E : np.ndarray (np.int64) [shape=(N, M)]
Step index matrix
wp : np.ndarray (np.int64) [shape=(2, M)]
Warping path
"""
if implementation == 'librosa':
D, wp, E = librosa.sequence.dtw(C=C,
step_sizes_sigma=step_sizes,
weights_add=np.array([0, 0, 0]),
weights_mul=step_weights,
return_steps=True,
subseq=False)
wp = wp[::-1].T
elif implementation == 'synctoolbox':
dn = step_sizes[:, 0]
dm = step_sizes[:, 1]
D, E = __C_to_DE(C,
dn=dn,
dm=dm,
dw=step_weights,
sub_sequence=False)
wp = __E_to_warping_path(E=E,
dn=dn,
dm=dm,
sub_sequence=False)
else:
raise NotImplementedError(f'No implementation found called {implementation}')
return D, E, wp
| [
"numpy.ones",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numba.jit"
] | [((59, 89), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (62, 89), False, 'from numba import jit\n'), ((2478, 2508), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (2481, 2508), False, 'from numba import jit\n'), ((157, 186), 'numpy.array', 'np.array', (['[1, 1, 0]', 'np.int64'], {}), '([1, 1, 0], np.int64)\n', (165, 186), True, 'import numpy as np\n'), ((219, 248), 'numpy.array', 'np.array', (['[1, 0, 1]', 'np.int64'], {}), '([1, 0, 1], np.int64)\n', (227, 248), True, 'import numpy as np\n'), ((281, 318), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]', 'np.float64'], {}), '([1.0, 1.0, 1.0], np.float64)\n', (289, 318), True, 'import numpy as np\n'), ((1777, 1787), 'numpy.max', 'np.max', (['dn'], {}), '(dn)\n', (1783, 1787), True, 'import numpy as np\n'), ((1799, 1809), 'numpy.max', 'np.max', (['dm'], {}), '(dm)\n', (1805, 1809), True, 'import numpy as np\n'), ((2589, 2618), 'numpy.array', 'np.array', (['[1, 1, 0]', 'np.int64'], {}), '([1, 1, 0], np.int64)\n', (2597, 2618), True, 'import numpy as np\n'), ((2661, 2690), 'numpy.array', 'np.array', (['[1, 0, 1]', 'np.int64'], {}), '([1, 0, 1], np.int64)\n', (2669, 2690), True, 'import numpy as np\n'), ((3676, 3700), 'numpy.zeros', 'np.zeros', (['(2, n + m + 1)'], {}), '((2, n + m + 1))\n', (3684, 3700), True, 'import numpy as np\n'), ((4129, 4145), 'numpy.array', 'np.array', (['[n, m]'], {}), '([n, m])\n', (4137, 4145), True, 'import numpy as np\n'), ((4309, 4353), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [1, 1]]', 'np.int64'], {}), '([[1, 0], [0, 1], [1, 1]], np.int64)\n', (4317, 4353), True, 'import numpy as np\n'), ((4407, 4444), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]', 'np.float64'], {}), '([1.0, 1.0, 1.0], np.float64)\n', (4415, 4444), True, 'import numpy as np\n'), ((1838, 1864), 'numpy.zeros', 'np.zeros', (['(N, M)', 'np.int64'], {}), '((N, M), np.int64)\n', (1846, 1864), True, 'import numpy as np\n'), ((1913, 1954), 'numpy.ones', 'np.ones', (['(sbbn + N, sbbm + M)', 'np.float64'], {}), '((sbbn + N, sbbm + M), np.float64)\n', (1920, 1954), True, 'import numpy as np\n'), ((3778, 3794), 'numpy.array', 'np.array', (['[n, m]'], {}), '([n, m])\n', (3786, 3794), True, 'import numpy as np\n'), ((5389, 5408), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (5397, 5408), True, 'import numpy as np\n')] |
import sys as _sys
import pandas as _pd
from apodeixi.testing_framework.a6i_unit_test import ApodeixiUnitTest
from apodeixi.testing_framework.controllers.mock_controller import Mock_Controller
from apodeixi.testing_framework.mock_kb_store import UnitTest_KnowledgeBaseStore
from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace
from apodeixi.xli.breakdown_builder import BreakdownTree
from apodeixi.xli.interval import Interval
from apodeixi.xli.uid_store import UID_Store
from apodeixi.xli.posting_controller_utils import PostingConfig
from apodeixi.xli.update_policy import UpdatePolicy
from apodeixi.xli.uid_acronym_schema import UID_Acronym_Schema, AcronymInfo
class Test_BreakoutTree(ApodeixiUnitTest):
def setUp(self):
super().setUp()
def _create_df(self):
columns = ['A', 'color', 'size', 'B', 'height', 'coolness', 'C']
row0 = ['a1', 'brown', "32in", 'b1', "5' 8''", 'so-so', 'c1']
row1 = ['', '', "", 'b2', "6' 1''", 'awesome', 'c2']
row2 = ['', '', "", '', "", '', 'c3']
row3 = ['a2', 'red hair', "29in", 'b3', "165cm", 'cool cat', 'c4']
df = _pd.DataFrame(columns=columns, data = [row0, row1, row2, row3])
return df
def _create_df2(self):
columns = ['Expectation', 'Description', 'Acceptance Criteria', 'Artifact']
row0 = ['Segmentation model', 'Tier/Geo/Vertical', 'Analysis', 'Tree model']
row1 = ['', '', 'Market Validation', 'Analysists data']
row2 = ['Jobs to be done model', 'Understand buying', 'Timeline clear', 'BPMN diagram']
row3 = ['', '', 'Behavior clear', 'Sequence diagram']
df2 = _pd.DataFrame(columns=columns, data = [row0, row1, row2, row3])
return df2
def test_read_df_fragment(self):
result_dict = None
root_trace = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Reading df fragment")
try:
tree = self._create_breakdown_tree(root_trace, 'read_df_fragment')
result_dict = tree.as_dicts()
except ApodeixiError as ex:
print(ex.trace_message())
self.assertTrue(1==2)
self._compare_to_expected_yaml(root_trace, result_dict, test_output_name = 'read_df_fragment', save_output_dict=True)
def test_find(self):
UID_TO_FIND = 'A2.B1.C1'
NAME_OF_ENTITY_TO_FIND = 'c4'
entity_instance = None
try:
my_trace = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Finding uid='" + UID_TO_FIND + "'")
tree = self._create_breakdown_tree(my_trace, "Finding UID")
entity_instance = tree.find (UID_TO_FIND, my_trace)
except ApodeixiError as ex:
print(ex.trace_message())
self.assertTrue(1==2)
self.assertEqual(entity_instance.name, NAME_OF_ENTITY_TO_FIND)
def test_docking_1(self):
DOCKING_UID = 'A2.B1'
ENTITY_TO_DOCK = "Costs"
columns = [ENTITY_TO_DOCK, 'Purpose']
row0 = ["<NAME>", 'Customer Visit']
df = _pd.DataFrame(columns=columns, data = [row0])
DATA_TO_ATTACH = next(df.iterrows())[1]
root_trace = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Tesing docking")
entity_instance = None
try:
tree = self._create_breakdown_tree(root_trace, 'docking_1')
xlr_config = self._create_posting_config(root_trace, 'docking_1')
my_trace = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Docking uid='" + DOCKING_UID + "'")
tree.dockEntityData ( full_docking_uid = DOCKING_UID,
entity_type = ENTITY_TO_DOCK,
data_to_attach = DATA_TO_ATTACH,
parent_trace = my_trace,
uid_to_overwrite = None,
xlr_config = xlr_config,
acronym_schema = None)
result_dict = tree.as_dicts()
except ApodeixiError as ex:
print(ex.trace_message())
self.assertTrue(1==2)
self._compare_to_expected_yaml(root_trace, result_dict, test_output_name = 'docking_1', save_output_dict=True)
def test_docking_2(self):
DOCKING_UID = 'A2.B1'
ENTITY_TO_DOCK = "C"
columns = [ENTITY_TO_DOCK, 'Typo']
row0 = ["Immueble rojo", 'Residencial']
df = _pd.DataFrame(columns=columns, data = [row0])
DATA_TO_ATTACH = next(df.iterrows())[1]
root_trace = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Testing docking")
entity_instance = None
try:
tree = self._create_breakdown_tree(root_trace, 'docking_2')
xlr_config = self._create_posting_config(root_trace, 'docking_2')
my_trace = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Docking uid='" + DOCKING_UID + "'")
tree.dockEntityData ( full_docking_uid = DOCKING_UID,
entity_type = ENTITY_TO_DOCK,
data_to_attach = DATA_TO_ATTACH,
uid_to_overwrite = None,
parent_trace = my_trace,
xlr_config = xlr_config,
acronym_schema = None)
result_dict = tree.as_dicts()
except ApodeixiError as ex:
print(ex.trace_message())
self.assertTrue(1==2)
self._compare_to_expected_yaml(root_trace, result_dict, test_output_name = 'docking_2', save_output_dict=True)
def test_acronyms(self):
entities = ['Costs', 'Cost Models', "Ferries", 'Carry Mirrors', 'CO', 'Costs']
EXPECTED = ['CO', 'CM', 'F', 'CAMI', 'COC', 'CO']
try:
my_trace = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Testing acronym generation")
tree = self._create_breakdown_tree(my_trace, "Testing acronym generation")
result = []
for e in entities:
result.append(tree.getAcronym(my_trace, e))
except ApodeixiError as ex:
print(ex.trace_message())
self.assertTrue(1==2)
self.assertEqual(result, EXPECTED)
def test_attach_subtree(self):
result_dict = None
root_trace = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Attaching subtree")
try:
tree1 = self._create_breakdown_tree(root_trace, 'attach_subtree')
subtree_df = self._create_df2()
xlr_config = self._create_posting_config(root_trace, 'attach_subtree')
subtree_intervals = [ Interval(None, ['Expectation', 'Description']),
Interval(None, [ 'Acceptance Criteria', 'Artifact'])]
acronym_schema = UID_Acronym_Schema()
acronym_schema.acronyminfo_list = [AcronymInfo("A", "A"), AcronymInfo("B", "B"), AcronymInfo("C", "C"),
AcronymInfo("E", "Expectation"),
AcronymInfo("AC", "Acceptance Criteria")]
self._attach_subtree( df_to_attach = subtree_df,
intervals = subtree_intervals,
tree_to_attach_to = tree1,
docking_uid = 'A2.B1.C1',
xlr_config = xlr_config,
acronym_schema = acronym_schema)
result_dict = tree1.as_dicts()
except ApodeixiError as ex:
print(ex.trace_message())
self.assertTrue(1==2)
self._compare_to_expected_yaml(root_trace, result_dict, test_output_name = 'attach_subtree', save_output_dict=True)
def _create_breakdown_tree(self, parent_trace, test_case_name):
my_trace = parent_trace.doing("Creating UID Store")
store = UID_Store(my_trace)
xlr_config = self._create_posting_config(my_trace, test_case_name)
entity_type = 'A'
parent_UID = None
my_trace = parent_trace.doing("Creating BreakdownTree", data={ 'entity_type' : entity_type,
'parent_UID' : parent_UID})
tree = BreakdownTree(uid_store = store, entity_type=entity_type, parent_UID=parent_UID)
df = self._create_df()
my_trace = parent_trace.doing("Creating intervals", data={'tree.entity_type' : tree.entity_type,
'columns' : list(df.columns)})
interval_A = Interval(my_trace, ['A', 'color', 'size'])
interval_B = Interval(my_trace, ['B', 'height', 'coolness'])
interval_C = Interval(my_trace, ['C'])
rows = list(df.iterrows())
intervals = [interval_A, interval_B, interval_C]
my_trace = parent_trace.doing("Processing DataFrame", data={'tree.entity_type' : tree.entity_type,
'columns' : list(df.columns)})
acronym_schema = UID_Acronym_Schema()
acronym_schema.acronyminfo_list = [AcronymInfo("A", "A"), AcronymInfo("B", "B"), AcronymInfo("C", "C"),
AcronymInfo("CO", "Costs")] # CO acronym is for test_docking_1
store.set_acronym_schema(my_trace, acronym_schema)
for idx in range(len(rows)):
for interval in intervals:
loop_trace = my_trace.doing(activity="Processing fragment", data={'row': idx,
'interval': interval.columns},
origination = {
'signaled_from': __file__})
tree.readDataframeFragment( interval = interval,
row = rows[idx],
parent_trace = loop_trace,
all_rows = rows,
xlr_config = xlr_config,
acronym_schema = None)
return tree
def _create_posting_config(self, parent_trace, test_case_name):
'''
Returns a dummy PostingConfig object. Needed only because some of the functions we test in this module
require it as a parameter, though all that they require is an UpdatePolicy object within the PostingConfig
'''
update_policy = UpdatePolicy(reuse_uids=True, merge=False)
kb_store = UnitTest_KnowledgeBaseStore( test_case_name = test_case_name,
input_manifests_dir = self.input_data,
input_postings_dir = self.input_data,
output_manifests_dir = self.output_data,
output_postings_dir = self.output_data)
controller = Mock_Controller(parent_trace, store=kb_store, a6i_config=self.a6i_config)
# To avoid error messages, we will need a dummy but structurally complete manifest meta data,
# even if there is no real manifest here
controller.show_your_work.keep_manifest_meta( parent_trace = parent_trace,
manifest_nb = -99,
kind = "FAKE -99 KIND",
excel_range = "A1:B2",
excel_sheet = "FAKE WORSHEET")
xlr_config = PostingConfig( kind = "FAKE -99 KIND",
manifest_nb = -99, # None would trigger error, so put a dummy number
update_policy = update_policy,
controller = controller)
return xlr_config
def _attach_subtree(self, df_to_attach, intervals, tree_to_attach_to, docking_uid, xlr_config, acronym_schema):
store = tree_to_attach_to.uid_store
entity_type = intervals[0].entity_name
subtree = BreakdownTree(uid_store = store, entity_type=entity_type, parent_UID=docking_uid)
rows = list(df_to_attach.iterrows())
root_trace = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Populating subtree", data={'subtree.entity_type' : entity_type,
'columns' : list(df_to_attach.columns)},
origination = {
'signaled_from': __file__})
store.set_acronym_schema(root_trace, acronym_schema)
for idx in range(len(rows)):
for interval in intervals:
my_trace = root_trace.doing(activity="Processing fragment", data={'row': idx, 'interval': interval})
subtree.readDataframeFragment( interval = interval,
row = rows[idx],
parent_trace = my_trace,
all_rows = rows,
xlr_config = xlr_config,
acronym_schema = None)
root_trace = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Attaching subtree", data = {"docking UID" : "'" + subtree.parent_UID + "'",
"entity_type" : "'" + entity_type + "'"})
tree_to_attach_to.dock_subtree(entity_type, subtree, root_trace)
if __name__ == "__main__":
# execute only if run as a script
def main(args):
what_to_do = args[1]
T = Test_BreakoutTree()
T.setUp()
if what_to_do=='read_df_fragment':
T.test_read_df_fragment()
elif what_to_do=='find':
T.test_find()
elif what_to_do=='attach_subtree':
T.test_attach_subtree()
elif what_to_do=='docking_2':
T.test_docking_2()
main(_sys.argv) | [
"apodeixi.xli.posting_controller_utils.PostingConfig",
"apodeixi.util.a6i_error.FunctionalTrace",
"apodeixi.xli.update_policy.UpdatePolicy",
"apodeixi.xli.uid_acronym_schema.UID_Acronym_Schema",
"apodeixi.xli.interval.Interval",
"apodeixi.testing_framework.mock_kb_store.UnitTest_KnowledgeBaseStore",
"pa... | [((1647, 1708), 'pandas.DataFrame', '_pd.DataFrame', ([], {'columns': 'columns', 'data': '[row0, row1, row2, row3]'}), '(columns=columns, data=[row0, row1, row2, row3])\n', (1660, 1708), True, 'import pandas as _pd\n'), ((2395, 2456), 'pandas.DataFrame', '_pd.DataFrame', ([], {'columns': 'columns', 'data': '[row0, row1, row2, row3]'}), '(columns=columns, data=[row0, row1, row2, row3])\n', (2408, 2456), True, 'import pandas as _pd\n'), ((4047, 4090), 'pandas.DataFrame', '_pd.DataFrame', ([], {'columns': 'columns', 'data': '[row0]'}), '(columns=columns, data=[row0])\n', (4060, 4090), True, 'import pandas as _pd\n'), ((5688, 5731), 'pandas.DataFrame', '_pd.DataFrame', ([], {'columns': 'columns', 'data': '[row0]'}), '(columns=columns, data=[row0])\n', (5701, 5731), True, 'import pandas as _pd\n'), ((9741, 9760), 'apodeixi.xli.uid_store.UID_Store', 'UID_Store', (['my_trace'], {}), '(my_trace)\n', (9750, 9760), False, 'from apodeixi.xli.uid_store import UID_Store\n'), ((10165, 10243), 'apodeixi.xli.breakdown_builder.BreakdownTree', 'BreakdownTree', ([], {'uid_store': 'store', 'entity_type': 'entity_type', 'parent_UID': 'parent_UID'}), '(uid_store=store, entity_type=entity_type, parent_UID=parent_UID)\n', (10178, 10243), False, 'from apodeixi.xli.breakdown_builder import BreakdownTree\n'), ((10563, 10605), 'apodeixi.xli.interval.Interval', 'Interval', (['my_trace', "['A', 'color', 'size']"], {}), "(my_trace, ['A', 'color', 'size'])\n", (10571, 10605), False, 'from apodeixi.xli.interval import Interval\n'), ((10639, 10686), 'apodeixi.xli.interval.Interval', 'Interval', (['my_trace', "['B', 'height', 'coolness']"], {}), "(my_trace, ['B', 'height', 'coolness'])\n", (10647, 10686), False, 'from apodeixi.xli.interval import Interval\n'), ((10718, 10743), 'apodeixi.xli.interval.Interval', 'Interval', (['my_trace', "['C']"], {}), "(my_trace, ['C'])\n", (10726, 10743), False, 'from apodeixi.xli.interval import Interval\n'), ((11141, 11161), 'apodeixi.xli.uid_acronym_schema.UID_Acronym_Schema', 'UID_Acronym_Schema', ([], {}), '()\n', (11159, 11161), False, 'from apodeixi.xli.uid_acronym_schema import UID_Acronym_Schema, AcronymInfo\n'), ((12776, 12818), 'apodeixi.xli.update_policy.UpdatePolicy', 'UpdatePolicy', ([], {'reuse_uids': '(True)', 'merge': '(False)'}), '(reuse_uids=True, merge=False)\n', (12788, 12818), False, 'from apodeixi.xli.update_policy import UpdatePolicy\n'), ((12849, 13070), 'apodeixi.testing_framework.mock_kb_store.UnitTest_KnowledgeBaseStore', 'UnitTest_KnowledgeBaseStore', ([], {'test_case_name': 'test_case_name', 'input_manifests_dir': 'self.input_data', 'input_postings_dir': 'self.input_data', 'output_manifests_dir': 'self.output_data', 'output_postings_dir': 'self.output_data'}), '(test_case_name=test_case_name,\n input_manifests_dir=self.input_data, input_postings_dir=self.input_data,\n output_manifests_dir=self.output_data, output_postings_dir=self.output_data\n )\n', (12876, 13070), False, 'from apodeixi.testing_framework.mock_kb_store import UnitTest_KnowledgeBaseStore\n'), ((13369, 13442), 'apodeixi.testing_framework.controllers.mock_controller.Mock_Controller', 'Mock_Controller', (['parent_trace'], {'store': 'kb_store', 'a6i_config': 'self.a6i_config'}), '(parent_trace, store=kb_store, a6i_config=self.a6i_config)\n', (13384, 13442), False, 'from apodeixi.testing_framework.controllers.mock_controller import Mock_Controller\n'), ((14128, 14237), 'apodeixi.xli.posting_controller_utils.PostingConfig', 'PostingConfig', ([], {'kind': '"""FAKE -99 KIND"""', 'manifest_nb': '(-99)', 'update_policy': 'update_policy', 'controller': 'controller'}), "(kind='FAKE -99 KIND', manifest_nb=-99, update_policy=\n update_policy, controller=controller)\n", (14141, 14237), False, 'from apodeixi.xli.posting_controller_utils import PostingConfig\n'), ((14746, 14825), 'apodeixi.xli.breakdown_builder.BreakdownTree', 'BreakdownTree', ([], {'uid_store': 'store', 'entity_type': 'entity_type', 'parent_UID': 'docking_uid'}), '(uid_store=store, entity_type=entity_type, parent_UID=docking_uid)\n', (14759, 14825), False, 'from apodeixi.xli.breakdown_builder import BreakdownTree\n'), ((8515, 8535), 'apodeixi.xli.uid_acronym_schema.UID_Acronym_Schema', 'UID_Acronym_Schema', ([], {}), '()\n', (8533, 8535), False, 'from apodeixi.xli.uid_acronym_schema import UID_Acronym_Schema, AcronymInfo\n'), ((11209, 11230), 'apodeixi.xli.uid_acronym_schema.AcronymInfo', 'AcronymInfo', (['"""A"""', '"""A"""'], {}), "('A', 'A')\n", (11220, 11230), False, 'from apodeixi.xli.uid_acronym_schema import UID_Acronym_Schema, AcronymInfo\n'), ((11232, 11253), 'apodeixi.xli.uid_acronym_schema.AcronymInfo', 'AcronymInfo', (['"""B"""', '"""B"""'], {}), "('B', 'B')\n", (11243, 11253), False, 'from apodeixi.xli.uid_acronym_schema import UID_Acronym_Schema, AcronymInfo\n'), ((11255, 11276), 'apodeixi.xli.uid_acronym_schema.AcronymInfo', 'AcronymInfo', (['"""C"""', '"""C"""'], {}), "('C', 'C')\n", (11266, 11276), False, 'from apodeixi.xli.uid_acronym_schema import UID_Acronym_Schema, AcronymInfo\n'), ((11326, 11352), 'apodeixi.xli.uid_acronym_schema.AcronymInfo', 'AcronymInfo', (['"""CO"""', '"""Costs"""'], {}), "('CO', 'Costs')\n", (11337, 11352), False, 'from apodeixi.xli.uid_acronym_schema import UID_Acronym_Schema, AcronymInfo\n'), ((2608, 2669), 'apodeixi.util.a6i_error.FunctionalTrace', 'FunctionalTrace', ([], {'parent_trace': 'None', 'path_mask': 'self._path_mask'}), '(parent_trace=None, path_mask=self._path_mask)\n', (2623, 2669), False, 'from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace\n'), ((4181, 4242), 'apodeixi.util.a6i_error.FunctionalTrace', 'FunctionalTrace', ([], {'parent_trace': 'None', 'path_mask': 'self._path_mask'}), '(parent_trace=None, path_mask=self._path_mask)\n', (4196, 4242), False, 'from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace\n'), ((5822, 5883), 'apodeixi.util.a6i_error.FunctionalTrace', 'FunctionalTrace', ([], {'parent_trace': 'None', 'path_mask': 'self._path_mask'}), '(parent_trace=None, path_mask=self._path_mask)\n', (5837, 5883), False, 'from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace\n'), ((7938, 7999), 'apodeixi.util.a6i_error.FunctionalTrace', 'FunctionalTrace', ([], {'parent_trace': 'None', 'path_mask': 'self._path_mask'}), '(parent_trace=None, path_mask=self._path_mask)\n', (7953, 7999), False, 'from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace\n'), ((8325, 8371), 'apodeixi.xli.interval.Interval', 'Interval', (['None', "['Expectation', 'Description']"], {}), "(None, ['Expectation', 'Description'])\n", (8333, 8371), False, 'from apodeixi.xli.interval import Interval\n'), ((8410, 8461), 'apodeixi.xli.interval.Interval', 'Interval', (['None', "['Acceptance Criteria', 'Artifact']"], {}), "(None, ['Acceptance Criteria', 'Artifact'])\n", (8418, 8461), False, 'from apodeixi.xli.interval import Interval\n'), ((8587, 8608), 'apodeixi.xli.uid_acronym_schema.AcronymInfo', 'AcronymInfo', (['"""A"""', '"""A"""'], {}), "('A', 'A')\n", (8598, 8608), False, 'from apodeixi.xli.uid_acronym_schema import UID_Acronym_Schema, AcronymInfo\n'), ((8610, 8631), 'apodeixi.xli.uid_acronym_schema.AcronymInfo', 'AcronymInfo', (['"""B"""', '"""B"""'], {}), "('B', 'B')\n", (8621, 8631), False, 'from apodeixi.xli.uid_acronym_schema import UID_Acronym_Schema, AcronymInfo\n'), ((8633, 8654), 'apodeixi.xli.uid_acronym_schema.AcronymInfo', 'AcronymInfo', (['"""C"""', '"""C"""'], {}), "('C', 'C')\n", (8644, 8654), False, 'from apodeixi.xli.uid_acronym_schema import UID_Acronym_Schema, AcronymInfo\n'), ((8708, 8739), 'apodeixi.xli.uid_acronym_schema.AcronymInfo', 'AcronymInfo', (['"""E"""', '"""Expectation"""'], {}), "('E', 'Expectation')\n", (8719, 8739), False, 'from apodeixi.xli.uid_acronym_schema import UID_Acronym_Schema, AcronymInfo\n'), ((8794, 8834), 'apodeixi.xli.uid_acronym_schema.AcronymInfo', 'AcronymInfo', (['"""AC"""', '"""Acceptance Criteria"""'], {}), "('AC', 'Acceptance Criteria')\n", (8805, 8834), False, 'from apodeixi.xli.uid_acronym_schema import UID_Acronym_Schema, AcronymInfo\n'), ((14920, 14981), 'apodeixi.util.a6i_error.FunctionalTrace', 'FunctionalTrace', ([], {'parent_trace': 'None', 'path_mask': 'self._path_mask'}), '(parent_trace=None, path_mask=self._path_mask)\n', (14935, 14981), False, 'from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace\n'), ((16163, 16224), 'apodeixi.util.a6i_error.FunctionalTrace', 'FunctionalTrace', ([], {'parent_trace': 'None', 'path_mask': 'self._path_mask'}), '(parent_trace=None, path_mask=self._path_mask)\n', (16178, 16224), False, 'from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace\n'), ((3329, 3390), 'apodeixi.util.a6i_error.FunctionalTrace', 'FunctionalTrace', ([], {'parent_trace': 'None', 'path_mask': 'self._path_mask'}), '(parent_trace=None, path_mask=self._path_mask)\n', (3344, 3390), False, 'from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace\n'), ((4544, 4605), 'apodeixi.util.a6i_error.FunctionalTrace', 'FunctionalTrace', ([], {'parent_trace': 'None', 'path_mask': 'self._path_mask'}), '(parent_trace=None, path_mask=self._path_mask)\n', (4559, 4605), False, 'from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace\n'), ((6186, 6247), 'apodeixi.util.a6i_error.FunctionalTrace', 'FunctionalTrace', ([], {'parent_trace': 'None', 'path_mask': 'self._path_mask'}), '(parent_trace=None, path_mask=self._path_mask)\n', (6201, 6247), False, 'from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace\n'), ((7328, 7389), 'apodeixi.util.a6i_error.FunctionalTrace', 'FunctionalTrace', ([], {'parent_trace': 'None', 'path_mask': 'self._path_mask'}), '(parent_trace=None, path_mask=self._path_mask)\n', (7343, 7389), False, 'from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace\n')] |
import sys, hashlib, base64
if len(sys.argv) < 2:
sys.stderr.write("usage: sha_passgen.py password")
sys.exit(-1)
pas = sys.argv[1]
hash = hashlib.sha1()
hash.update(bytes(pas, "latin-1"))
hash = hash.digest()
ret = b"{SHA}"+base64.b64encode(hash)
print(ret) | [
"sys.stderr.write",
"base64.b64encode",
"hashlib.sha1",
"sys.exit"
] | [((148, 162), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (160, 162), False, 'import sys, hashlib, base64\n'), ((53, 103), 'sys.stderr.write', 'sys.stderr.write', (['"""usage: sha_passgen.py password"""'], {}), "('usage: sha_passgen.py password')\n", (69, 103), False, 'import sys, hashlib, base64\n'), ((106, 118), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (114, 118), False, 'import sys, hashlib, base64\n'), ((235, 257), 'base64.b64encode', 'base64.b64encode', (['hash'], {}), '(hash)\n', (251, 257), False, 'import sys, hashlib, base64\n')] |
import fibonacci
print(fibonacci.version())
result = fibonacci.fib(1000000)
print(result)
| [
"fibonacci.version",
"fibonacci.fib"
] | [((55, 77), 'fibonacci.fib', 'fibonacci.fib', (['(1000000)'], {}), '(1000000)\n', (68, 77), False, 'import fibonacci\n'), ((24, 43), 'fibonacci.version', 'fibonacci.version', ([], {}), '()\n', (41, 43), False, 'import fibonacci\n')] |
import os
import typing
import importlib
from importlib.util import find_spec
def get_package_path(package_path: str) -> str:
if ":" in package_path:
package, path = package_path.split(":", 1)
spec = find_spec(package)
if spec and spec.origin:
package_dir = os.path.dirname(spec.origin)
return os.path.join(package_dir, path)
raise ValueError(f"Package {package} not found") # pragma: nocover
else:
return package_path
T = typing.TypeVar("T")
@typing.overload
def import_string(dot_path: str) -> object:
... # pragma: nocover
@typing.overload
def import_string(dot_path: str, tp: typing.Type[T]) -> T:
... # pragma: nocover
def import_string(dot_path, tp=None): # type: ignore
path, attr = dot_path.rsplit(".", 1)
module = importlib.import_module(path)
try:
ret = getattr(module, attr)
except AttributeError:
raise ImportError(f"Could not load name {dot_path}") from None
if tp is None or isinstance(ret, tp):
return ret
raise TypeError(
f"{dot_path} must be an instance of {tp}, got {ret!r}"
) # pragma: nocover
| [
"importlib.import_module",
"importlib.util.find_spec",
"os.path.join",
"os.path.dirname",
"typing.TypeVar"
] | [((500, 519), 'typing.TypeVar', 'typing.TypeVar', (['"""T"""'], {}), "('T')\n", (514, 519), False, 'import typing\n'), ((825, 854), 'importlib.import_module', 'importlib.import_module', (['path'], {}), '(path)\n', (848, 854), False, 'import importlib\n'), ((222, 240), 'importlib.util.find_spec', 'find_spec', (['package'], {}), '(package)\n', (231, 240), False, 'from importlib.util import find_spec\n'), ((300, 328), 'os.path.dirname', 'os.path.dirname', (['spec.origin'], {}), '(spec.origin)\n', (315, 328), False, 'import os\n'), ((348, 379), 'os.path.join', 'os.path.join', (['package_dir', 'path'], {}), '(package_dir, path)\n', (360, 379), False, 'import os\n')] |
# FileName: QAP3
# Honest Harry Car Sales (Looking for a program to keep track of his sales)
# Author: <NAME>
# Date: October 27, 2021
# imports
import datetime
# Constants
TAX_RATE = .15
STAND_FEE = 75 #Lincence Fee for vechiles <= $5,000
LUX_FEE = 165 #Lincence Fee for Vechiles >$5,000
TRANS_RATE = 0.01 #transfer Fee 1%
LUX_RATE = 0.016 # Lux Tax on Vechilces > $20,000
MIN_CAR_DATE = "2010" # Set the oldest Model of Car's sold by the company:
#Functions
def As_Dollars(Number):
"""Format Dollars amounts to strings"""
Number_Display = f"${Number:,.2f}"
return Number_Display
def Name_Validation(Name):
""" Function to Validate a Name for Input: Allowing Spaces, - and '"""
for Char in Name:
if ("A" <= Char <= "Z" or "a" <= Char <= "z" or Char == " "
or Char == "-" or Char == "'"):
continue
else:
return False
return True
def Time_Change(Date):
"""Convert Date input (YYYY-MM_DD) into a datetime object"""
import datetime
Date = datetime.datetime.strptime(Date, "%Y-%m-%d")
return Date
# Province Info
P = {"Newfoundland Labrador: NL", "Prince Edward Island: PE", "Nova Scotia: NS", "New Brunswick: NB",
"Quebec: QC", "Ontario: ON", "Manitoba: MB", "Saskatchewan: SK", "Alberta: AB",
"British Columbia: BC", "Yukon: YT", "Northwest Territories: NT", "Nunavut: NU"}
Province_List = ["NL", "PE", "NS", "NB", "QC", "ON", "MB", "SK", "AB", "BC", "YT", "NT", "NU"]
MIN_CAR_DATE = datetime.datetime.strptime(MIN_CAR_DATE, "%Y") #Oldest Car still in operation for a extra Validation
Today_Date = datetime.datetime.now()
Max_Date = Today_Date + datetime.timedelta(days=365) #Calculate the Max Date for cars that could be sold
# Inputs
# Customer Details Inputs
while True:
print()
print("Please Enter Customer Information")
print()
print("Enter (END) for Customer First Name to Exit Program.")
print()
while True:
Cus_First_Name = input("First Name: ").title().lstrip().rstrip()
if Cus_First_Name.upper() == "END": #Code to End the Program
break
elif Cus_First_Name == "":
print("First Name cannot be blank: Please Re-Enter")
elif len(Cus_First_Name) >25:
print("Invalid First Name Length: Cannot be longer than 25 letters ")
elif Name_Validation(Cus_First_Name) == False: #Function to Validate Name Input
print("Invalid Name Entered: Please use letters between (a-z), (-) and (') ")
else:
break
if Cus_First_Name.upper() == "END":
print()
print("Good Bye")
print()
break
while True:
Cus_Last_Name = input("Last Name: ").title().lstrip().rstrip()
if Cus_Last_Name == "":
print("Last Name cannot be blank: Please Re-Enter")
elif len(Cus_Last_Name) > 30:
print("Invalid Last Name Length: Cannot be longer than 30 letters ")
elif Name_Validation(Cus_Last_Name) == False: #Function to Validate Name Input
print("Invalid Name Entered: Please use letters between (a-z), (-) and (') ")
else:
break
while True:
Street_Address = input("Street Address: ").lstrip().rstrip().title()
if Street_Address == "":
print("Street Address Input cannot be blank: ")
elif len(Street_Address) > 35:
print("Invalid Entry Street Address Length: Cannot be longer than 35 characters ")
else:
break
while True:
City = input("City: ").lstrip().rstrip()
if City == "":
print("City Input cannot be blank: ")
elif len(City) > 20:
print("Invalid Entry City Length: Cannot be longer than 20 characters ")
else:
break
while True:
Province = input("Enter two Digit Province Code: ").upper()
if Province in Province_List:
break
else:
print()
print("Invalid Entry: Please Enter two Digit Province Code: ")
print()
for Code in P:
print(Code)
while True:
Postal_Code = input("Postal Code: ").upper().strip()
if Postal_Code == "":
print("Postal Code Entry Cannot be Blank. Please Re-enter")
elif (Postal_Code[0].isalpha() == True and Postal_Code[1].isdigit() == True and len(Postal_Code) == 6
and Postal_Code[2].isalpha() == True and Postal_Code[3].isdigit() == True and Postal_Code[4].isalpha()
and Postal_Code[5].isdigit() == True):
break
else:
print("Invalid Postal Code: Please Re-Enter (A1A1A1)")
while True:
Phone_Number = input("Enter 10 Digit Phone Number: ").strip().replace("-", "")
if Phone_Number.isdigit() == False:
print("Invalid Entry!: Enter 10 Digit Phone Number no '-' needed")
elif len(Phone_Number) != 10:
print("Invalid Entry!: Enter 10 Digit Phone Number no '-' needed")
else:
break
while True: # Date Validation Loop
try:
Purchase_date = input("Enter Invoice Date as (YYYY-MM-DD): ")
Purchase_date = Time_Change(Purchase_date)
except:
print("Invalid Date Entry. Re-Enter using (YYYY-MM-DD)")
else:
if Purchase_date > datetime.datetime.now():
print("Invalid Date: Cannot future Date Purchases ")
else:
break
print()
print("Enter Car Details") #Car Details Inputs
print()
while True:
Plate_Number = input("Licence Plate Number format XXX999: ").strip().upper()
if Plate_Number == "":
print("Licence Plate Entry Cannot be Blank. Please Re-enter")
elif Plate_Number[0:3].isalpha() == False or Plate_Number[3:6].isdigit() == False: #Licence Plate Validation for the correct format
print("Invalid Entry: Ensure entry has 3 letters and 3 digits ie: (AAA999) ")
else:
break
while True:
Car_Make = input("Enter the Car's Make & Model: ").title().lstrip().rstrip()
if Car_Make == "":
print("Car Make & Model: Entry Cannot be Blank. Please Re-enter")
else:
break
while True:
try:
print()
print(F"The Min Year: {MIN_CAR_DATE.year} & Max Year {Max_Date.year}")
print("For Special Entries outside this range input (S) for Car Year")
print()
Car_Year = input("The Year of the Car: YYYY: ").lstrip().rstrip()
# This allow for a special input outside of min-max range
if Car_Year.upper() == "S":
print()
Car_Year = input("Special Request input for car Year: YYYY ").lstrip().rstrip()
Car_Year = datetime.datetime.strptime(Car_Year, "%Y")
Car_Year = str(Car_Year.year)
break
Car_Year = datetime.datetime.strptime(Car_Year, "%Y")
except:
print("Invalid Car Year Entry: Input as per example: 2021 ")
else:
if Car_Year > Max_Date:
print(f"Invalid Entry: Car Year cannot be greater than '{Max_Date.year}' ")
elif Car_Year < MIN_CAR_DATE:
print(f"Invalid Entry: The oldest Model is set to '{MIN_CAR_DATE}' ")
else:
Car_Year = str(Car_Year.year)
break
while True: #Validation Loop for Selling and Trade Allowance
try:
Sell_Price = float(input("Sell Price: $").lstrip().rstrip())
Trade_Price = float(input("Trade Allowance: $").lstrip().rstrip())
except:
print("Invalid Entry:Input the Selling Price: ")
else:
if Sell_Price > 50000:
print()
print("Invalid Entry: Selling Price cannot exceed $50,000")
print()
elif Sell_Price < 0:
print()
print("Invalid Entry: Selling Price cannot be less than 0 ")
print()
elif Trade_Price > Sell_Price:
print()
print("Invalid Entry: Trade Allowance cannot be higher than Selling Price ")
print()
else:
break
while True:
Sales_Rep = input("Enter Sales Rep Name: ").title().lstrip().rstrip()
if Sales_Rep == "":
print("Name cannot be blank: Please Re-Enter")
elif Name_Validation(Sales_Rep) == False: #Function to Validate Name I
print("Invalid Name Entered: Please use letters between (a-z), (-) and (') ")
else:
break
while True:
try:
Credit_Card = int(input("Enter Credit Card Number: ").strip())
except:
print("Invalid Entry: Please Enter a Valid Credit Card Number ")
else:
Credit_Card = str(Credit_Card)
if Credit_Card == "":
print("Invalid Entry: Credit Card Number cannot be blank ")
elif len(Credit_Card) == 16:
Credit_Card = f"{Credit_Card[0:4]} {Credit_Card[4:8]} {Credit_Card[8:12]} {Credit_Card[12:16]}"
break
else:
print("Invalid Entry Please Enter The 16 digit Credit Card Number. ")
while True:
try:
Expiry_Date = input("Credit Card Expiry date: MM/YY ")
Expiry_Date = datetime.datetime.strptime(Expiry_Date, "%m/%y")
except:
print("Invalid Date Entry. Re-Enter using (MM/YY)")
else:
break
# Processing
After_TradeP = Sell_Price - Trade_Price
Hst = Sell_Price * TAX_RATE
Transfer = Sell_Price * TRANS_RATE
# Logic Statements for Lincense Fee and Lux Tax
if Sell_Price <= 5000:
Lincense_Fee = STAND_FEE
Lux_Tax = 0
elif Sell_Price > 5000 and Sell_Price <= 20000:
Lincense_Fee = LUX_FEE
Lux_Tax = 0
else:
Lincense_Fee = LUX_FEE + (Sell_Price * LUX_RATE)
Total_Price = After_TradeP + Hst + Transfer + Lincense_Fee
# For Loop For Monthly Payment Display
print()
AnyKey = input("Press any key to Display Financing Options....")
print()
print("# Years # Payments Financing Fee Total Price Monthly Payment")
for Years in range(1, 5) : #Loop to show different monthly payments for the customer to choose
New_Price = Total_Price
Payment = 12 * Years
Financing_Fee = 39.99 * Years #Financing Fee $39.99
New_Price += Financing_Fee
Monthly_Payment = New_Price / Payment
print(F" {Years:1} {Payment:2} {As_Dollars(Financing_Fee):>10} {As_Dollars(New_Price):10} {As_Dollars(Monthly_Payment):>10}")
print()
while True:
try:
Pay_Plan = int(input("Enter the payment schedule you want to follow (1-4): #"))
except:
print("Invlaid Entry: Please Enter a number Between 1-4 ")
else:
if Pay_Plan < 1 or Pay_Plan > 4:
print("Invalid Entry Select a payment schedule between (1-4 ")
else:
break
# Monthly Payment Processing
Payment = 12 * Pay_Plan
Financing_Fee = 39.99 * Pay_Plan
Total_Price += Financing_Fee
Monthly_Payment = Total_Price / Payment
First_Payment = Purchase_date + datetime.timedelta(days=30) # First Payment
#Receipt ID
Receipt_ID = f"{Cus_First_Name[0]}{Cus_Last_Name[0]}-{Plate_Number[3:6]}-{Phone_Number[6:10]}"
#Outputs
print()
print(" " * 6, "Honest Harry Car Sales")
print(" " * 5, "Used Car Sale and Receipt")
print()
print(f"Invoice Date: {Purchase_date.strftime('%b %d, %Y')}")
print(f"Receipt No: {Receipt_ID}")
print()
print("Sold to:")
print(f"{' ' * 5}{Cus_First_Name[0]}.{Cus_Last_Name}")
print(f"{' ' * 5}{Street_Address}")
print(f"{' ' * 5}{City},{Province:2},{Postal_Code}") #Added a max length for the city in validation to be 25 chars to keep format
print()
print("Car Details:")
print(f"{' ' * 5}{Car_Year} {Car_Make}")
print()
print(f"{'Sale price:':25}{As_Dollars(Sell_Price):>10}")
print(f"{'Trade Allowance:':25}{As_Dollars(Trade_Price):>10}")
print(f"{'Price after Trade:':25}{As_Dollars(After_TradeP):>10}")
print(f"{' ':25}{'-' * 10}")
print(f"{'HST:':25}{As_Dollars(Hst):>10}")
print(f"{'License Fee:':25}{As_Dollars(Lincense_Fee):>10}")
print(f"{'Transfer Fee:':25}{As_Dollars(Transfer):>10}")
print(f"{' ':25}{'-' * 10}")
print(f"{'Total Sales Cost: ':25}{As_Dollars(Total_Price):>10}")
print()
print(f"Terms: {Pay_Plan}{' '* 9}{'Total payments:'} {Payment}")
print(f"{'Monthly payment':25}{As_Dollars(Monthly_Payment):>10}")
print()
print(" " * 3, "Honest Harry Car Sales")
print("Best used cars at the best price!")
print()
# This code prompts the user if they want to make another entry(Secondary Exit Point for the user)
while True:
Continue = input("Do you want to make another Entry? (Y) or (N) ")
if Continue.upper() == "Y":
break
elif Continue.upper() == "N":
print()
Cus_First_Name = "END"
break
else:
print("Incorrect Value entered, Please Enter Y or N")
if Cus_First_Name == "END":
print()
print("Good Bye")
print()
break | [
"datetime.datetime.strptime",
"datetime.datetime.now",
"datetime.timedelta"
] | [((1529, 1575), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['MIN_CAR_DATE', '"""%Y"""'], {}), "(MIN_CAR_DATE, '%Y')\n", (1555, 1575), False, 'import datetime\n'), ((1654, 1677), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1675, 1677), False, 'import datetime\n'), ((1065, 1109), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['Date', '"""%Y-%m-%d"""'], {}), "(Date, '%Y-%m-%d')\n", (1091, 1109), False, 'import datetime\n'), ((1702, 1730), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(365)'}), '(days=365)\n', (1720, 1730), False, 'import datetime\n'), ((11644, 11671), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(30)'}), '(days=30)\n', (11662, 11671), False, 'import datetime\n'), ((7175, 7217), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['Car_Year', '"""%Y"""'], {}), "(Car_Year, '%Y')\n", (7201, 7217), False, 'import datetime\n'), ((9677, 9725), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['Expiry_Date', '"""%m/%y"""'], {}), "(Expiry_Date, '%m/%y')\n", (9703, 9725), False, 'import datetime\n'), ((5539, 5562), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5560, 5562), False, 'import datetime\n'), ((7041, 7083), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['Car_Year', '"""%Y"""'], {}), "(Car_Year, '%Y')\n", (7067, 7083), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from projections import *
# -----------------------------------------------------------------------------
def polar_frame(ax, title=None, legend=False, zoom=False, labels=True):
""" Draw a polar frame """
for rho in [0, 2,5,10,20,40,60,80,90]:
lw, color, alpha = 1, '0.00', 0.25
if rho == 90 and not zoom:
color, lw, alpha = '0.00', 2, 1
n = 500
R = np.ones(n)*rho/90.0
T = np.linspace(-np.pi/2,np.pi/2,n)
X,Y = polar_to_cartesian(R,T)
ax.plot(X, Y-1/2, color=color, lw=lw, alpha=alpha)
if not zoom and rho in [0,10,20,40,80] and labels:
ax.text(X[-1]*1.0-0.075, Y[-1],u'%d°' % rho, color='k', # size=15,
horizontalalignment='center', verticalalignment='center')
for theta in [-90,-60,-30,0,+30,+60,+90]:
lw, color, alpha = 1, '0.00', 0.25
if theta in[-90,+90] and not zoom:
color, lw, alpha = '0.00', 2, 1
angle = theta/90.0*np.pi/2
n = 500
R = np.linspace(0,1,n)
T = np.ones(n)*angle
X,Y = polar_to_cartesian(R,T)
ax.plot(X, Y, color=color, lw=lw, alpha=alpha)
if not zoom and theta in [-90,-60,-30,+30,+60,+90] and labels:
ax.text(X[-1]*1.05, Y[-1]*1.05,u'%d°' % theta, color='k', # size=15,
horizontalalignment='left', verticalalignment='center')
d = 0.01
ax.set_xlim( 0.0-d, 1.0+d)
ax.set_ylim(-1.0-d, 1.0+d)
ax.set_xticks([])
ax.set_yticks([])
if legend:
ax.set_frame_on(True)
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',-1.2))
ax.set_xticks([])
ax.text(0.0,-1.1, "$\longleftarrow$ Foveal",
verticalalignment='top', horizontalalignment='left', size=12)
ax.text(1.0,-1.1, "Peripheral $\longrightarrow$",
verticalalignment='top', horizontalalignment='right', size=12)
else:
ax.set_frame_on(False)
if title:
ax.title(title)
# -----------------------------------------------------------------------------
def logpolar_frame(ax, title=None, legend=False, labels=True):
""" Draw a log polar frame """
for rho in [2,5,10,20,40,60,80,90]:
lw, color, alpha = 1, '0.00', 0.25
if rho == 90:
color, lw, alpha = '0.00', 2, 1
n = 500
R = np.ones(n)*rho/90.0
T = np.linspace(-np.pi/2,np.pi/2,n)
X,Y = polar_to_logpolar(R,T)
X,Y = X*2, 2*Y-1
ax.plot(X, Y, color=color, lw=lw, alpha=alpha)
if labels and rho in [2,5,10,20,40,80]:
ax.text(X[-1], Y[-1]+0.05, u'%d°' % rho, color='k', # size=15,
horizontalalignment='right', verticalalignment='bottom')
for theta in [-90,-60,-30, 0, +30,+60,+90]:
lw, color, alpha = 1, '0.00', 0.25
if theta in[-90,+90]:
color, lw, alpha = '0.00', 2, 1
angle = theta/90.0*np.pi/2
n = 500
R = np.linspace(0,1,n)
T = np.ones(n)*angle
X,Y = polar_to_logpolar(R,T)
X,Y = X*2, 2*Y-1
ax.plot(X,Y, color=color, lw=lw, alpha=alpha)
if labels:
ax.text(X[-1]*1.0+.05, Y[-1]*1.0,u'%d°' % theta, color='k', # size=15,
horizontalalignment='left', verticalalignment='center')
d = 0.01
ax.set_xlim( 0.0-d, 2.0+d)
ax.set_ylim(-1.0-d, 1.0+d)
ax.set_xticks([])
ax.set_yticks([])
if legend:
ax.set_frame_on(True)
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',-1.2))
ax.set_xticks([0,2])
ax.set_xticklabels(['0', '4.8 (mm)'])
ax.text(0.0,-1.1, "$\longleftarrow$ Rostral",
verticalalignment='top', horizontalalignment='left', size=12)
ax.text(2,-1.1, "Caudal $\longrightarrow$",
verticalalignment='top', horizontalalignment='right', size=12)
else:
ax.set_frame_on(False)
if title:
ax.title(title)
# -----------------------------------------------------------------------------
def polar_imshow(axis, Z, *args, **kwargs):
kwargs['interpolation'] = kwargs.get('interpolation', 'nearest')
kwargs['cmap'] = kwargs.get('cmap', plt.cm.gray_r)
#kwargs['vmin'] = kwargs.get('vmin', Z.min())
#kwargs['vmax'] = kwargs.get('vmax', Z.max())
kwargs['vmin'] = kwargs.get('vmin', 0)
kwargs['vmax'] = kwargs.get('vmax', 1)
kwargs['origin'] = kwargs.get('origin', 'lower')
axis.imshow(Z, extent=[0,1,-1, 1], *args, **kwargs)
# -----------------------------------------------------------------------------
def logpolar_imshow(axis, Z, *args, **kwargs):
kwargs['interpolation'] = kwargs.get('interpolation', 'nearest')
kwargs['cmap'] = kwargs.get('cmap', plt.cm.gray_r)
#kwargs['vmin'] = kwargs.get('vmin', Z.min())
#kwargs['vmax'] = kwargs.get('vmax', Z.max())
kwargs['vmin'] = kwargs.get('vmin', 0)
kwargs['vmax'] = kwargs.get('vmax', 1)
kwargs['origin'] = kwargs.get('origin', 'lower')
im = axis.imshow(Z, extent=[0,2,-1, 1], *args, **kwargs)
# axins = inset_axes(axis, width='25%', height='5%', loc=3)
# vmin, vmax = Z.min(), Z.max()
# plt.colorbar(im, cax=axins, orientation='horizontal', ticks=[vmin,vmax], format = '%.2f')
# axins.xaxis.set_ticks_position('bottom')
| [
"numpy.linspace",
"numpy.ones"
] | [((2374, 2411), 'numpy.linspace', 'np.linspace', (['(-np.pi / 2)', '(np.pi / 2)', 'n'], {}), '(-np.pi / 2, np.pi / 2, n)\n', (2385, 2411), True, 'import numpy as np\n'), ((2961, 2981), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (2972, 2981), True, 'import numpy as np\n'), ((4499, 4536), 'numpy.linspace', 'np.linspace', (['(-np.pi / 2)', '(np.pi / 2)', 'n'], {}), '(-np.pi / 2, np.pi / 2, n)\n', (4510, 4536), True, 'import numpy as np\n'), ((5081, 5101), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (5092, 5101), True, 'import numpy as np\n'), ((2992, 3002), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2999, 3002), True, 'import numpy as np\n'), ((5112, 5122), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (5119, 5122), True, 'import numpy as np\n'), ((2342, 2352), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2349, 2352), True, 'import numpy as np\n'), ((4467, 4477), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (4474, 4477), True, 'import numpy as np\n')] |
from flask_wtf import Form
from wtforms import (TextField, StringField, BooleanField,PasswordField,
validators)
from .utils import Unique
from .models import User
from .constants import (USER_LEN_MSG, USER_REQ_MSG, USER_DUPL_MSG,
EMAIL_FORMAT_MSG, EMAIL_REQ_MSG, EMAIL_DUPL_MSG,
PWD_REQ_MSG, PWD_LEN_MSG, PWD_MATCH_MSG, INCORRECT_PWD)
class LoginForm(Form):
username = TextField('Username',
[validators.Length(max=25,message=USER_LEN_MSG),
validators.Required(message=USER_REQ_MSG)])
password = PasswordField('Password',
[validators.Required(message=PWD_REQ_MSG)])
class SignupForm(Form):
username = TextField('Username', [validators.Length(max=25,
message=USER_LEN_MSG),
validators.Required(message=USER_REQ_MSG),
Unique(User,User.username, message=USER_DUPL_MSG)])
email = TextField('Email', [validators.Email(message=EMAIL_FORMAT_MSG),
validators.Required(message=EMAIL_REQ_MSG),
Unique(User, User.email, message=EMAIL_DUPL_MSG)])
password = PasswordField('Password', [validators.Length(max=25,
message=PWD_REQ_MSG),
validators.InputRequired(message=PWD_REQ_MSG),
validators.EqualTo('confirm',
message=PWD_MATCH_MSG)])
confirm = PasswordField('Repeat Password')
| [
"wtforms.validators.Email",
"wtforms.PasswordField",
"wtforms.validators.EqualTo",
"wtforms.validators.Required",
"wtforms.validators.Length",
"wtforms.validators.InputRequired"
] | [((1615, 1647), 'wtforms.PasswordField', 'PasswordField', (['"""Repeat Password"""'], {}), "('Repeat Password')\n", (1628, 1647), False, 'from wtforms import TextField, StringField, BooleanField, PasswordField, validators\n'), ((491, 538), 'wtforms.validators.Length', 'validators.Length', ([], {'max': '(25)', 'message': 'USER_LEN_MSG'}), '(max=25, message=USER_LEN_MSG)\n', (508, 538), False, 'from wtforms import TextField, StringField, BooleanField, PasswordField, validators\n'), ((567, 608), 'wtforms.validators.Required', 'validators.Required', ([], {'message': 'USER_REQ_MSG'}), '(message=USER_REQ_MSG)\n', (586, 608), False, 'from wtforms import TextField, StringField, BooleanField, PasswordField, validators\n'), ((681, 721), 'wtforms.validators.Required', 'validators.Required', ([], {'message': 'PWD_REQ_MSG'}), '(message=PWD_REQ_MSG)\n', (700, 721), False, 'from wtforms import TextField, StringField, BooleanField, PasswordField, validators\n'), ((788, 835), 'wtforms.validators.Length', 'validators.Length', ([], {'max': '(25)', 'message': 'USER_LEN_MSG'}), '(max=25, message=USER_LEN_MSG)\n', (805, 835), False, 'from wtforms import TextField, StringField, BooleanField, PasswordField, validators\n'), ((919, 960), 'wtforms.validators.Required', 'validators.Required', ([], {'message': 'USER_REQ_MSG'}), '(message=USER_REQ_MSG)\n', (938, 960), False, 'from wtforms import TextField, StringField, BooleanField, PasswordField, validators\n'), ((1072, 1114), 'wtforms.validators.Email', 'validators.Email', ([], {'message': 'EMAIL_FORMAT_MSG'}), '(message=EMAIL_FORMAT_MSG)\n', (1088, 1114), False, 'from wtforms import TextField, StringField, BooleanField, PasswordField, validators\n'), ((1136, 1178), 'wtforms.validators.Required', 'validators.Required', ([], {'message': 'EMAIL_REQ_MSG'}), '(message=EMAIL_REQ_MSG)\n', (1155, 1178), False, 'from wtforms import TextField, StringField, BooleanField, PasswordField, validators\n'), ((1293, 1339), 'wtforms.validators.Length', 'validators.Length', ([], {'max': '(25)', 'message': 'PWD_REQ_MSG'}), '(max=25, message=PWD_REQ_MSG)\n', (1310, 1339), False, 'from wtforms import TextField, StringField, BooleanField, PasswordField, validators\n'), ((1426, 1471), 'wtforms.validators.InputRequired', 'validators.InputRequired', ([], {'message': 'PWD_REQ_MSG'}), '(message=PWD_REQ_MSG)\n', (1450, 1471), False, 'from wtforms import TextField, StringField, BooleanField, PasswordField, validators\n'), ((1500, 1552), 'wtforms.validators.EqualTo', 'validators.EqualTo', (['"""confirm"""'], {'message': 'PWD_MATCH_MSG'}), "('confirm', message=PWD_MATCH_MSG)\n", (1518, 1552), False, 'from wtforms import TextField, StringField, BooleanField, PasswordField, validators\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import adsk.fusion
import unittest
# note: load_tests is required for the "pattern" test filtering functionality in loadTestsFromModule in run()
from fscad.test_utils import FscadTestCase, load_tests
from fscad.fscad import *
class ThickenTest(FscadTestCase):
def test_basic_cylinder(self):
cylinder = Cylinder(1, 1)
Thicken(cylinder.side, 1).create_occurrence(create_children=True)
def test_quarter_cylinder(self):
cylinder = Cylinder(1, 1)
box = Box(1, 1, 1)
box.place(
-box == ~cylinder,
-box == ~cylinder,
-box == -cylinder)
assembly = Intersection(cylinder, box)
Thicken(assembly.find_faces(cylinder.side)[0], 1).create_occurrence(create_children=True)
def test_cylinder_with_hole(self):
cylinder = Cylinder(1, 1)
hole = Cylinder(1, .25, name="hole")
hole.rx(90)
hole.place(
~hole == ~cylinder,
+hole == ~cylinder,
~hole == ~cylinder)
assembly = Difference(cylinder, hole)
Thicken(assembly.find_faces(cylinder.side)[0], 1).create_occurrence(create_children=True)
def test_rotated_quarter_cylinder(self):
cylinder = Cylinder(1, 1)
box = Box(1, 1, 1)
box.place(
-box == ~cylinder,
-box == ~cylinder,
-box == -cylinder)
assembly = Intersection(cylinder, box)
assembly.ry(45)
Thicken(assembly.find_faces(cylinder.side)[0], 1).create_occurrence(create_children=True)
def test_translated_quarter_cylinder(self):
cylinder = Cylinder(1, 1)
box = Box(1, 1, 1)
box.place(
-box == ~cylinder,
-box == ~cylinder,
-box == -cylinder)
assembly = Intersection(cylinder, box)
assembly.tx(.5)
Thicken(assembly.find_faces(cylinder.side)[0], 1).create_occurrence(create_children=True)
def test_truncated_cone(self):
cone = Cylinder(1, 1, .5, name="cone")
Thicken(cone.side, 1).create_occurrence(create_children=True)
def test_full_cone(self):
cone = Cylinder(1, 1, 0, name="cone")
Thicken(cone.side, 1).create_occurrence(create_children=True)
def test_cylindrical_face(self):
cylinder = Cylinder(1, 1)
Thicken(cylinder.side.make_component(), 1).create_occurrence(create_children=True)
def test_box_negative_thickness(self):
box = Box(1, 1, 1)
Thicken(box.top, -1.5).create_occurrence(create_children=True)
def test_cylinder_face_large_negative_thickness(self):
cylinder = Cylinder(1, 1)
Thicken(cylinder.side, -3).create_occurrence(create_children=True)
def test_cylinder_face_small_negative_thickness(self):
cylinder = Cylinder(1, 1)
Thicken(cylinder.side, -.5).create_occurrence(create_children=True)
def test_thicken_body(self):
box = Box(1, 1, 1)
Thicken(box, 1).create_occurrence(create_children=True)
def test_multiple_components(self):
box1 = Box(1, 1, 1)
box2 = Box(1, 1, 1)
box2.place(
(-box2 == +box1) + .1,
~box2 == ~box1,
~box2 == ~box1)
Thicken((box1, box2), 1).create_occurrence(create_children=True)
def test_multiple_faces(self):
box = Box(1, 1, 1)
Thicken((box.front, box.top), 1).create_occurrence(create_children=True)
def run(context):
import sys
test_suite = unittest.defaultTestLoader.loadTestsFromModule(
sys.modules[__name__]
#, pattern="multiple_faces"
)
unittest.TextTestRunner(failfast=True).run(test_suite)
| [
"unittest.defaultTestLoader.loadTestsFromModule",
"unittest.TextTestRunner"
] | [((4064, 4133), 'unittest.defaultTestLoader.loadTestsFromModule', 'unittest.defaultTestLoader.loadTestsFromModule', (['sys.modules[__name__]'], {}), '(sys.modules[__name__])\n', (4110, 4133), False, 'import unittest\n'), ((4189, 4227), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'failfast': '(True)'}), '(failfast=True)\n', (4212, 4227), False, 'import unittest\n')] |
"""Unit tests for radar_statistics.py."""
import unittest
import numpy
from gewittergefahr.gg_utils import radar_statistics as radar_stats
TOLERANCE = 1e-6
FAKE_STATISTIC_NAME = 'foo'
FAKE_PERCENTILE_LEVEL = -9999.
# The following constants are used to test
# radar_field_and_statistic_to_column_name,
# radar_field_and_percentile_to_column_name, and
# _column_name_to_statistic_params.
RADAR_FIELD_NAME = 'reflectivity_dbz'
RADAR_HEIGHT_M_ASL = 250
STATISTIC_NAME = 'kurtosis'
COLUMN_NAME_FOR_NON_PERCENTILE = 'reflectivity_dbz_250metres_kurtosis'
PERCENTILE_LEVEL_UNROUNDED = 75.12
PERCENTILE_LEVEL_ROUNDED = 75.1
COLUMN_NAME_FOR_PERCENTILE = 'reflectivity_dbz_250metres_percentile075.1'
INVALID_COLUMN_NAME = 'foo'
# The following constants are used to test extract_radar_grid_points.
RADAR_FIELD_MATRIX = numpy.array([
[-1, -1, 10, 20, 30, 40],
[-1, 5, 15, 25, 35, 50],
[5, 10, 25, 40, 55, 70],
[10, 30, 50, 70, 75, -1]
], dtype=float)
RADAR_FIELD_MATRIX[RADAR_FIELD_MATRIX < 0] = numpy.nan
ROW_INDICES_FOR_1D_ARRAY = numpy.array([0, 0, 1, 1, 2, 2, 3, 3], dtype=int)
COLUMN_INDICES_FOR_1D_ARRAY = numpy.array([0, 5, 1, 4, 2, 3, 0, 5], dtype=int)
RADAR_FIELD_1D_ARRAY = numpy.array([
numpy.nan, 40, 5, 35, 25, 40, 10, numpy.nan
])
# The following constants are used to test get_spatial_statistics.
RADAR_FIELD_FOR_STATS = numpy.array([
[-1, 0, 20],
[20, 50, 60]
], dtype=float)
RADAR_FIELD_FOR_STATS[RADAR_FIELD_FOR_STATS < 0] = numpy.nan
STATISTIC_NAMES = [
radar_stats.AVERAGE_NAME, radar_stats.STANDARD_DEVIATION_NAME,
radar_stats.SKEWNESS_NAME, radar_stats.KURTOSIS_NAME
]
STATISTIC_VALUES = numpy.array([30, 24.494897, 0.170103, -1.75])
PERCENTILE_LEVELS = numpy.array([0, 5, 25, 50, 75, 95, 100], dtype=float)
PERCENTILE_VALUES = numpy.array([0, 4, 20, 20, 50, 58, 60], dtype=float)
class RadarStatisticsTests(unittest.TestCase):
"""Each method is a unit test for radar_statistics.py."""
def test_radar_field_and_statistic_to_column_name(self):
"""Ensures correctness of radar_field_and_statistic_to_column_name."""
this_column_name = radar_stats.radar_field_and_statistic_to_column_name(
radar_field_name=RADAR_FIELD_NAME,
radar_height_m_asl=RADAR_HEIGHT_M_ASL,
statistic_name=STATISTIC_NAME)
self.assertTrue(this_column_name == COLUMN_NAME_FOR_NON_PERCENTILE)
def test_radar_field_and_percentile_to_column_name_reflectivity(self):
"""Ensures correctness of radar_field_and_percentile_to_column_name."""
this_column_name = (
radar_stats.radar_field_and_percentile_to_column_name(
radar_field_name=RADAR_FIELD_NAME,
radar_height_m_asl=RADAR_HEIGHT_M_ASL,
percentile_level=PERCENTILE_LEVEL_UNROUNDED)
)
self.assertTrue(this_column_name == COLUMN_NAME_FOR_PERCENTILE)
def test_column_name_to_statistic_params_percentile(self):
"""Ensures correct output from _column_name_to_statistic_params.
In this case, statistic is a percentile.
"""
this_parameter_dict = radar_stats._column_name_to_statistic_params(
COLUMN_NAME_FOR_PERCENTILE)
self.assertFalse(
this_parameter_dict[radar_stats.IS_GRIDRAD_STATISTIC_KEY]
)
self.assertTrue(
this_parameter_dict[radar_stats.RADAR_FIELD_NAME_KEY] ==
RADAR_FIELD_NAME
)
self.assertTrue(
this_parameter_dict[radar_stats.RADAR_HEIGHT_KEY] ==
RADAR_HEIGHT_M_ASL
)
self.assertTrue(
this_parameter_dict[radar_stats.STATISTIC_NAME_KEY] is None
)
self.assertTrue(
this_parameter_dict[radar_stats.PERCENTILE_LEVEL_KEY] ==
PERCENTILE_LEVEL_ROUNDED
)
def test_column_name_to_statistic_params_non_percentile(self):
"""Ensures correct output from _column_name_to_statistic_params.
In this case, statistic is *not* a percentile.
"""
this_parameter_dict = radar_stats._column_name_to_statistic_params(
COLUMN_NAME_FOR_NON_PERCENTILE)
self.assertFalse(
this_parameter_dict[radar_stats.IS_GRIDRAD_STATISTIC_KEY]
)
self.assertTrue(
this_parameter_dict[radar_stats.RADAR_FIELD_NAME_KEY] ==
RADAR_FIELD_NAME
)
self.assertTrue(
this_parameter_dict[radar_stats.RADAR_HEIGHT_KEY] ==
RADAR_HEIGHT_M_ASL
)
self.assertTrue(
this_parameter_dict[radar_stats.STATISTIC_NAME_KEY] ==
STATISTIC_NAME
)
self.assertTrue(
this_parameter_dict[radar_stats.PERCENTILE_LEVEL_KEY] is None
)
def test_column_name_to_statistic_params_invalid(self):
"""Ensures correct output from _column_name_to_statistic_params.
In this case, column name is invalid (does not correspond to a radar
statistic).
"""
this_parameter_dict = radar_stats._column_name_to_statistic_params(
INVALID_COLUMN_NAME)
self.assertTrue(this_parameter_dict is None)
def test_check_statistic_params_all_good(self):
"""Ensures correct output from _check_statistic_params.
In this case, all inputs are valid.
"""
radar_stats._check_statistic_params(
radar_stats.STATISTIC_NAMES, radar_stats.DEFAULT_PERCENTILE_LEVELS)
def test_check_statistic_params_bad_string(self):
"""Ensures correct output from _check_statistic_params.
In this case, one statistic name is invalid.
"""
with self.assertRaises(ValueError):
radar_stats._check_statistic_params(
radar_stats.STATISTIC_NAMES + [FAKE_STATISTIC_NAME],
radar_stats.DEFAULT_PERCENTILE_LEVELS
)
def test_check_statistic_params_bad_percentile(self):
"""Ensures correct output from _check_statistic_params.
In this case, one percentile level is invalid.
"""
these_percentile_levels = numpy.concatenate((
radar_stats.DEFAULT_PERCENTILE_LEVELS,
numpy.array([FAKE_PERCENTILE_LEVEL])
))
with self.assertRaises(ValueError):
radar_stats._check_statistic_params(
radar_stats.STATISTIC_NAMES, these_percentile_levels)
def test_extract_radar_grid_points(self):
"""Ensures correct output from extract_radar_grid_points."""
this_field_1d_array = radar_stats.extract_radar_grid_points(
RADAR_FIELD_MATRIX, row_indices=ROW_INDICES_FOR_1D_ARRAY,
column_indices=COLUMN_INDICES_FOR_1D_ARRAY)
self.assertTrue(numpy.allclose(
this_field_1d_array, RADAR_FIELD_1D_ARRAY, equal_nan=True,
atol=TOLERANCE
))
def test_get_spatial_statistics(self):
"""Ensures correct output from get_spatial_statistics."""
these_statistic_values, these_percentile_values = (
radar_stats.get_spatial_statistics(
RADAR_FIELD_FOR_STATS, statistic_names=STATISTIC_NAMES,
percentile_levels=PERCENTILE_LEVELS)
)
self.assertTrue(numpy.allclose(
these_statistic_values, STATISTIC_VALUES, atol=TOLERANCE
))
self.assertTrue(numpy.allclose(
these_percentile_values, PERCENTILE_VALUES, atol=TOLERANCE
))
if __name__ == '__main__':
unittest.main()
| [
"gewittergefahr.gg_utils.radar_statistics._check_statistic_params",
"gewittergefahr.gg_utils.radar_statistics.get_spatial_statistics",
"numpy.allclose",
"gewittergefahr.gg_utils.radar_statistics.radar_field_and_percentile_to_column_name",
"numpy.array",
"gewittergefahr.gg_utils.radar_statistics.radar_fiel... | [((815, 947), 'numpy.array', 'numpy.array', (['[[-1, -1, 10, 20, 30, 40], [-1, 5, 15, 25, 35, 50], [5, 10, 25, 40, 55, 70],\n [10, 30, 50, 70, 75, -1]]'], {'dtype': 'float'}), '([[-1, -1, 10, 20, 30, 40], [-1, 5, 15, 25, 35, 50], [5, 10, 25,\n 40, 55, 70], [10, 30, 50, 70, 75, -1]], dtype=float)\n', (826, 947), False, 'import numpy\n'), ((1046, 1094), 'numpy.array', 'numpy.array', (['[0, 0, 1, 1, 2, 2, 3, 3]'], {'dtype': 'int'}), '([0, 0, 1, 1, 2, 2, 3, 3], dtype=int)\n', (1057, 1094), False, 'import numpy\n'), ((1125, 1173), 'numpy.array', 'numpy.array', (['[0, 5, 1, 4, 2, 3, 0, 5]'], {'dtype': 'int'}), '([0, 5, 1, 4, 2, 3, 0, 5], dtype=int)\n', (1136, 1173), False, 'import numpy\n'), ((1197, 1255), 'numpy.array', 'numpy.array', (['[numpy.nan, 40, 5, 35, 25, 40, 10, numpy.nan]'], {}), '([numpy.nan, 40, 5, 35, 25, 40, 10, numpy.nan])\n', (1208, 1255), False, 'import numpy\n'), ((1354, 1407), 'numpy.array', 'numpy.array', (['[[-1, 0, 20], [20, 50, 60]]'], {'dtype': 'float'}), '([[-1, 0, 20], [20, 50, 60]], dtype=float)\n', (1365, 1407), False, 'import numpy\n'), ((1647, 1692), 'numpy.array', 'numpy.array', (['[30, 24.494897, 0.170103, -1.75]'], {}), '([30, 24.494897, 0.170103, -1.75])\n', (1658, 1692), False, 'import numpy\n'), ((1713, 1766), 'numpy.array', 'numpy.array', (['[0, 5, 25, 50, 75, 95, 100]'], {'dtype': 'float'}), '([0, 5, 25, 50, 75, 95, 100], dtype=float)\n', (1724, 1766), False, 'import numpy\n'), ((1787, 1839), 'numpy.array', 'numpy.array', (['[0, 4, 20, 20, 50, 58, 60]'], {'dtype': 'float'}), '([0, 4, 20, 20, 50, 58, 60], dtype=float)\n', (1798, 1839), False, 'import numpy\n'), ((7506, 7521), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7519, 7521), False, 'import unittest\n'), ((2120, 2287), 'gewittergefahr.gg_utils.radar_statistics.radar_field_and_statistic_to_column_name', 'radar_stats.radar_field_and_statistic_to_column_name', ([], {'radar_field_name': 'RADAR_FIELD_NAME', 'radar_height_m_asl': 'RADAR_HEIGHT_M_ASL', 'statistic_name': 'STATISTIC_NAME'}), '(radar_field_name=\n RADAR_FIELD_NAME, radar_height_m_asl=RADAR_HEIGHT_M_ASL, statistic_name\n =STATISTIC_NAME)\n', (2172, 2287), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((2590, 2771), 'gewittergefahr.gg_utils.radar_statistics.radar_field_and_percentile_to_column_name', 'radar_stats.radar_field_and_percentile_to_column_name', ([], {'radar_field_name': 'RADAR_FIELD_NAME', 'radar_height_m_asl': 'RADAR_HEIGHT_M_ASL', 'percentile_level': 'PERCENTILE_LEVEL_UNROUNDED'}), '(radar_field_name=\n RADAR_FIELD_NAME, radar_height_m_asl=RADAR_HEIGHT_M_ASL,\n percentile_level=PERCENTILE_LEVEL_UNROUNDED)\n', (2643, 2771), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((3125, 3197), 'gewittergefahr.gg_utils.radar_statistics._column_name_to_statistic_params', 'radar_stats._column_name_to_statistic_params', (['COLUMN_NAME_FOR_PERCENTILE'], {}), '(COLUMN_NAME_FOR_PERCENTILE)\n', (3169, 3197), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((4070, 4146), 'gewittergefahr.gg_utils.radar_statistics._column_name_to_statistic_params', 'radar_stats._column_name_to_statistic_params', (['COLUMN_NAME_FOR_NON_PERCENTILE'], {}), '(COLUMN_NAME_FOR_NON_PERCENTILE)\n', (4114, 4146), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((5044, 5109), 'gewittergefahr.gg_utils.radar_statistics._column_name_to_statistic_params', 'radar_stats._column_name_to_statistic_params', (['INVALID_COLUMN_NAME'], {}), '(INVALID_COLUMN_NAME)\n', (5088, 5109), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((5360, 5467), 'gewittergefahr.gg_utils.radar_statistics._check_statistic_params', 'radar_stats._check_statistic_params', (['radar_stats.STATISTIC_NAMES', 'radar_stats.DEFAULT_PERCENTILE_LEVELS'], {}), '(radar_stats.STATISTIC_NAMES,\n radar_stats.DEFAULT_PERCENTILE_LEVELS)\n', (5395, 5467), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((6561, 6705), 'gewittergefahr.gg_utils.radar_statistics.extract_radar_grid_points', 'radar_stats.extract_radar_grid_points', (['RADAR_FIELD_MATRIX'], {'row_indices': 'ROW_INDICES_FOR_1D_ARRAY', 'column_indices': 'COLUMN_INDICES_FOR_1D_ARRAY'}), '(RADAR_FIELD_MATRIX, row_indices=\n ROW_INDICES_FOR_1D_ARRAY, column_indices=COLUMN_INDICES_FOR_1D_ARRAY)\n', (6598, 6705), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((7059, 7191), 'gewittergefahr.gg_utils.radar_statistics.get_spatial_statistics', 'radar_stats.get_spatial_statistics', (['RADAR_FIELD_FOR_STATS'], {'statistic_names': 'STATISTIC_NAMES', 'percentile_levels': 'PERCENTILE_LEVELS'}), '(RADAR_FIELD_FOR_STATS, statistic_names=\n STATISTIC_NAMES, percentile_levels=PERCENTILE_LEVELS)\n', (7093, 7191), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((5719, 5851), 'gewittergefahr.gg_utils.radar_statistics._check_statistic_params', 'radar_stats._check_statistic_params', (['(radar_stats.STATISTIC_NAMES + [FAKE_STATISTIC_NAME])', 'radar_stats.DEFAULT_PERCENTILE_LEVELS'], {}), '(radar_stats.STATISTIC_NAMES + [\n FAKE_STATISTIC_NAME], radar_stats.DEFAULT_PERCENTILE_LEVELS)\n', (5754, 5851), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((6307, 6400), 'gewittergefahr.gg_utils.radar_statistics._check_statistic_params', 'radar_stats._check_statistic_params', (['radar_stats.STATISTIC_NAMES', 'these_percentile_levels'], {}), '(radar_stats.STATISTIC_NAMES,\n these_percentile_levels)\n', (6342, 6400), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((6751, 6844), 'numpy.allclose', 'numpy.allclose', (['this_field_1d_array', 'RADAR_FIELD_1D_ARRAY'], {'equal_nan': '(True)', 'atol': 'TOLERANCE'}), '(this_field_1d_array, RADAR_FIELD_1D_ARRAY, equal_nan=True,\n atol=TOLERANCE)\n', (6765, 6844), False, 'import numpy\n'), ((7255, 7327), 'numpy.allclose', 'numpy.allclose', (['these_statistic_values', 'STATISTIC_VALUES'], {'atol': 'TOLERANCE'}), '(these_statistic_values, STATISTIC_VALUES, atol=TOLERANCE)\n', (7269, 7327), False, 'import numpy\n'), ((7375, 7449), 'numpy.allclose', 'numpy.allclose', (['these_percentile_values', 'PERCENTILE_VALUES'], {'atol': 'TOLERANCE'}), '(these_percentile_values, PERCENTILE_VALUES, atol=TOLERANCE)\n', (7389, 7449), False, 'import numpy\n'), ((6202, 6238), 'numpy.array', 'numpy.array', (['[FAKE_PERCENTILE_LEVEL]'], {}), '([FAKE_PERCENTILE_LEVEL])\n', (6213, 6238), False, 'import numpy\n')] |
import math
import re
import time
from dataclasses import dataclass
from enum import Enum
from typing import List, NoReturn
class Direction(Enum):
LEFT = 'L'
RIGHT = 'R'
FORWARD = 'F'
@staticmethod
def is_direction(value: str) -> bool:
return value in {e.value for e in set(Direction)}
class Axis(Enum):
NORTH = 'N'
EAST = 'E'
SOUTH = 'S'
WEST = 'W'
def rotate(self, direction: Direction, degrees: int) -> 'Axis':
direction_order = list(Axis)
if direction is Direction.LEFT:
direction_order.reverse()
shift = int(degrees / 90)
index = (direction_order.index(self) + shift) % 4
return direction_order[index]
@staticmethod
def is_axis(value: str) -> bool:
return value in {e.value for e in set(Axis)}
@dataclass
class Instruction:
action: str
value: int
@dataclass
class Point:
x: int
y: int
def move(self, axis: Axis, value: int) -> NoReturn:
if axis is Axis.NORTH:
self.y += value
elif axis is Axis.SOUTH:
self.y -= value
elif axis is Axis.EAST:
self.x += value
elif axis is Axis.WEST:
self.x -= value
def rotate(self, direction: Direction, degrees: int) -> NoReturn:
shift = int(degrees / 90)
for i in range(shift):
if direction is Direction.RIGHT:
new_x = self.y
new_y = -self.x
else:
new_x = -self.y
new_y = self.x
self.x = new_x
self.y = new_y
class NavigationSystem:
def __init__(self, start_axis: Axis):
self.ship_position = Point(0, 0)
self.axis = start_axis
def process_instructions(self, instructions: List[Instruction]) -> NoReturn:
for instruction in instructions:
if Axis.is_axis(instruction.action):
self.move(Axis(instruction.action), instruction.value)
elif Direction.is_direction(instruction.action):
direction = Direction(instruction.action)
if direction is Direction.FORWARD:
self.move_forward(instruction.value)
else:
self.rotate(direction, instruction.value)
def move(self, axis: Axis, value: int) -> NoReturn:
self.ship_position.move(axis, value)
def move_forward(self, value: int) -> NoReturn:
self.move(self.axis, value)
def rotate(self, direction: Direction, value: int) -> NoReturn:
self.axis = self.axis.rotate(direction, value)
def manhattan_distance(self) -> float:
return math.fabs(self.ship_position.x) + math.fabs(self.ship_position.y)
class WaypointNavigationSystem(NavigationSystem):
def __init__(self, start_axis: Axis, waypoint_position: Point):
super().__init__(start_axis)
self.waypoint_position = waypoint_position
def move(self, axis: Axis, value: int) -> NoReturn:
self.waypoint_position.move(axis, value)
def move_forward(self, value: int) -> NoReturn:
self.ship_position.x += self.waypoint_position.x * value
self.ship_position.y += self.waypoint_position.y * value
def rotate(self, direction: Direction, value: int) -> NoReturn:
self.waypoint_position.rotate(direction, value)
if __name__ == '__main__':
with open('rain_risk.txt') as f:
start_time = time.time()
all_file = f.read()
instructions = [Instruction(m.group(1), int(m.group(2))) for m in re.finditer(r'(\w)(\d+)', all_file)]
navigation = NavigationSystem(Axis.EAST)
navigation.process_instructions(instructions)
manhattan_distance = navigation.manhattan_distance()
print(f'Manhattan distance Part One: {manhattan_distance}')
waypoint_navigation = WaypointNavigationSystem(Axis.EAST, Point(10, 1))
waypoint_navigation.process_instructions(instructions)
manhattan_distance = waypoint_navigation.manhattan_distance()
print(f'Manhattan distance Part Two: {manhattan_distance}')
print(f'Took {time.time() - start_time} seconds')
| [
"math.fabs",
"time.time",
"re.finditer"
] | [((3453, 3464), 'time.time', 'time.time', ([], {}), '()\n', (3462, 3464), False, 'import time\n'), ((2677, 2708), 'math.fabs', 'math.fabs', (['self.ship_position.x'], {}), '(self.ship_position.x)\n', (2686, 2708), False, 'import math\n'), ((2711, 2742), 'math.fabs', 'math.fabs', (['self.ship_position.y'], {}), '(self.ship_position.y)\n', (2720, 2742), False, 'import math\n'), ((3567, 3603), 're.finditer', 're.finditer', (['"""(\\\\w)(\\\\d+)"""', 'all_file'], {}), "('(\\\\w)(\\\\d+)', all_file)\n", (3578, 3603), False, 'import re\n'), ((4139, 4150), 'time.time', 'time.time', ([], {}), '()\n', (4148, 4150), False, 'import time\n')] |
from sklearn.decomposition import NMF
from nltk.tokenize import sent_tokenize
import numpy as np
class NonNegativeFactorization():
def __init__(self, A, r, feature_names, num_top_words, num_top_documents, corpus):
self.A = A
self.r = r
self.features_names = feature_names
self.corpus = corpus
self.num_top_words = num_top_words
self.num_top_documents = num_top_documents
def decomposition(self):
nmf_model = NMF(n_components=self.r, init='nndsvdar', solver='mu', beta_loss='frobenius', tol=0.1,
random_state=1)
self.W = nmf_model.fit_transform(self.A)
self.H = nmf_model.components_
self.frobenius_norm = nmf_model.reconstruction_err_
self.iter = nmf_model.n_iter_
self.WH = self.W.dot(self.H)
def display_summary(self):
self.data = []
self.index_data = []
for topic_index, topic in enumerate(self.H):
self.data.append([self.features_names[i] for i in topic.argsort()[:-self.num_top_words - 1:-1]])
top_doc_indices = np.argsort(self.W[:, topic_index])[::-1][0:self.num_top_documents]
self.index_data.append(top_doc_indices)
summary_list = []
for index in self.index_data[0]:
summary_list.append(self.corpus[index])
self.summary = summary_list
data = {
'top_words': self.data[0],
'summary_result': self.summary
}
return data
| [
"numpy.argsort",
"sklearn.decomposition.NMF"
] | [((475, 582), 'sklearn.decomposition.NMF', 'NMF', ([], {'n_components': 'self.r', 'init': '"""nndsvdar"""', 'solver': '"""mu"""', 'beta_loss': '"""frobenius"""', 'tol': '(0.1)', 'random_state': '(1)'}), "(n_components=self.r, init='nndsvdar', solver='mu', beta_loss=\n 'frobenius', tol=0.1, random_state=1)\n", (478, 582), False, 'from sklearn.decomposition import NMF\n'), ((1103, 1137), 'numpy.argsort', 'np.argsort', (['self.W[:, topic_index]'], {}), '(self.W[:, topic_index])\n', (1113, 1137), True, 'import numpy as np\n')] |
from ..utils import entropy_gaussian
from ..core import cmutinf, centropy, ncmutinf
from ..metrics import (AlphaAngleTransferEntropy, ContactTransferEntropy,
DihedralTransferEntropy)
from msmbuilder.example_datasets import FsPeptide
import numpy as np
from numpy.testing import assert_almost_equal as eq, assert_allclose as close
rs = np.random.RandomState(42)
n, d = 50000, 3
P = np.array([[1, .5, .25], [.5, 1, 0], [.25, 0, 1]])
COV = np.dot(P, P.T)
Y = rs.randn(d, n)
a, b, c = np.dot(P, Y)
a, b, c = np.atleast_2d(a).T, np.atleast_2d(b).T, np.atleast_2d(c).T
true_cmutinf = (entropy_gaussian(COV[[[0, 0], [0, 2]], [[0, 2], [2, 2]]]) +
entropy_gaussian(COV[[[1, 1], [1, 2]], [[1, 2], [2, 2]]]) -
entropy_gaussian(COV) - entropy_gaussian(COV[2, 2]))
true_cond_ent = (entropy_gaussian(COV[[[0, 0], [0, 2]], [[0, 2], [2, 2]]]) -
entropy_gaussian(COV[2, 2]))
TRUE_NCMUTINF = true_cmutinf / true_cond_ent
def test_ncmutinf_kde():
close(ncmutinf(3, a, b, c, method='kde'), TRUE_NCMUTINF, atol=.05, rtol=.2)
def test_ncmutinf_knn():
close(ncmutinf(3, a, b, c, method='knn'), TRUE_NCMUTINF, atol=.05, rtol=.2)
def test_ncmutinf_chaowangjost():
close(ncmutinf(8, a, b, c, method='chaowangjost'), TRUE_NCMUTINF, atol=.05,
rtol=.2)
def test_ncmutinf_grassberger():
close(ncmutinf(8, a, b, c, method='grassberger'), TRUE_NCMUTINF, atol=.05,
rtol=.2)
def test_ncmutinf_doanes_rule():
close(ncmutinf(None, a, b, c, method='grassberger'), TRUE_NCMUTINF,
atol=.05, rtol=.4)
def test_ncmutinf_naive():
close(ncmutinf(8, a, b, c, method=None), TRUE_NCMUTINF, atol=.05, rtol=.2)
def test_ncmutinf():
a = rs.uniform(low=0, high=360, size=1000).reshape(-1, 1)
b = rs.uniform(low=0, high=360, size=1000).reshape(-1, 1)
c = rs.uniform(low=0, high=360, size=1000).reshape(-1, 1)
NCMI_REF = (cmutinf(10, a, b, c) /
centropy(10, a, c))
NCMI = ncmutinf(10, a, b, c)
eq(NCMI, NCMI_REF, 5)
def test_fs_tent():
traj1, traj2 = FsPeptide().get().trajectories[:2]
idx = [at.index for at in traj1.topology.atoms
if at.residue.index in [3, 4, 5, 6, 7, 8]]
traj1 = traj1.atom_slice(atom_indices=idx)[::100]
traj2 = traj2.atom_slice(atom_indices=idx)[::100]
traj = (traj1, traj2)
yield _test_tent_alpha, traj
yield _test_tent_contact, traj
yield _test_tent_dihedral, traj
def _test_tent_alpha(traj):
tent = AlphaAngleTransferEntropy()
T = tent.partial_transform(traj)
assert T is not None
def _test_tent_contact(traj):
tent = ContactTransferEntropy()
T = tent.partial_transform(traj)
assert T is not None
def _test_tent_dihedral(traj):
tent = DihedralTransferEntropy()
T = tent.partial_transform(traj)
assert T is not None
_test_tent_shuffle(tent, traj)
def _test_tent_shuffle(tent, traj):
T = tent.partial_transform(traj, shuffle=0)
TS = tent.partial_transform(traj, shuffle=1)
assert T is not None
assert TS is not None
| [
"numpy.atleast_2d",
"msmbuilder.example_datasets.FsPeptide",
"numpy.array",
"numpy.dot",
"numpy.testing.assert_almost_equal",
"numpy.random.RandomState"
] | [((362, 387), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (383, 387), True, 'import numpy as np\n'), ((409, 462), 'numpy.array', 'np.array', (['[[1, 0.5, 0.25], [0.5, 1, 0], [0.25, 0, 1]]'], {}), '([[1, 0.5, 0.25], [0.5, 1, 0], [0.25, 0, 1]])\n', (417, 462), True, 'import numpy as np\n'), ((465, 479), 'numpy.dot', 'np.dot', (['P', 'P.T'], {}), '(P, P.T)\n', (471, 479), True, 'import numpy as np\n'), ((509, 521), 'numpy.dot', 'np.dot', (['P', 'Y'], {}), '(P, Y)\n', (515, 521), True, 'import numpy as np\n'), ((2031, 2052), 'numpy.testing.assert_almost_equal', 'eq', (['NCMI', 'NCMI_REF', '(5)'], {}), '(NCMI, NCMI_REF, 5)\n', (2033, 2052), True, 'from numpy.testing import assert_almost_equal as eq, assert_allclose as close\n'), ((532, 548), 'numpy.atleast_2d', 'np.atleast_2d', (['a'], {}), '(a)\n', (545, 548), True, 'import numpy as np\n'), ((552, 568), 'numpy.atleast_2d', 'np.atleast_2d', (['b'], {}), '(b)\n', (565, 568), True, 'import numpy as np\n'), ((572, 588), 'numpy.atleast_2d', 'np.atleast_2d', (['c'], {}), '(c)\n', (585, 588), True, 'import numpy as np\n'), ((2095, 2106), 'msmbuilder.example_datasets.FsPeptide', 'FsPeptide', ([], {}), '()\n', (2104, 2106), False, 'from msmbuilder.example_datasets import FsPeptide\n')] |
import setuptools
import os
def get_files_in_dir(dirName):
listOfFile = os.listdir(dirName)
completeFileList = list()
for file in listOfFile:
completePath = os.path.join(dirName, file)
if os.path.isdir(completePath):
completeFileList = completeFileList + get_files_in_dir(completePath)
else:
completeFileList.append(completePath)
return completeFileList
def find_json_files():
json_files = []
files = get_files_in_dir(".")
for file in files:
root, extension = os.path.splitext(file)
if extension == ".json":
json_files.append(file)
return json_files
with open("MANIFEST.in", "w") as mfs:
for file in find_json_files():
mfs.write("include " + file + "\n")
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="py-money-legos",
version="0.1.1",
author="<NAME>",
author_email="<EMAIL>",
description="money-legos for Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/gokhanbaydar/py-money-legos",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
) | [
"os.listdir",
"setuptools.find_packages",
"os.path.join",
"os.path.splitext",
"os.path.isdir"
] | [((78, 97), 'os.listdir', 'os.listdir', (['dirName'], {}), '(dirName)\n', (88, 97), False, 'import os\n'), ((179, 206), 'os.path.join', 'os.path.join', (['dirName', 'file'], {}), '(dirName, file)\n', (191, 206), False, 'import os\n'), ((218, 245), 'os.path.isdir', 'os.path.isdir', (['completePath'], {}), '(completePath)\n', (231, 245), False, 'import os\n'), ((548, 570), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (564, 570), False, 'import os\n'), ((1188, 1214), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (1212, 1214), False, 'import setuptools\n')] |
import tensorflow as tf
import numpy as np
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from dl_utils.tf.plot_weights import plot_weights
# CUDA GPU
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'
def get_stats(X,Y):
print("X : shape : (%d,%d)" % (X.shape), end='')
print(",min : %f, max : %f" % (np.min(X), np.max(X)))
print("Y : shape : (%d,%d)" % (Y.shape), end='')
print(", min : %f, max : %f" % (np.min(Y), np.max(Y)))
def load_data(one_hot=False, nb_classes=10):
from tensorflow.examples.tutorials.mnist import input_data
# load data
mnist = input_data.read_data_sets('MNIST_data/', one_hot=one_hot)
x_train, y_train = mnist.train.images, mnist.train.labels
x_test, y_test = mnist.test.images, mnist.test.labels
x_validation, y_validation = mnist.validation.images, mnist.validation.labels
if not(one_hot):
y_train = tf.keras.utils.to_categorical(y_train, num_classes=nb_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=nb_classes)
y_validation = tf.keras.utils.to_categorical(y_validation, num_classes=nb_classes)
# print stats
print("train : ")
get_stats(x_train, y_train)
print("test : ")
get_stats(x_test, y_test)
print("validation : ")
get_stats(x_validation, y_validation)
return mnist, x_train, y_train, x_test, y_test, x_validation, y_validation
def build_model(use_softmax=False):
model = Sequential()
model.add(Dense(256, input_shape=(None, 784), activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(10))
# softmax
if use_softmax:
model.add(Activation('softmax'))
return model
def main():
print('In main...')
# 1. load the data
data, x_train, y_train, x_test, y_test, x_validation, y_validation = load_data()
# 2. create model
model = build_model()
#3. get logits
x = tf.placeholder(dtype=tf.float32, shape=(None, 784))
y = tf.placeholder(dtype=tf.float32, shape=(None, 10))
logits = model(x)
model.summary()
# 4. get loss
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y)
cost = tf.reduce_sum(cross_entropy)
# 5. Optimization
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
# 6. Performance checks
y_pred = tf.nn.softmax(logits)
correct_prediction = tf.equal(tf.argmax(y_pred, axis=1), tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# 7. session run
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
nb_epochs = 100
nb_batches = 256
for epoch in range(nb_epochs):
avg_cost = 0
# shuffle
x_train, y_train = shuffle(x_train, y_train)
for j in range(0, int(x_train.shape[0]/nb_batches)):
start = j*nb_batches
end = (j+1)*nb_batches
if end > x_train.shape[0]:
end = x_train.shape[0]
x_batch, y_batch = x_train[start:end,:], y_train[start:end,:]
# run optimization on this batch
_, c = sess.run([optimizer,cost], feed_dict={x:x_batch, y:y_batch})
avg_cost += c/nb_batches
# Display results
if epoch % 10 == 0:
acc = sess.run(accuracy, feed_dict={x:x_validation, y:y_validation})
print("Epoch:", '%04d' % (epoch+1),
"cost={:.9f}".format(avg_cost),
"accuracy=", acc)
#layer_weights = model.layers[2].get_weights()[0]
#plot_weights(layer_weights, (10,10), idx=epoch)
print("Optimization finished...")
## 8. Test accuracy
#acc = sess.run(accuracy, feed_dict={x:x_test, y:y_test})
#print("Test accuracy = ", acc)
if __name__ == '__main__' :
main()
| [
"tensorflow.keras.utils.to_categorical",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"tensorflow.Session",
"sklearn.utils.shuffle",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"numpy.max",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.examples.tutorials.mnist.input_data.r... | [((750, 807), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data/"""'], {'one_hot': 'one_hot'}), "('MNIST_data/', one_hot=one_hot)\n", (775, 807), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((1606, 1618), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1616, 1618), False, 'from tensorflow.keras.models import Sequential\n'), ((2076, 2127), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, 784)'}), '(dtype=tf.float32, shape=(None, 784))\n', (2090, 2127), True, 'import tensorflow as tf\n'), ((2136, 2186), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, 10)'}), '(dtype=tf.float32, shape=(None, 10))\n', (2150, 2186), True, 'import tensorflow as tf\n'), ((2268, 2335), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'logits', 'labels': 'y'}), '(logits=logits, labels=y)\n', (2310, 2335), True, 'import tensorflow as tf\n'), ((2347, 2375), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cross_entropy'], {}), '(cross_entropy)\n', (2360, 2375), True, 'import tensorflow as tf\n'), ((2527, 2548), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (2540, 2548), True, 'import tensorflow as tf\n'), ((1050, 1112), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y_train'], {'num_classes': 'nb_classes'}), '(y_train, num_classes=nb_classes)\n', (1079, 1112), True, 'import tensorflow as tf\n'), ((1130, 1191), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y_test'], {'num_classes': 'nb_classes'}), '(y_test, num_classes=nb_classes)\n', (1159, 1191), True, 'import tensorflow as tf\n'), ((1215, 1282), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y_validation'], {'num_classes': 'nb_classes'}), '(y_validation, num_classes=nb_classes)\n', (1244, 1282), True, 'import tensorflow as tf\n'), ((1633, 1687), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'input_shape': '(None, 784)', 'activation': '"""relu"""'}), "(256, input_shape=(None, 784), activation='relu')\n", (1638, 1687), False, 'from tensorflow.keras.layers import Dense, Activation\n'), ((1703, 1732), 'tensorflow.keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (1708, 1732), False, 'from tensorflow.keras.layers import Dense, Activation\n'), ((1748, 1757), 'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (1753, 1757), False, 'from tensorflow.keras.layers import Dense, Activation\n'), ((2583, 2608), 'tensorflow.argmax', 'tf.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (2592, 2608), True, 'import tensorflow as tf\n'), ((2610, 2630), 'tensorflow.argmax', 'tf.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (2619, 2630), True, 'import tensorflow as tf\n'), ((2662, 2701), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2669, 2701), True, 'import tensorflow as tf\n'), ((2734, 2746), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2744, 2746), True, 'import tensorflow as tf\n'), ((1812, 1833), 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (1822, 1833), False, 'from tensorflow.keras.layers import Dense, Activation\n'), ((2415, 2469), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (2448, 2469), True, 'import tensorflow as tf\n'), ((2773, 2806), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2804, 2806), True, 'import tensorflow as tf\n'), ((2975, 3000), 'sklearn.utils.shuffle', 'shuffle', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (2982, 3000), False, 'from sklearn.utils import shuffle\n'), ((476, 485), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (482, 485), True, 'import numpy as np\n'), ((487, 496), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (493, 496), True, 'import numpy as np\n'), ((588, 597), 'numpy.min', 'np.min', (['Y'], {}), '(Y)\n', (594, 597), True, 'import numpy as np\n'), ((599, 608), 'numpy.max', 'np.max', (['Y'], {}), '(Y)\n', (605, 608), True, 'import numpy as np\n')] |
#!/usr/local/bin/python
import pybullet
import time
import pybullet_data
import math, random
import sys
import numpy
import OpenGL
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
import ctypes
from OpenGL.GL import shaders
import render.cubeRender as cubeRender
import render.worldRender as worldRender
import render.renderLoop as renderLoop
import world.worldGen as worldGen
import gui.textRender as textRender
import gui.invRender as invRender
import gui.inventory as inventory
# TERRAIN VBO ARRAYS
chunksize = 16
basez = -9
world = {}
view_range = 1
chunk_view_adjustment = 4.0 # This is a slight multiplier to the size of the world so that it doesn't look small when the player walks on it.
for x in range(-5, 5):
for y in range(-5, 5):
chunk = worldGen.worldGen(chunksize)
world[(x,y)] = chunk
print(world.keys())
terrain_vbo = numpy.array([], numpy.float32)
color_vbo = numpy.array([], numpy.float32)
stos = [] # Static Terrain Objects, which appear with the terrain
# Cubes, non-terrain object arrays. Using VBOs for moving objects is laggy.
cubes = []
vertex_array = numpy.array([], numpy.float32)
color_array = numpy.array([], numpy.float32)
# Temporary line to test world rendering.
display = (1200, 720)
def init_libs():
"""Initialize Pybullet and Pygame. Turn on GL's depth test and make the sky blue."""
physicsClient = pybullet.connect(pybullet.DIRECT)
pybullet.setGravity(0,0,-40)
pygame.init()
pygame.display.set_mode(display, HWSURFACE|OPENGL|DOUBLEBUF)
pygame.key.set_repeat(1, 2)
glEnable(GL_DEPTH_TEST)
glClearColor(0.5, 0.6, 1.0, 0.0);
glViewport(0, 0, display[0], display[1])
def setup_world(world, player_chunk_position):
"""Sets up the basic debug world."""
plane = pybullet.createCollisionShape(pybullet.GEOM_PLANE)
pybullet.createMultiBody(0,plane,-1,[0,0,-9])
# Later on my plan is to just generate a world. For now, we need some debug cubes.
cubes.append(cubeRender.createCube([0,12,0], 1, [45,45,45]))
cubes.append(cubeRender.createCube([4,-4,6], 1, [0,0,0]))
cubes.append(cubeRender.createCube([4,5.9,9], 2, [45,30,10]))
addSTO([18,3], 1, [0.6, 0.2, 0.1])
boxestodelete = worldGen.resetWorldBoxes(chunksize, -9, player_chunk_position, world) # We run this once to initiate the first collision boxes.
return boxestodelete
def reset_camera():
"""Resets the camera to the start position. Returns Yaw and Camera Position."""
# These numbers have no significance other than just being near where the cubes and terrain are rendered. (At the Origin)
yaw = 0.0
pitch = 0.0
camerax = -3
cameray = 1
cameraz = -2
# gluLookAt takes 9 arguments, the camera position, the lookat position and the up vector.
# (Just set the up vector to all zeroes except for a 1 for the axis that is upwards)
# gluLookAt also multiplies the "current vector" rather than changing the camera vector because PyOpenGL is stupid.
# Use glLoadIdentity() to stop this.
glLoadIdentity()
gluPerspective(45, (float(display[0])/float(display[1])), 0.1, 100.0)
gluLookAt(camerax,cameray,cameraz, camerax+(math.cos(yaw)*math.cos(pitch)),cameray+(math.sin(yaw)*math.cos(pitch)),(-4)+math.cos(pitch), 0,0,1)
return yaw, pitch, camerax, cameray, cameraz
def create_program():
VERTEX_SHADER = """
attribute vec3 a_Position;
attribute vec3 a_Color;
varying vec4 v_Color;
void main()
{
v_Color = vec4(a_Color, 1.0);
gl_Position = gl_ModelViewMatrix * vec4(a_Position, 1.0);
}
"""
FRAGMENT_SHADER = """
varying vec4 v_Color;
void main()
{
gl_FragColor = v_Color;
}
"""
vertshader = shaders.compileShader(VERTEX_SHADER, GL_VERTEX_SHADER)
fragshader = shaders.compileShader(FRAGMENT_SHADER, GL_FRAGMENT_SHADER)
program = glCreateProgram()
glAttachShader(program, vertshader)
glAttachShader(program, fragshader)
glLinkProgram(program)
return program
def create_gui_program():
VERTEX_SHADER = """
attribute vec3 a_Position;
attribute vec3 a_Color;
varying vec4 v_Color;
void main()
{
v_Color = vec4(a_Color, 1.0);
gl_Position = vec4(a_Position, 1.0);
}
"""
FRAGMENT_SHADER = """
varying vec4 v_Color;
void main()
{
gl_FragColor = v_Color;
}
"""
vertshader = shaders.compileShader(VERTEX_SHADER, GL_VERTEX_SHADER)
fragshader = shaders.compileShader(FRAGMENT_SHADER, GL_FRAGMENT_SHADER)
program = glCreateProgram()
glAttachShader(program, vertshader)
glAttachShader(program, fragshader)
glLinkProgram(program)
return program
def addVBOVertex(vertex, color):
global terrain_vbo
global color_vbo
terrain_vbo = numpy.append(terrain_vbo, [vertex[0],vertex[1],vertex[2]])
color_vbo = numpy.append(color_vbo, [color[0],color[1],color[2]])
def addSTO(position2d, size, color):
chunk_x = int(math.floor(position2d[0]/chunksize))
chunk_y = int(math.floor(position2d[1]/chunksize))
chunk_position = (chunk_x, chunk_y)
x = int(position2d[0] - (chunk_x*chunksize))
y = int(position2d[1] - (chunk_y*chunksize))
z = len(world[chunk_position][x][y]) + basez + size
stos.append([(position2d[0],position2d[1],z), size, color])
def recalculate_vbos(buffers, player_chunk_position, view_range):
global terrain_vbo
global color_vbo
terrain_vbo = numpy.array([], numpy.float32)
color_vbo = numpy.array([], numpy.float32)
groundpoints, topsoil = worldRender.groundVertices(chunksize, basez, world, player_chunk_position, view_range, chunk_view_adjustment)
for i in range(0,len(groundpoints)):
if topsoil[i] == 0:
addVBOVertex(groundpoints[i],(0.7,0.5,0.2))
elif topsoil[i] == 1:
addVBOVertex(groundpoints[i],(0.3,0.7,0.3))
elif topsoil[i] == 2:
addVBOVertex(groundpoints[i],(0.6,0.6,0.3))
glBindBuffer(GL_ARRAY_BUFFER, buffers[0])
glBufferData(GL_ARRAY_BUFFER, len(terrain_vbo)*4, (ctypes.c_float*len(terrain_vbo))(*terrain_vbo), GL_STATIC_DRAW)
glBindBuffer(GL_ARRAY_BUFFER, buffers[1])
glBufferData(GL_ARRAY_BUFFER, len(color_vbo)*4, (ctypes.c_float*len(color_vbo))(*color_vbo), GL_STATIC_DRAW)
glBindBuffer(GL_ARRAY_BUFFER, 0)
init_libs()
player_chunk_position = (round(-3/chunksize), round(1/chunksize)) # -3 and 1 are the default position of the camera but I need reset camera to come after the world is setup.
last_player_chunk_position = player_chunk_position
boxestodelete = setup_world(world, player_chunk_position)
yaw, pitch, camerax, cameray, cameraz = reset_camera()
program = create_program()
gui_program = create_gui_program()
grab_mouse = False
gui_active = False
buffers = glGenBuffers(2)
recalculate_vbos(buffers, player_chunk_position, view_range)
walkspeed = 0.5
sensitivity = 400.0
text_collection = textRender.TextCollection(display, "gui/textures/")
text_collection.add_text("PyOpenGL Sandbox", 30.0, 0.0, 0.8, True)
prev_pressed = pygame.key.get_pressed()
no_key_timer = 0
gui_v, gui_c = invRender.create_inventory(2,4, display, [])
player_inventory = inventory.create_inv(2,4)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
elif event.type == pygame.KEYDOWN:
pressed_keys = pygame.key.get_pressed()
if pressed_keys[pygame.K_m] and (no_key_timer > 5 or not prev_pressed[pygame.K_m]):
if grab_mouse:
grab_mouse = False
pygame.mouse.set_visible(True)
else:
grab_mouse = True
pygame.mouse.set_visible(False)
if pressed_keys[pygame.K_w] and not gui_active:
camerax += math.cos(yaw) * walkspeed
cameray += math.sin(yaw) * walkspeed
elif pressed_keys[pygame.K_s] and not gui_active:
camerax -= math.cos(yaw) * walkspeed
cameray -= math.sin(yaw) * walkspeed
if pressed_keys[pygame.K_a] and not gui_active:
camerax += math.cos(yaw+(math.pi/2.0)) * walkspeed
cameray += math.sin(yaw+(math.pi/2.0)) * walkspeed
if pressed_keys[pygame.K_d] and not gui_active:
camerax += math.cos(yaw-(math.pi/2.0)) * walkspeed
cameray += math.sin(yaw-(math.pi/2.0)) * walkspeed
if pressed_keys[pygame.K_SPACE] and not gui_active:
yaw, pitch, camerax, cameray, cameraz = reset_camera()
if pressed_keys[pygame.K_q]:
digx = int(float(camerax)/chunk_view_adjustment)
digy = int(float(cameray)/chunk_view_adjustment)
chunk = world[player_chunk_position]
if digx < len(chunk) -1:
if digy < len(chunk[digx]) -1:
if len(world[player_chunk_position][digx][digy]) != 1:
if world[player_chunk_position][digx][digy][-1] == 1 or world[player_chunk_position][digx][digy][-1] == 0:
inventory.add_to_inv(player_inventory, "dirt")
elif world[player_chunk_position][digx][digy][-1] == 2:
inventory.add_to_inv(player_inventory, "sand")
del world[player_chunk_position][digx][digy][-1]
else:
world[player_chunk_position][digx][digy][-1] = 2
boxestodelete = worldGen.resetWorldBoxes(chunksize, basez, player_chunk_position, world, boxestodelete)
recalculate_vbos(buffers, player_chunk_position, view_range)
if pressed_keys[pygame.K_e]:
digx = int(float(camerax)/chunk_view_adjustment) - player_chunk_position[0]*chunksize
digy = int(float(cameray)/chunk_view_adjustment) - player_chunk_position[1]*chunksize
chunk = world[player_chunk_position]
if digx < len(chunk) -1:
if digy < len(chunk[digx]) -1:
if inventory.inv_contains(player_inventory, "dirt"):
world[player_chunk_position][digx][digy].append(0)
inventory.remove_from_inv(player_inventory, "dirt")
elif inventory.inv_contains(player_inventory, "sand"):
world[player_chunk_position][digx][digy].append(2)
inventory.remove_from_inv(player_inventory, "sand")
boxestodelete = worldGen.resetWorldBoxes(chunksize, basez, player_chunk_position, world, boxestodelete)
recalculate_vbos(buffers, player_chunk_position, view_range)
if pressed_keys[pygame.K_f] and not gui_active:
for cube in cubes:
pybullet.applyExternalForce(cube[0], -1, [0,0,100],[0,0,0],pybullet.LINK_FRAME)
if pressed_keys[pygame.K_i] and (no_key_timer > 5 or not prev_pressed[pygame.K_i]):
gui_v, gui_c = invRender.create_inventory(2,4, display, player_inventory)
if gui_active:
gui_active = False
else:
gui_active = True
no_key_timer = 0
prev_pressed = pressed_keys
elif event.type == pygame.MOUSEMOTION and grab_mouse and not gui_active:
mousemove = pygame.mouse.get_pos()
dyaw = mousemove[0] - (display[0]/2)
dpitch = mousemove[1] - (display[1]/2)
newpitch = pitch - dpitch/float(sensitivity)
yaw -= dyaw/float(sensitivity)
if newpitch > -1.45 and newpitch < 1.45:
pitch = newpitch
pygame.mouse.set_pos((display[0]/2),(display[1]/2))
# Step Physics Simulation
pybullet.stepSimulation()
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
player_chunk_position = (round(camerax/(chunksize*chunk_view_adjustment)), round(cameray/(chunksize*chunk_view_adjustment)))
if player_chunk_position != last_player_chunk_position:
boxestodelete = worldGen.resetWorldBoxes(chunksize, basez, player_chunk_position, world, boxestodelete)
recalculate_vbos(buffers, player_chunk_position, view_range)
last_player_chunk_position = player_chunk_position
glLoadIdentity()
gluPerspective(45, (float(display[0])/float(display[1])), 0.1, 100.0)
gluLookAt(camerax,cameray,cameraz, camerax+(math.cos(yaw)*math.cos(pitch)),cameray+(math.sin(yaw)*math.cos(pitch)),cameraz+math.sin(pitch), 0,0,1)
renderLoop.vbo_render(program, buffers, len(terrain_vbo)/3)
renderLoop.render_loop(program, cubes)
for sto in stos:
renderLoop.static_render_loop(program, sto[0], sto[1], sto[2])
#text_collection.render() #Laggy and problematic
if gui_active:
renderLoop.gui_render(gui_program, gui_v, gui_c)
pygame.display.flip()
pygame.time.wait(10)
no_key_timer += 1
| [
"pygame.init",
"pygame.quit",
"math.floor",
"pybullet.setGravity",
"math.cos",
"numpy.array",
"gui.inventory.inv_contains",
"pybullet.createCollisionShape",
"gui.inventory.add_to_inv",
"render.renderLoop.gui_render",
"gui.inventory.remove_from_inv",
"pygame.mouse.set_pos",
"pygame.display.se... | [((889, 919), 'numpy.array', 'numpy.array', (['[]', 'numpy.float32'], {}), '([], numpy.float32)\n', (900, 919), False, 'import numpy\n'), ((932, 962), 'numpy.array', 'numpy.array', (['[]', 'numpy.float32'], {}), '([], numpy.float32)\n', (943, 962), False, 'import numpy\n'), ((1135, 1165), 'numpy.array', 'numpy.array', (['[]', 'numpy.float32'], {}), '([], numpy.float32)\n', (1146, 1165), False, 'import numpy\n'), ((1180, 1210), 'numpy.array', 'numpy.array', (['[]', 'numpy.float32'], {}), '([], numpy.float32)\n', (1191, 1210), False, 'import numpy\n'), ((6666, 6717), 'gui.textRender.TextCollection', 'textRender.TextCollection', (['display', '"""gui/textures/"""'], {}), "(display, 'gui/textures/')\n", (6691, 6717), True, 'import gui.textRender as textRender\n'), ((6801, 6825), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (6823, 6825), False, 'import pygame\n'), ((6859, 6904), 'gui.invRender.create_inventory', 'invRender.create_inventory', (['(2)', '(4)', 'display', '[]'], {}), '(2, 4, display, [])\n', (6885, 6904), True, 'import gui.invRender as invRender\n'), ((6923, 6949), 'gui.inventory.create_inv', 'inventory.create_inv', (['(2)', '(4)'], {}), '(2, 4)\n', (6943, 6949), True, 'import gui.inventory as inventory\n'), ((1397, 1430), 'pybullet.connect', 'pybullet.connect', (['pybullet.DIRECT'], {}), '(pybullet.DIRECT)\n', (1413, 1430), False, 'import pybullet\n'), ((1432, 1462), 'pybullet.setGravity', 'pybullet.setGravity', (['(0)', '(0)', '(-40)'], {}), '(0, 0, -40)\n', (1451, 1462), False, 'import pybullet\n'), ((1464, 1477), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1475, 1477), False, 'import pygame\n'), ((1479, 1543), 'pygame.display.set_mode', 'pygame.display.set_mode', (['display', '(HWSURFACE | OPENGL | DOUBLEBUF)'], {}), '(display, HWSURFACE | OPENGL | DOUBLEBUF)\n', (1502, 1543), False, 'import pygame\n'), ((1541, 1568), 'pygame.key.set_repeat', 'pygame.key.set_repeat', (['(1)', '(2)'], {}), '(1, 2)\n', (1562, 1568), False, 'import pygame\n'), ((1769, 1819), 'pybullet.createCollisionShape', 'pybullet.createCollisionShape', (['pybullet.GEOM_PLANE'], {}), '(pybullet.GEOM_PLANE)\n', (1798, 1819), False, 'import pybullet\n'), ((1821, 1871), 'pybullet.createMultiBody', 'pybullet.createMultiBody', (['(0)', 'plane', '(-1)', '[0, 0, -9]'], {}), '(0, plane, -1, [0, 0, -9])\n', (1845, 1871), False, 'import pybullet\n'), ((2192, 2261), 'world.worldGen.resetWorldBoxes', 'worldGen.resetWorldBoxes', (['chunksize', '(-9)', 'player_chunk_position', 'world'], {}), '(chunksize, -9, player_chunk_position, world)\n', (2216, 2261), True, 'import world.worldGen as worldGen\n'), ((3621, 3675), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['VERTEX_SHADER', 'GL_VERTEX_SHADER'], {}), '(VERTEX_SHADER, GL_VERTEX_SHADER)\n', (3642, 3675), False, 'from OpenGL.GL import shaders\n'), ((3690, 3748), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['FRAGMENT_SHADER', 'GL_FRAGMENT_SHADER'], {}), '(FRAGMENT_SHADER, GL_FRAGMENT_SHADER)\n', (3711, 3748), False, 'from OpenGL.GL import shaders\n'), ((4248, 4302), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['VERTEX_SHADER', 'GL_VERTEX_SHADER'], {}), '(VERTEX_SHADER, GL_VERTEX_SHADER)\n', (4269, 4302), False, 'from OpenGL.GL import shaders\n'), ((4317, 4375), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['FRAGMENT_SHADER', 'GL_FRAGMENT_SHADER'], {}), '(FRAGMENT_SHADER, GL_FRAGMENT_SHADER)\n', (4338, 4375), False, 'from OpenGL.GL import shaders\n'), ((4613, 4673), 'numpy.append', 'numpy.append', (['terrain_vbo', '[vertex[0], vertex[1], vertex[2]]'], {}), '(terrain_vbo, [vertex[0], vertex[1], vertex[2]])\n', (4625, 4673), False, 'import numpy\n'), ((4685, 4740), 'numpy.append', 'numpy.append', (['color_vbo', '[color[0], color[1], color[2]]'], {}), '(color_vbo, [color[0], color[1], color[2]])\n', (4697, 4740), False, 'import numpy\n'), ((5255, 5285), 'numpy.array', 'numpy.array', (['[]', 'numpy.float32'], {}), '([], numpy.float32)\n', (5266, 5285), False, 'import numpy\n'), ((5299, 5329), 'numpy.array', 'numpy.array', (['[]', 'numpy.float32'], {}), '([], numpy.float32)\n', (5310, 5329), False, 'import numpy\n'), ((5357, 5470), 'render.worldRender.groundVertices', 'worldRender.groundVertices', (['chunksize', 'basez', 'world', 'player_chunk_position', 'view_range', 'chunk_view_adjustment'], {}), '(chunksize, basez, world, player_chunk_position,\n view_range, chunk_view_adjustment)\n', (5383, 5470), True, 'import render.worldRender as worldRender\n'), ((6976, 6994), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6992, 6994), False, 'import pygame\n'), ((10736, 10761), 'pybullet.stepSimulation', 'pybullet.stepSimulation', ([], {}), '()\n', (10759, 10761), False, 'import pybullet\n'), ((11527, 11565), 'render.renderLoop.render_loop', 'renderLoop.render_loop', (['program', 'cubes'], {}), '(program, cubes)\n', (11549, 11565), True, 'import render.renderLoop as renderLoop\n'), ((11769, 11790), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (11788, 11790), False, 'import pygame\n'), ((11792, 11812), 'pygame.time.wait', 'pygame.time.wait', (['(10)'], {}), '(10)\n', (11808, 11812), False, 'import pygame\n'), ((801, 829), 'world.worldGen.worldGen', 'worldGen.worldGen', (['chunksize'], {}), '(chunksize)\n', (818, 829), True, 'import world.worldGen as worldGen\n'), ((1966, 2016), 'render.cubeRender.createCube', 'cubeRender.createCube', (['[0, 12, 0]', '(1)', '[45, 45, 45]'], {}), '([0, 12, 0], 1, [45, 45, 45])\n', (1987, 2016), True, 'import render.cubeRender as cubeRender\n'), ((2028, 2075), 'render.cubeRender.createCube', 'cubeRender.createCube', (['[4, -4, 6]', '(1)', '[0, 0, 0]'], {}), '([4, -4, 6], 1, [0, 0, 0])\n', (2049, 2075), True, 'import render.cubeRender as cubeRender\n'), ((2087, 2138), 'render.cubeRender.createCube', 'cubeRender.createCube', (['[4, 5.9, 9]', '(2)', '[45, 30, 10]'], {}), '([4, 5.9, 9], 2, [45, 30, 10])\n', (2108, 2138), True, 'import render.cubeRender as cubeRender\n'), ((4793, 4830), 'math.floor', 'math.floor', (['(position2d[0] / chunksize)'], {}), '(position2d[0] / chunksize)\n', (4803, 4830), False, 'import math, random\n'), ((4845, 4882), 'math.floor', 'math.floor', (['(position2d[1] / chunksize)'], {}), '(position2d[1] / chunksize)\n', (4855, 4882), False, 'import math, random\n'), ((11019, 11110), 'world.worldGen.resetWorldBoxes', 'worldGen.resetWorldBoxes', (['chunksize', 'basez', 'player_chunk_position', 'world', 'boxestodelete'], {}), '(chunksize, basez, player_chunk_position, world,\n boxestodelete)\n', (11043, 11110), True, 'import world.worldGen as worldGen\n'), ((11586, 11648), 'render.renderLoop.static_render_loop', 'renderLoop.static_render_loop', (['program', 'sto[0]', 'sto[1]', 'sto[2]'], {}), '(program, sto[0], sto[1], sto[2])\n', (11615, 11648), True, 'import render.renderLoop as renderLoop\n'), ((11717, 11765), 'render.renderLoop.gui_render', 'renderLoop.gui_render', (['gui_program', 'gui_v', 'gui_c'], {}), '(gui_program, gui_v, gui_c)\n', (11738, 11765), True, 'import render.renderLoop as renderLoop\n'), ((3183, 3198), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (3191, 3198), False, 'import math, random\n'), ((7031, 7044), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (7042, 7044), False, 'import pygame\n'), ((11439, 11454), 'math.sin', 'math.sin', (['pitch'], {}), '(pitch)\n', (11447, 11454), False, 'import math, random\n'), ((3107, 3120), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (3115, 3120), False, 'import math, random\n'), ((3121, 3136), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (3129, 3136), False, 'import math, random\n'), ((3147, 3160), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (3155, 3160), False, 'import math, random\n'), ((3161, 3176), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (3169, 3176), False, 'import math, random\n'), ((7110, 7134), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (7132, 7134), False, 'import pygame\n'), ((11360, 11373), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (11368, 11373), False, 'import math, random\n'), ((11374, 11389), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (11382, 11389), False, 'import math, random\n'), ((11400, 11413), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (11408, 11413), False, 'import math, random\n'), ((11414, 11429), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (11422, 11429), False, 'import math, random\n'), ((8830, 8921), 'world.worldGen.resetWorldBoxes', 'worldGen.resetWorldBoxes', (['chunksize', 'basez', 'player_chunk_position', 'world', 'boxestodelete'], {}), '(chunksize, basez, player_chunk_position, world,\n boxestodelete)\n', (8854, 8921), True, 'import world.worldGen as worldGen\n'), ((9680, 9771), 'world.worldGen.resetWorldBoxes', 'worldGen.resetWorldBoxes', (['chunksize', 'basez', 'player_chunk_position', 'world', 'boxestodelete'], {}), '(chunksize, basez, player_chunk_position, world,\n boxestodelete)\n', (9704, 9771), True, 'import world.worldGen as worldGen\n'), ((10098, 10157), 'gui.invRender.create_inventory', 'invRender.create_inventory', (['(2)', '(4)', 'display', 'player_inventory'], {}), '(2, 4, display, player_inventory)\n', (10124, 10157), True, 'import gui.invRender as invRender\n'), ((10382, 10404), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (10402, 10404), False, 'import pygame\n'), ((10649, 10701), 'pygame.mouse.set_pos', 'pygame.mouse.set_pos', (['(display[0] / 2)', '(display[1] / 2)'], {}), '(display[0] / 2, display[1] / 2)\n', (10669, 10701), False, 'import pygame\n'), ((7274, 7304), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(True)'], {}), '(True)\n', (7298, 7304), False, 'import pygame\n'), ((7343, 7374), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(False)'], {}), '(False)\n', (7367, 7374), False, 'import pygame\n'), ((7441, 7454), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (7449, 7454), False, 'import math, random\n'), ((7483, 7496), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (7491, 7496), False, 'import math, random\n'), ((7710, 7739), 'math.cos', 'math.cos', (['(yaw + math.pi / 2.0)'], {}), '(yaw + math.pi / 2.0)\n', (7718, 7739), False, 'import math, random\n'), ((7766, 7795), 'math.sin', 'math.sin', (['(yaw + math.pi / 2.0)'], {}), '(yaw + math.pi / 2.0)\n', (7774, 7795), False, 'import math, random\n'), ((7872, 7901), 'math.cos', 'math.cos', (['(yaw - math.pi / 2.0)'], {}), '(yaw - math.pi / 2.0)\n', (7880, 7901), False, 'import math, random\n'), ((7928, 7957), 'math.sin', 'math.sin', (['(yaw - math.pi / 2.0)'], {}), '(yaw - math.pi / 2.0)\n', (7936, 7957), False, 'import math, random\n'), ((9912, 10002), 'pybullet.applyExternalForce', 'pybullet.applyExternalForce', (['cube[0]', '(-1)', '[0, 0, 100]', '[0, 0, 0]', 'pybullet.LINK_FRAME'], {}), '(cube[0], -1, [0, 0, 100], [0, 0, 0], pybullet.\n LINK_FRAME)\n', (9939, 10002), False, 'import pybullet\n'), ((7577, 7590), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (7585, 7590), False, 'import math, random\n'), ((7618, 7631), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (7626, 7631), False, 'import math, random\n'), ((9310, 9358), 'gui.inventory.inv_contains', 'inventory.inv_contains', (['player_inventory', '"""dirt"""'], {}), "(player_inventory, 'dirt')\n", (9332, 9358), True, 'import gui.inventory as inventory\n'), ((9425, 9476), 'gui.inventory.remove_from_inv', 'inventory.remove_from_inv', (['player_inventory', '"""dirt"""'], {}), "(player_inventory, 'dirt')\n", (9450, 9476), True, 'import gui.inventory as inventory\n'), ((9488, 9536), 'gui.inventory.inv_contains', 'inventory.inv_contains', (['player_inventory', '"""sand"""'], {}), "(player_inventory, 'sand')\n", (9510, 9536), True, 'import gui.inventory as inventory\n'), ((8509, 8555), 'gui.inventory.add_to_inv', 'inventory.add_to_inv', (['player_inventory', '"""dirt"""'], {}), "(player_inventory, 'dirt')\n", (8529, 8555), True, 'import gui.inventory as inventory\n'), ((9603, 9654), 'gui.inventory.remove_from_inv', 'inventory.remove_from_inv', (['player_inventory', '"""sand"""'], {}), "(player_inventory, 'sand')\n", (9628, 9654), True, 'import gui.inventory as inventory\n'), ((8627, 8673), 'gui.inventory.add_to_inv', 'inventory.add_to_inv', (['player_inventory', '"""sand"""'], {}), "(player_inventory, 'sand')\n", (8647, 8673), True, 'import gui.inventory as inventory\n')] |
import random
import string
import unittest
import warnings
from libs import jenkinslib
from libs.JAF.BaseCommandLineParser import BaseCommandLineParser
from libs.JAF.plugin_CreateAPIToken import CreateAPIToken, CreateAPITokenParser
from libs.JAF.plugin_DeleteAPIToken import DeleteAPIToken, DeleteAPITokenParser
from libs.JAF.plugin_ListAPITokens import ListAPITokens, ListAPITokensParser
from .configuration import (
server,
user_admin,
user_bad,
user_noaccess,
user_normal,
user_read_job_access,
user_read_no_job_access,
)
from .helpers import DummyWebServer, TestFramework
class CreateAPITokenTest(unittest.TestCase, TestFramework):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
self.testcommand = "CreateAPIToken"
self.TestParserClass = CreateAPITokenParser
self.TestClass = CreateAPIToken
def test_invalid_url(self):
"""Make sure that calling with invalid url fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59321/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_bad_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_and_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "http://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_invalid_creds(self):
"""Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_anonymous_creds(self):
"""Make sure that calling with valid jenkins (but no creds)"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_unprivileged_creds(self):
"""Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_noaccess],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_normal_creds_with_user_argument(self):
"""Make sure that calling with valid jenkins (normal creds) and user flag returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_normal, "-U", user_admin],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
class CreateAPITokenParserTest(unittest.TestCase, TestFramework):
def setUp(self):
self.testcommand = "CreateAPIToken"
self.TestClass = CreateAPIToken
self.TestParserClass = CreateAPITokenParser
def test_no_args(self):
"""Ensure that calling with no arguments results in help output and not an error"""
self.basic_test_harness(
["jaf.py", self.testcommand],
[
r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
r"Jenkins Attack Framework",
r"positional arguments:",
],
)
class DeleteAPITokenTest(unittest.TestCase, TestFramework):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
self.testcommand = "DeleteAPIToken"
self.TestParserClass = DeleteAPITokenParser
self.TestClass = DeleteAPIToken
def test_invalid_url(self):
"""Make sure that calling with invalid url fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59321/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_bad_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_and_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "http://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_invalid_creds(self):
"""Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_anonymous_creds(self):
"""Make sure that calling with valid jenkins (but no creds)"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_unprivileged_creds(self):
"""Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_noaccess],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_normal_creds_with_user_argument(self):
"""Make sure that calling with valid jenkins (normal creds) and user flag returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_normal, "-U", user_admin],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
class DeleteAPITokenParserTest(unittest.TestCase, TestFramework):
def setUp(self):
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
def test_no_args(self):
"""Ensure that calling with no arguments results in help output and not an error"""
self.basic_test_harness(
["jaf.py", self.testcommand],
[
r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
r"Jenkins Attack Framework",
r"positional arguments:",
],
)
class ListAPITokensTest(unittest.TestCase, TestFramework):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
self.testcommand = "ListAPITokens"
self.TestParserClass = ListAPITokensParser
self.TestClass = ListAPITokens
def test_invalid_url(self):
"""Make sure that calling with invalid url fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59321/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_bad_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_and_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "http://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_invalid_creds(self):
"""Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_anonymous_creds(self):
"""Make sure that calling with valid jenkins (but no creds)"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_unprivileged_creds(self):
"""Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_noaccess],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_read_no_job_creds_token_list(self):
"""Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_read_no_job_access],
[r"Current API Tokens:"],
)
def test_valid_jenkins_valid_normal_creds_with_user_argument(self):
"""Make sure that calling with valid jenkins (normal creds) and user flag returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_normal, "-U", user_admin],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
class ListAPITokensParserTest(unittest.TestCase, TestFramework):
def setUp(self):
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
def test_no_args(self):
"""Ensure that calling with no arguments results in help output and not an error"""
self.basic_test_harness(
["jaf.py", self.testcommand],
[
r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
r"Jenkins Attack Framework",
r"positional arguments:",
],
)
class CombinedAPITokenNormalUserCredentialsTest(unittest.TestCase, TestFramework):
@classmethod
def setUpClass(cls):
cls.token_name = "testtoken" + "".join(
random.choices(string.ascii_letters + string.digits, k=26)
)
def test_1_valid_jenkins_valid_read_no_job_creds_token_create(self):
"""Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "CreateAPIToken"
self.TestClass = CreateAPIToken
self.TestParserClass = CreateAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_read_no_job_access,
self.token_name,
],
[r"Your new API Token is: "],
)
def test_2_valid_jenkins_valid_read_no_job_creds_token_list(self):
"""Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_read_no_job_access],
[r"Token Name: " + self.token_name],
)
def test_3_valid_jenkins_valid_read_no_job_creds_token_delete_list(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_read_no_job_access],
[r"Token Name: " + self.token_name],
)
def test_4_valid_jenkins_valid_read_no_job_creds_token_delete(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_read_no_job_access,
self.token_name,
],
[r"Token Deleted Successfully."],
)
# For now this is commented out because we can only test this on a cloudbees federated setup, which we don't have
'''
class CombinedAPITokenNormalUserCookieTest(unittest.TestCase, TestFramework):
"""
We need to specifically test auth with cookies because code has to do extra work to derive the logged-in user's username
"""
@classmethod
def setUpClass(cls):
cls.token_name = "testtoken" + "".join(
random.choices(string.ascii_letters + string.digits, k=26)
)
try:
js = jenkinslib.Jenkins(
server,
username=user_read_no_job_access.split(':')[0],
password=':'.join(user_read_no_job_access.split(':')[1:]),
timeout=30,
)
cls.cookie = js.get_cookie()
except Exception:
print(cls.cookie)
#Failure will cause tests to fail, so we ignore here
pass
def test_1_valid_jenkins_valid_read_no_job_creds_token_create(self):
"""Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "CreateAPIToken"
self.TestClass = CreateAPIToken
self.TestParserClass = CreateAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
self.cookie,
self.token_name,
],
[r"Your new API Token is: "],
)
def test_2_valid_jenkins_valid_read_no_job_creds_token_list(self):
"""Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
self.cookie,
],
[r"Token Name: " + self.token_name],
)
def test_3_valid_jenkins_valid_read_no_job_creds_token_delete_list(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
self.cookie,
],
[r"Token Name: " + self.token_name],
)
def test_4_valid_jenkins_valid_read_no_job_creds_token_delete(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
self.cookie,
self.token_name,
],
[r"Token Deleted Successfully."],
)
'''
class CombinedAPITokenAdminUserTest(unittest.TestCase, TestFramework):
@classmethod
def setUpClass(cls):
cls.token_name = "testtoken" + "".join(
random.choices(string.ascii_letters + string.digits, k=26)
)
def test_1_valid_jenkins_valid_admin_creds_token_create_other_user(self):
"""Make sure that calling CreateAPIToken with valid jenkins (admin creds) returns expected results"""
self.testcommand = "CreateAPIToken"
self.TestClass = CreateAPIToken
self.TestParserClass = CreateAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-U",
user_read_no_job_access,
self.token_name,
],
[r"Your new API Token is: "],
)
def test_2_valid_jenkins_valid_admin_creds_token_list_other_user(self):
"""Make sure that calling CreateAPIToken with valid jenkins (admin creds) returns expected results"""
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-U",
user_read_no_job_access,
],
[r"Token Name: " + self.token_name],
)
def test_3_valid_jenkins_valid_admin_creds_token_delete_list_other_user(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (admin creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-U",
user_read_no_job_access,
],
[r"Token Name: " + self.token_name],
)
def test_4_valid_jenkins_valid_admin_creds_token_delete_other_user(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (admin creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-U",
user_read_no_job_access,
self.token_name,
],
[r"Token Deleted Successfully."],
)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"warnings.simplefilter",
"random.choices"
] | [((20897, 20912), 'unittest.main', 'unittest.main', ([], {}), '()\n', (20910, 20912), False, 'import unittest\n'), ((698, 746), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'ResourceWarning'], {}), "('ignore', ResourceWarning)\n", (719, 746), False, 'import warnings\n'), ((4261, 4309), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'ResourceWarning'], {}), "('ignore', ResourceWarning)\n", (4282, 4309), False, 'import warnings\n'), ((7823, 7871), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'ResourceWarning'], {}), "('ignore', ResourceWarning)\n", (7844, 7871), False, 'import warnings\n'), ((11978, 12036), 'random.choices', 'random.choices', (['(string.ascii_letters + string.digits)'], {'k': '(26)'}), '(string.ascii_letters + string.digits, k=26)\n', (11992, 12036), False, 'import random\n'), ((18071, 18129), 'random.choices', 'random.choices', (['(string.ascii_letters + string.digits)'], {'k': '(26)'}), '(string.ascii_letters + string.digits, k=26)\n', (18085, 18129), False, 'import random\n')] |
import pytest
from ethereum import tester
@pytest.mark.xfail
def test_get_block_by_hash(rpc_server, rpc_client, eth_coinbase):
block_number = rpc_client.get_block_number()
assert block_number == 0
to_addr = "0x" + tester.encode_hex(tester.accounts[1])
txn_hash = rpc_client.send_transaction(_from=eth_coinbase, to=to_addr, value=100)
assert txn_hash
txn_receipt = rpc_client.get_transaction_receipt(txn_hash)
block_hash = txn_receipt['blockHash']
block = rpc_client.get_block_by_hash(block_hash)
assert block
| [
"ethereum.tester.encode_hex"
] | [((230, 267), 'ethereum.tester.encode_hex', 'tester.encode_hex', (['tester.accounts[1]'], {}), '(tester.accounts[1])\n', (247, 267), False, 'from ethereum import tester\n')] |
import numpy as np
import tensorflow as tf
from gym import utils
from gym.envs.mujoco import mujoco_env
from meta_mb.meta_envs.base import MetaEnv
class InvertedPendulumEnv(mujoco_env.MujocoEnv, utils.EzPickle, MetaEnv):
def __init__(self):
utils.EzPickle.__init__(self)
mujoco_env.MujocoEnv.__init__(self, 'inverted_pendulum.xml', 2)
def step(self, a):
# reward = 1.0
reward = self._get_reward()
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
# notdone = np.isfinite(ob).all() and (np.abs(ob[1]) <= .2)
# done = not notdone
done = False
return ob, reward, done, {}
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-0.01, high=0.01)
qvel = self.init_qvel + self.np_random.uniform(size=self.model.nv, low=-0.01, high=0.01)
self.set_state(qpos, qvel)
return self._get_obs()
def _get_reward(self):
old_ob = self._get_obs()
reward = -((old_ob[1]) ** 2)
return reward
def _get_obs(self):
return np.concatenate([self.sim.data.qpos, self.sim.data.qvel]).ravel()
def viewer_setup(self):
v = self.viewer
v.cam.trackbodyid = 0
v.cam.distance = self.model.stat.extent
def reward(self, obs, acts, next_obs):
assert obs.ndim == 2
assert obs.shape == next_obs.shape
assert obs.shape[0] == acts.shape[0]
return -(obs[:, 1]) ** 2
def tf_reward(self, obs, acts, next_obs):
return - tf.square(obs[:, 1])
if __name__ == "__main__":
env = InvertedPendulumEnv()
env.reset()
for _ in range(1000):
_ = env.render()
ob, rew, done, info = env.step(env.action_space.sample()) # take a random action
| [
"numpy.concatenate",
"gym.utils.EzPickle.__init__",
"gym.envs.mujoco.mujoco_env.MujocoEnv.__init__",
"tensorflow.square"
] | [((257, 286), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (280, 286), False, 'from gym import utils\n'), ((295, 358), 'gym.envs.mujoco.mujoco_env.MujocoEnv.__init__', 'mujoco_env.MujocoEnv.__init__', (['self', '"""inverted_pendulum.xml"""', '(2)'], {}), "(self, 'inverted_pendulum.xml', 2)\n", (324, 358), False, 'from gym.envs.mujoco import mujoco_env\n'), ((1574, 1594), 'tensorflow.square', 'tf.square', (['obs[:, 1]'], {}), '(obs[:, 1])\n', (1583, 1594), True, 'import tensorflow as tf\n'), ((1120, 1176), 'numpy.concatenate', 'np.concatenate', (['[self.sim.data.qpos, self.sim.data.qvel]'], {}), '([self.sim.data.qpos, self.sim.data.qvel])\n', (1134, 1176), True, 'import numpy as np\n')] |
# Code generated by `typeddictgen`. DO NOT EDIT.
"""CoreV1EventListDict generated type."""
from typing import TypedDict, List
from kubernetes_typed.client import CoreV1EventDict, V1ListMetaDict
CoreV1EventListDict = TypedDict(
"CoreV1EventListDict",
{
"apiVersion": str,
"items": List[CoreV1EventDict],
"kind": str,
"metadata": V1ListMetaDict,
},
total=False,
)
| [
"typing.TypedDict"
] | [((218, 362), 'typing.TypedDict', 'TypedDict', (['"""CoreV1EventListDict"""', "{'apiVersion': str, 'items': List[CoreV1EventDict], 'kind': str, 'metadata':\n V1ListMetaDict}"], {'total': '(False)'}), "('CoreV1EventListDict', {'apiVersion': str, 'items': List[\n CoreV1EventDict], 'kind': str, 'metadata': V1ListMetaDict}, total=False)\n", (227, 362), False, 'from typing import TypedDict, List\n')] |
#!/usr/bin/env python3
# vim: set ft=python:sw=4:ts=4
import os
import sys
# This location is set within the Dockerfile.
sys.path.insert(0, '/opt/infra/lib')
from infra import (
load_definitions_file,
parse_args,
get_org_repo,
cleanup_boilerplate,
write_tf_backend_file,
write_tfvars_file,
run_terraform,
save_outputs,
write_awstf_file,
)
if __name__ == '__main__':
# TODO: Ensure the AWS envvars are set
GLOBALS, SECTIONS = load_definitions_file()
args = parse_args(
legal_sections=SECTIONS.keys(),
)
# TODO: Handle the None,None and the x,'' cases
org, repo = get_org_repo()
# Set ourselves in the right directory. This simplifies the rest of the code
# The directory is either specified in the SECTIONS definition or defaults
# to the section name.
os.chdir(SECTIONS[args.section].get('subdir', args.section))
cleanup_boilerplate()
# There are a very few cases where we don't want to write a TF backend file.
# Specifically, when we're creating the TF backend in the first place.
if not args.no_backend:
write_tf_backend_file(
region=GLOBALS['region'],
bucket=GLOBALS['backend']['bucket_name'],
dynamodb_table=GLOBALS['backend']['dynamodb_table'],
org=org,
repo=repo,
environment=args.environment,
section=args.section,
)
section_values = SECTIONS.get(args.section, {}).get('inputs', {})
tfvars_filename = write_tfvars_file(
GLOBALS=GLOBALS,
# These are the values that all sections must handle
global_values={
"environment": args.environment,
# This will be used by the boilerplate aws.tf file
"region": section_values.get('region', GLOBALS['region']),
},
section_values=section_values,
org=org,
repo=repo,
environment=args.environment,
)
write_awstf_file()
# TODO: Generate the boilerplate aws.tf file with the region variable
# The output subcommand's STDOUT needs to be parseable as JSON.
suppress_verbiage = False
if args.subcmd == 'output':
suppress_verbiage = True
# Always run "terraform init". This is safe.
run_terraform('init',
reconfigure=args.reconfigure,
tfvars_filename=tfvars_filename,
suppress_verbiage=suppress_verbiage,
)
options = []
suppress_input = True
# Force -auto-approve otherwise terraform apply/destroy will error out.
if args.subcmd == 'apply':
options.append('-auto-approve')
elif args.subcmd == 'destroy':
options.append('-auto-approve')
elif args.subcmd == 'output':
# The output subcommand cannot handle the -var-file parameter.
tfvars_filename = None
suppress_input = False
# Always display outputs in JSON
options.append('-json')
# Run the command we were asked to run.
rv = run_terraform(args.subcmd,
options=options,
suppress_input=suppress_input,
tfvars_filename=tfvars_filename,
suppress_verbiage=suppress_verbiage,
)
# TODO: Do something here with rv - it's a CompletedProcess object
# q.v. https://docs.python.org/3/library/subprocess.html#subprocess.CompletedProcess
# TODO: Add a remove_outputs() to be called when destroying
# TODO: Add a read_outputs() to be used when reading
if args.subcmd == 'apply':
save_outputs(
bucket=GLOBALS['backend']['bucket_name'],
org=org,
repo=repo,
environment=args.environment,
section=args.section,
)
cleanup_boilerplate()
# Scripts should be clear when they succeed. A visual statement is helpful.
if not suppress_verbiage:
print("Ok", flush=True)
| [
"infra.write_awstf_file",
"sys.path.insert",
"infra.write_tf_backend_file",
"infra.run_terraform",
"infra.save_outputs",
"infra.get_org_repo",
"infra.cleanup_boilerplate",
"infra.load_definitions_file"
] | [((123, 159), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/opt/infra/lib"""'], {}), "(0, '/opt/infra/lib')\n", (138, 159), False, 'import sys\n'), ((474, 497), 'infra.load_definitions_file', 'load_definitions_file', ([], {}), '()\n', (495, 497), False, 'from infra import load_definitions_file, parse_args, get_org_repo, cleanup_boilerplate, write_tf_backend_file, write_tfvars_file, run_terraform, save_outputs, write_awstf_file\n'), ((637, 651), 'infra.get_org_repo', 'get_org_repo', ([], {}), '()\n', (649, 651), False, 'from infra import load_definitions_file, parse_args, get_org_repo, cleanup_boilerplate, write_tf_backend_file, write_tfvars_file, run_terraform, save_outputs, write_awstf_file\n'), ((910, 931), 'infra.cleanup_boilerplate', 'cleanup_boilerplate', ([], {}), '()\n', (929, 931), False, 'from infra import load_definitions_file, parse_args, get_org_repo, cleanup_boilerplate, write_tf_backend_file, write_tfvars_file, run_terraform, save_outputs, write_awstf_file\n'), ((1972, 1990), 'infra.write_awstf_file', 'write_awstf_file', ([], {}), '()\n', (1988, 1990), False, 'from infra import load_definitions_file, parse_args, get_org_repo, cleanup_boilerplate, write_tf_backend_file, write_tfvars_file, run_terraform, save_outputs, write_awstf_file\n'), ((2284, 2410), 'infra.run_terraform', 'run_terraform', (['"""init"""'], {'reconfigure': 'args.reconfigure', 'tfvars_filename': 'tfvars_filename', 'suppress_verbiage': 'suppress_verbiage'}), "('init', reconfigure=args.reconfigure, tfvars_filename=\n tfvars_filename, suppress_verbiage=suppress_verbiage)\n", (2297, 2410), False, 'from infra import load_definitions_file, parse_args, get_org_repo, cleanup_boilerplate, write_tf_backend_file, write_tfvars_file, run_terraform, save_outputs, write_awstf_file\n'), ((2998, 3146), 'infra.run_terraform', 'run_terraform', (['args.subcmd'], {'options': 'options', 'suppress_input': 'suppress_input', 'tfvars_filename': 'tfvars_filename', 'suppress_verbiage': 'suppress_verbiage'}), '(args.subcmd, options=options, suppress_input=suppress_input,\n tfvars_filename=tfvars_filename, suppress_verbiage=suppress_verbiage)\n', (3011, 3146), False, 'from infra import load_definitions_file, parse_args, get_org_repo, cleanup_boilerplate, write_tf_backend_file, write_tfvars_file, run_terraform, save_outputs, write_awstf_file\n'), ((3705, 3726), 'infra.cleanup_boilerplate', 'cleanup_boilerplate', ([], {}), '()\n', (3724, 3726), False, 'from infra import load_definitions_file, parse_args, get_org_repo, cleanup_boilerplate, write_tf_backend_file, write_tfvars_file, run_terraform, save_outputs, write_awstf_file\n'), ((1125, 1348), 'infra.write_tf_backend_file', 'write_tf_backend_file', ([], {'region': "GLOBALS['region']", 'bucket': "GLOBALS['backend']['bucket_name']", 'dynamodb_table': "GLOBALS['backend']['dynamodb_table']", 'org': 'org', 'repo': 'repo', 'environment': 'args.environment', 'section': 'args.section'}), "(region=GLOBALS['region'], bucket=GLOBALS['backend'][\n 'bucket_name'], dynamodb_table=GLOBALS['backend']['dynamodb_table'],\n org=org, repo=repo, environment=args.environment, section=args.section)\n", (1146, 1348), False, 'from infra import load_definitions_file, parse_args, get_org_repo, cleanup_boilerplate, write_tf_backend_file, write_tfvars_file, run_terraform, save_outputs, write_awstf_file\n'), ((3502, 3632), 'infra.save_outputs', 'save_outputs', ([], {'bucket': "GLOBALS['backend']['bucket_name']", 'org': 'org', 'repo': 'repo', 'environment': 'args.environment', 'section': 'args.section'}), "(bucket=GLOBALS['backend']['bucket_name'], org=org, repo=repo,\n environment=args.environment, section=args.section)\n", (3514, 3632), False, 'from infra import load_definitions_file, parse_args, get_org_repo, cleanup_boilerplate, write_tf_backend_file, write_tfvars_file, run_terraform, save_outputs, write_awstf_file\n')] |
import requests
from crypto_package.conf import service_config as conf
def subscribe_on_topics(currency_pairs:[str], ticker:str, exchange:str):
args = _make_sub_args(currency_pairs, ticker, exchange)
try:
res = requests.post(conf.CANDLE_DATA_SERVICE + conf.EP_SUBSCRIBE, json=args)
except requests.ConnectionError as e:
print("CONNECTION ERROR OCCURRED "+str(e))
return False
if res.status_code != 200:
print("Some exception occurred while connecting to server." + str(res))
return False
return True
def unsubscribe_on_topics(currency_pairs: [str], ticker: str, exchange: str):
args = _make_sub_args(currency_pairs, ticker, exchange)
try:
res = requests.post(conf.CANDLE_DATA_SERVICE + conf.EP_UNSUBSCRIBE, json=args)
except requests.ConnectionError as e:
print("CONNECTION ERROR OCCURRED "+str(e))
return False
if res.status_code != 200:
print("Some exception occurred while connecting to server." + str(res))
return False
return True
def _make_sub_args(currency_pairs: [str], ticker: str, exchange: str):
return {'exchanges': {
exchange: {
ticker: currency_pairs
}
}
}
| [
"requests.post"
] | [((230, 300), 'requests.post', 'requests.post', (['(conf.CANDLE_DATA_SERVICE + conf.EP_SUBSCRIBE)'], {'json': 'args'}), '(conf.CANDLE_DATA_SERVICE + conf.EP_SUBSCRIBE, json=args)\n', (243, 300), False, 'import requests\n'), ((728, 800), 'requests.post', 'requests.post', (['(conf.CANDLE_DATA_SERVICE + conf.EP_UNSUBSCRIBE)'], {'json': 'args'}), '(conf.CANDLE_DATA_SERVICE + conf.EP_UNSUBSCRIBE, json=args)\n', (741, 800), False, 'import requests\n')] |
import unittest
import numpy as np
import os
from optimising import optimise
def test_initial_matrix():
matrix = np.load(os.path.join('initialmatrices', 'U_initial.npy'))
assert len(matrix) == 100
def test_turing_pattern():
pattern = np.load(os.path.join('data', '1.0.npy'))
matrix = np.load(os.path.join('initialmatrices', 'U_initial.npy'))
assert pattern[0][0] != matrix[0][0] or pattern [0][1] != matrix[0][1]
def test_make_noise():
pattern = np.load(os.path.join('data', '1.0.npy'))
noisy = np.load(os.path.join('noisy_data', '1.0.npy'))
equal = 0
for i in range(10):
if pattern[i][i] == noisy[i][i]:
equal += 1
assert equal < 2
def test_optimisation():
a_est, b_est = optimise(time=1.0, a_initial=0.0, a_final=1e-3, b_initial=0.0, b_final=1e-2,
iters=5)
assert 1e-4 < a_est < 1e-3 and 1e-3 < b_est < 1e-2
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"optimising.optimise",
"os.path.join"
] | [((742, 833), 'optimising.optimise', 'optimise', ([], {'time': '(1.0)', 'a_initial': '(0.0)', 'a_final': '(0.001)', 'b_initial': '(0.0)', 'b_final': '(0.01)', 'iters': '(5)'}), '(time=1.0, a_initial=0.0, a_final=0.001, b_initial=0.0, b_final=\n 0.01, iters=5)\n', (750, 833), False, 'from optimising import optimise\n'), ((943, 958), 'unittest.main', 'unittest.main', ([], {}), '()\n', (956, 958), False, 'import unittest\n'), ((127, 175), 'os.path.join', 'os.path.join', (['"""initialmatrices"""', '"""U_initial.npy"""'], {}), "('initialmatrices', 'U_initial.npy')\n", (139, 175), False, 'import os\n'), ((257, 288), 'os.path.join', 'os.path.join', (['"""data"""', '"""1.0.npy"""'], {}), "('data', '1.0.npy')\n", (269, 288), False, 'import os\n'), ((311, 359), 'os.path.join', 'os.path.join', (['"""initialmatrices"""', '"""U_initial.npy"""'], {}), "('initialmatrices', 'U_initial.npy')\n", (323, 359), False, 'import os\n'), ((482, 513), 'os.path.join', 'os.path.join', (['"""data"""', '"""1.0.npy"""'], {}), "('data', '1.0.npy')\n", (494, 513), False, 'import os\n'), ((535, 572), 'os.path.join', 'os.path.join', (['"""noisy_data"""', '"""1.0.npy"""'], {}), "('noisy_data', '1.0.npy')\n", (547, 572), False, 'import os\n')] |
from torch_geometric.nn import GCNConv, ChebConv
#from torch_geometric.nn import SAGEConv
import torch
from torch.nn import BatchNorm1d
import torch.nn.functional as F
import os
os.path.abspath(__file__)
import sys
sys.path.append(".")
from .phygeograph import PhyGeoGrapH
from torch.autograd import grad
class PhyGeoGrapHPDE(torch.nn.Module):
r"""Physics-based Graph Hybrid Neural Network with PDE residual
from the `"Physics-aware deep graph learning for
air quality assessment" to be published in PNAS`_ paper
Args:
in_channels (int or tuple): Size of each input sample. A tuple
corresponds to the sizes of source and target dimensionalities.
ngcnode (int or tuple): The number of features for each local graph convolution layer.
out_channels (int): The number of output features.
nnei (int): The number of local graph convolutions.
autolayersNo (int, optional): The number of hidden layers in full deep network.
weightedmean (bool, optional): If set to :obj:`True`, the weights will be used in graph convolution operations.
gcnout (int, optional): The number of the output features of the last graph convolution layer.
paraout (int, optional): The number of the coefficients for the parameters. (default: :ints:`5`).
nattlayer (int, optional): The number of attention layers. (default: :ints:`4`).
vm_lim (tuple of float, optional): The lower and upper limits for velocity variable. (default: :float:`(-100000,100000)`).
kd_lim (tuple of float, optional): The lower and upper limits for difussion coefficient. (default: :float:`(-100000,100000)`).
pC_lim (tuple of float, optional): The lower and upper limits for difussion coefficient. (default: :float:`(0.0,1.0)`).
The residual is defined as
.. math::
\mathbf{e}_2= \frac {\partial \tilde{\mathbf{C}}} {\partial \mathit{d}}\
+\frac {\partial \tilde{\mathbf{C}}} {\partial \mathit{l}_x} \mathit{v}_{\mathit{l}_x} + \
\frac {\partial \tilde{\mathbf{C}}} {\partial \mathit{l}_y} \mathit{v}_{\mathit{l}_y} - \mathit{pC}} - \
(\frac {\partial^2 \tilde{\mathbf{C}}} {\partial^2 \mathit{l}_x} +(\frac {\partial \tilde{\mathbf{C}}} {\partial \mathit{l}_x})^2)\mathit{p}_{\mathit{l}_x} - \
(\frac {\partial^2 \tilde{\mathbf{C}}} {\partial^2 \mathit{l}_y} +(\frac {\partial \tilde{\mathbf{C}}} {\partial \mathit{l}_y})^2)\mathit{p}_{\mathit{l}_y}
"""
def __init__(self, in_channels, ngcnode, out_channels, nnei, autolayersNo, weightedmean, gcnout,paraout=5,
nattlayer=4,vm_lim=(-100000.0,100000.0),kd_lim=(-100000.0,100000.0),pC_lim=(0.0,1.0)):
super(PhyGeoGrapHPDE, self).__init__()
self.autolayers = torch.nn.ModuleList()
self.bn = torch.nn.ModuleList()
self.atts = torch.nn.ModuleList()
self.attsbn = torch.nn.ModuleList()
self.vm_lim=vm_lim
self.kd_lim=kd_lim
self.gcnmodel = PhyGeoGrapH(in_channels, ngcnode, out_channels, nnei, autolayersNo, weightedmean=weightedmean, gcnout=gcnout,
nattlayer=nattlayer,vm_lim=vm_lim,kd_lim=kd_lim,pC=pC_lim)
if autolayersNo is not None:
if nattlayer is not None:
for i in range(nattlayer):
self.atts.append(torch.nn.Linear(in_channels , in_channels))
self.attsbn.append(torch.nn.BatchNorm1d(in_channels))
self.autolayers.append(torch.nn.Linear(in_channels, autolayersNo[0]))
self.bn.append(torch.nn.BatchNorm1d(autolayersNo[0]))
for i in range(1, len(autolayersNo)):
self.autolayers.append(torch.nn.Linear(autolayersNo[i - 1], autolayersNo[i]))
self.bn.append(torch.nn.BatchNorm1d(autolayersNo[i]))
for i in range(len(autolayersNo) - 2, -1, -1):
self.autolayers.append(torch.nn.Linear(autolayersNo[i + 1], autolayersNo[i]))
self.bn.append(torch.nn.BatchNorm1d(autolayersNo[i]))
self.lastLayer2 = torch.nn.Linear(autolayersNo[0], in_channels )
self.bn.append(torch.nn.BatchNorm1d(in_channels))
self.lastLayer = torch.nn.Linear(in_channels , paraout)
self.autolayersNo = autolayersNo
self.premode=False
self.para=None
def setpremode(self,premode=False):
self.premode=premode
def reset_parameters(self):
for conv in self.convs:
conv.reset_parameters()
def forward(self, x, adjs, xnode):
# `train_loader` computes the k-hop neighborhood of a batch of nodes,
# and returns, for each layer, a bipartite graph object, holding the
# bipartite edges `edge_index`, the index `e_id` of the original edges,
# and the size/shape `size` of the bipartite graph.
# Target nodes are also included in the source nodes so that one can
# easily apply skip-connections or add self-loops.
out = self.gcnmodel(x, adjs, xnode )
if self.premode:
return out
all_g1 = grad(out, xnode, grad_outputs=torch.ones_like(out), create_graph=True,retain_graph=True)[0]
all_g2 = grad(all_g1, xnode,grad_outputs=torch.ones_like(all_g1),retain_graph=True)[0]
lat_g1 = all_g1[:,0]
lat_g2 = all_g2[:,0]
lon_g1 = all_g1[:,1]
lon_g2 = all_g2[:,1]
con_gt = all_g1[:,2]
res = []
if len(self.autolayers) > 0:
xin = xnode
#if self.nattlayer is not None:
res.append(xin)
x = F.relu(self.autolayers[0](xin))
x = self.bn[0](x)
for i in range(1, len(self.autolayers)):
if i <= len(self.autolayersNo) - 1:
res.append(x)
x = F.relu(self.autolayers[i](x))
x = self.bn[i](x)
if i >= len(self.autolayersNo):
x = x + res.pop()
x = self.lastLayer2(x)
x = self.bn[i + 1](F.relu(x))
x = x + res.pop()
self.para = self.lastLayer(x)
res=con_gt + self.para[:,0]*lat_g1 + self.para[:,1]*lon_g1 - self.para[:,2]*(lat_g2+lat_g1*lat_g1) - \
self.para[:,3]*(lon_g2+lon_g1*lon_g1) - self.para[:,4]
return out, res
| [
"torch.ones_like",
"torch.nn.ModuleList",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.nn.functional.relu",
"os.path.abspath",
"sys.path.append"
] | [((178, 203), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (193, 203), False, 'import os\n'), ((215, 235), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (230, 235), False, 'import sys\n'), ((2829, 2850), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (2848, 2850), False, 'import torch\n'), ((2869, 2890), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (2888, 2890), False, 'import torch\n'), ((2911, 2932), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (2930, 2932), False, 'import torch\n'), ((2955, 2976), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (2974, 2976), False, 'import torch\n'), ((4140, 4185), 'torch.nn.Linear', 'torch.nn.Linear', (['autolayersNo[0]', 'in_channels'], {}), '(autolayersNo[0], in_channels)\n', (4155, 4185), False, 'import torch\n'), ((4278, 4315), 'torch.nn.Linear', 'torch.nn.Linear', (['in_channels', 'paraout'], {}), '(in_channels, paraout)\n', (4293, 4315), False, 'import torch\n'), ((3560, 3605), 'torch.nn.Linear', 'torch.nn.Linear', (['in_channels', 'autolayersNo[0]'], {}), '(in_channels, autolayersNo[0])\n', (3575, 3605), False, 'import torch\n'), ((3634, 3671), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['autolayersNo[0]'], {}), '(autolayersNo[0])\n', (3654, 3671), False, 'import torch\n'), ((4214, 4247), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['in_channels'], {}), '(in_channels)\n', (4234, 4247), False, 'import torch\n'), ((6098, 6107), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (6104, 6107), True, 'import torch.nn.functional as F\n'), ((3762, 3815), 'torch.nn.Linear', 'torch.nn.Linear', (['autolayersNo[i - 1]', 'autolayersNo[i]'], {}), '(autolayersNo[i - 1], autolayersNo[i])\n', (3777, 3815), False, 'import torch\n'), ((3848, 3885), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['autolayersNo[i]'], {}), '(autolayersNo[i])\n', (3868, 3885), False, 'import torch\n'), ((3985, 4038), 'torch.nn.Linear', 'torch.nn.Linear', (['autolayersNo[i + 1]', 'autolayersNo[i]'], {}), '(autolayersNo[i + 1], autolayersNo[i])\n', (4000, 4038), False, 'import torch\n'), ((4071, 4108), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['autolayersNo[i]'], {}), '(autolayersNo[i])\n', (4091, 4108), False, 'import torch\n'), ((5193, 5213), 'torch.ones_like', 'torch.ones_like', (['out'], {}), '(out)\n', (5208, 5213), False, 'import torch\n'), ((5304, 5327), 'torch.ones_like', 'torch.ones_like', (['all_g1'], {}), '(all_g1)\n', (5319, 5327), False, 'import torch\n'), ((3407, 3448), 'torch.nn.Linear', 'torch.nn.Linear', (['in_channels', 'in_channels'], {}), '(in_channels, in_channels)\n', (3422, 3448), False, 'import torch\n'), ((3490, 3523), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['in_channels'], {}), '(in_channels)\n', (3510, 3523), False, 'import torch\n')] |
#########
# GLOBALS
#########
from itertools import islice
import pandas as pd
import dateutil.parser as dp
from scipy.stats import boxcox
from realtime_talib import Indicator
#from nltk import word_tokenize
#from nltk.corpus import stopwords
#from nltk.stem.porter import *
#from scipy.integrate import simps
#from sklearn.model_selection import train_test_split
#from sklearn.utils import resample
#from selenium import webdriver
RANDOM_STATE = 42
#######################
# GENERAL PREPROCESSORS
#######################
def calculate_indicators(ohlcv_df):
ohlcv_df = ohlcv_df.drop(["Volume (BTC)", "Weighted Price"], axis=1)
ohlcv_df.columns = ["Date", "Open", "High", "Low", "Close", "Volume"]
temp_ohlcv_df = ohlcv_df.copy()
# Converts ISO 8601 timestamps to UNIX
unix_times = [int(dp.parse(temp_ohlcv_df.iloc[index]["Date"]).strftime('%s')) for index in range(temp_ohlcv_df.shape[0])]
temp_ohlcv_df["Date"] = pd.Series(unix_times).values
# Converts column headers to lowercase and sorts rows in chronological order
temp_ohlcv_df.columns = ["date", "open", "high", "low", "close", "volume"]
temp_ohlcv_df = temp_ohlcv_df.iloc[::-1]
# Rate of Change Ratio
rocr3 = Indicator(temp_ohlcv_df, "ROCR", 3).getHistorical()[::-1]
rocr6 = Indicator(temp_ohlcv_df, "ROCR", 6).getHistorical()[::-1]
# Average True Range
atr = Indicator(temp_ohlcv_df, "ATR", 14).getHistorical()[::-1]
# On-Balance Volume
obv = Indicator(temp_ohlcv_df, "OBV").getHistorical()[::-1]
# Triple Exponential Moving Average
trix = Indicator(temp_ohlcv_df, "TRIX", 20).getHistorical()[::-1]
# Momentum
mom1 = Indicator(temp_ohlcv_df, "MOM", 1).getHistorical()[::-1]
mom3 = Indicator(temp_ohlcv_df, "MOM", 3).getHistorical()[::-1]
# Average Directional Index
adx14 = Indicator(temp_ohlcv_df, "ADX", 14).getHistorical()[::-1]
adx20 = Indicator(temp_ohlcv_df, "ADX", 20).getHistorical()[::-1]
# Williams %R
willr = Indicator(temp_ohlcv_df, "WILLR", 14).getHistorical()[::-1]
# Relative Strength Index
rsi6 = Indicator(temp_ohlcv_df, "RSI", 6).getHistorical()[::-1]
rsi12 = Indicator(temp_ohlcv_df, "RSI", 12).getHistorical()[::-1]
# Moving Average Convergence Divergence
macd, macd_signal, macd_hist = Indicator(
temp_ohlcv_df, "MACD", 12, 26, 9).getHistorical()
macd, macd_signal, macd_hist = macd[::-
1], macd_signal[::-1], macd_hist[::-1]
# Exponential Moving Average
ema6 = Indicator(temp_ohlcv_df, "MA", 6, 1).getHistorical()[::-1]
ema12 = Indicator(temp_ohlcv_df, "MA", 12, 1).getHistorical()[::-1]
# Append indicators to the input datasets
min_length = min(len(mom1), len(mom3), len(adx14), len(adx20), len(willr),
len(rsi6), len(rsi12), len(macd), len(
macd_signal), len(macd_hist),
len(ema6), len(ema12), len(rocr3), len(rocr6), len(atr), len(obv), len(trix))
ohlcv_df = ohlcv_df[:min_length].drop(["Open", "High", "Low"], axis=1)
ohlcv_df["MOM (1)"] = pd.Series(mom1[:min_length]).values
ohlcv_df["MOM (3)"] = pd.Series(mom3[:min_length]).values
ohlcv_df["ADX (14)"] = pd.Series(adx14[:min_length]).values
ohlcv_df["ADX (20)"] = pd.Series(adx20[:min_length]).values
ohlcv_df["WILLR"] = pd.Series(willr[:min_length]).values
ohlcv_df["RSI (6)"] = pd.Series(rsi6[:min_length]).values
ohlcv_df["RSI (12)"] = pd.Series(rsi12[:min_length]).values
ohlcv_df["MACD"] = pd.Series(macd[:min_length]).values
ohlcv_df["MACD (Signal)"] = pd.Series(macd_signal[:min_length]).values
ohlcv_df["MACD (Historical)"] = pd.Series(macd_hist[:min_length]).values
ohlcv_df["EMA (6)"] = pd.Series(ema6[:min_length]).values
ohlcv_df["EMA (12)"] = pd.Series(ema12[:min_length]).values
ohlcv_df["ROCR (3)"] = pd.Series(rocr3[:min_length]).values
ohlcv_df["ROCR (6)"] = pd.Series(rocr6[:min_length]).values
ohlcv_df["ATR (14)"] = pd.Series(atr[:min_length]).values
ohlcv_df["OBV"] = pd.Series(obv[:min_length]).values
ohlcv_df["TRIX (20)"] = pd.Series(trix[:min_length]).values
return ohlcv_df
def merge_datasets(origin_df, other_sets):
merged = origin_df
for set in other_sets:
merged = pd.merge(merged, set, on="Date")
return merged
def fix_null_vals(df):
if not df.isnull().any().any():
return df
else:
return df.fillna(method="ffill")
def add_lag_vars(df, lag=3):
new_df_dict = {}
for col_header in df.drop("Date", axis=1):
new_df_dict[col_header] = df[col_header]
for lag in range(1, lag + 1):
new_df_dict["%s_lag%d" %
(col_header, lag)] = df[col_header].shift(-lag)
new_df = pd.DataFrame(new_df_dict, index=df.index)
new_df["Date"] = df["Date"]
return new_df.dropna()
def power_transform(df):
for header in df.drop("Date", axis=1).columns:
if not any(df[header] < 0) and not any(df[header] == 0):
df[header] = boxcox(df[header])[0]
return df
def binarize_labels(df):
trends = [None]
for idx in range(df.shape[0] - 1):
diff = df.iloc[idx]["Close"] - df.iloc[idx + 1]["Close"]
if diff < 0:
trends.append(-1)
else:
trends.append(1)
df["Trend"] = pd.Series(trends).values
# df = df.drop(df.index[0])
return df
def recursive_feature_elim(df):
return df
####################
# TEXT PREPROCESSORS
####################
# TODO: All yours, @alichtman
######
# MAIN
######
def transformer(name):
if name == "calculate_indicators":
return calculate_indicators
elif name == "merge_datasets":
return merge_datasets
elif name == "binarize_labels":
return binarize_labels
elif name == "fix_null_vals":
return fix_null_vals
elif name == "add_lag_vars":
return add_lag_vars
elif name == "power_transform":
return power_transform
elif name == "select_features":
return recursive_feature_elim
| [
"pandas.Series",
"dateutil.parser.parse",
"scipy.stats.boxcox",
"pandas.merge",
"realtime_talib.Indicator",
"pandas.DataFrame"
] | [((4804, 4845), 'pandas.DataFrame', 'pd.DataFrame', (['new_df_dict'], {'index': 'df.index'}), '(new_df_dict, index=df.index)\n', (4816, 4845), True, 'import pandas as pd\n'), ((947, 968), 'pandas.Series', 'pd.Series', (['unix_times'], {}), '(unix_times)\n', (956, 968), True, 'import pandas as pd\n'), ((3119, 3147), 'pandas.Series', 'pd.Series', (['mom1[:min_length]'], {}), '(mom1[:min_length])\n', (3128, 3147), True, 'import pandas as pd\n'), ((3181, 3209), 'pandas.Series', 'pd.Series', (['mom3[:min_length]'], {}), '(mom3[:min_length])\n', (3190, 3209), True, 'import pandas as pd\n'), ((3244, 3273), 'pandas.Series', 'pd.Series', (['adx14[:min_length]'], {}), '(adx14[:min_length])\n', (3253, 3273), True, 'import pandas as pd\n'), ((3308, 3337), 'pandas.Series', 'pd.Series', (['adx20[:min_length]'], {}), '(adx20[:min_length])\n', (3317, 3337), True, 'import pandas as pd\n'), ((3369, 3398), 'pandas.Series', 'pd.Series', (['willr[:min_length]'], {}), '(willr[:min_length])\n', (3378, 3398), True, 'import pandas as pd\n'), ((3432, 3460), 'pandas.Series', 'pd.Series', (['rsi6[:min_length]'], {}), '(rsi6[:min_length])\n', (3441, 3460), True, 'import pandas as pd\n'), ((3495, 3524), 'pandas.Series', 'pd.Series', (['rsi12[:min_length]'], {}), '(rsi12[:min_length])\n', (3504, 3524), True, 'import pandas as pd\n'), ((3555, 3583), 'pandas.Series', 'pd.Series', (['macd[:min_length]'], {}), '(macd[:min_length])\n', (3564, 3583), True, 'import pandas as pd\n'), ((3623, 3658), 'pandas.Series', 'pd.Series', (['macd_signal[:min_length]'], {}), '(macd_signal[:min_length])\n', (3632, 3658), True, 'import pandas as pd\n'), ((3702, 3735), 'pandas.Series', 'pd.Series', (['macd_hist[:min_length]'], {}), '(macd_hist[:min_length])\n', (3711, 3735), True, 'import pandas as pd\n'), ((3769, 3797), 'pandas.Series', 'pd.Series', (['ema6[:min_length]'], {}), '(ema6[:min_length])\n', (3778, 3797), True, 'import pandas as pd\n'), ((3832, 3861), 'pandas.Series', 'pd.Series', (['ema12[:min_length]'], {}), '(ema12[:min_length])\n', (3841, 3861), True, 'import pandas as pd\n'), ((3896, 3925), 'pandas.Series', 'pd.Series', (['rocr3[:min_length]'], {}), '(rocr3[:min_length])\n', (3905, 3925), True, 'import pandas as pd\n'), ((3960, 3989), 'pandas.Series', 'pd.Series', (['rocr6[:min_length]'], {}), '(rocr6[:min_length])\n', (3969, 3989), True, 'import pandas as pd\n'), ((4024, 4051), 'pandas.Series', 'pd.Series', (['atr[:min_length]'], {}), '(atr[:min_length])\n', (4033, 4051), True, 'import pandas as pd\n'), ((4081, 4108), 'pandas.Series', 'pd.Series', (['obv[:min_length]'], {}), '(obv[:min_length])\n', (4090, 4108), True, 'import pandas as pd\n'), ((4144, 4172), 'pandas.Series', 'pd.Series', (['trix[:min_length]'], {}), '(trix[:min_length])\n', (4153, 4172), True, 'import pandas as pd\n'), ((4313, 4345), 'pandas.merge', 'pd.merge', (['merged', 'set'], {'on': '"""Date"""'}), "(merged, set, on='Date')\n", (4321, 4345), True, 'import pandas as pd\n'), ((5376, 5393), 'pandas.Series', 'pd.Series', (['trends'], {}), '(trends)\n', (5385, 5393), True, 'import pandas as pd\n'), ((2309, 2352), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""MACD"""', '(12)', '(26)', '(9)'], {}), "(temp_ohlcv_df, 'MACD', 12, 26, 9)\n", (2318, 2352), False, 'from realtime_talib import Indicator\n'), ((1222, 1257), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""ROCR"""', '(3)'], {}), "(temp_ohlcv_df, 'ROCR', 3)\n", (1231, 1257), False, 'from realtime_talib import Indicator\n'), ((1292, 1327), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""ROCR"""', '(6)'], {}), "(temp_ohlcv_df, 'ROCR', 6)\n", (1301, 1327), False, 'from realtime_talib import Indicator\n'), ((1386, 1421), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""ATR"""', '(14)'], {}), "(temp_ohlcv_df, 'ATR', 14)\n", (1395, 1421), False, 'from realtime_talib import Indicator\n'), ((1479, 1510), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""OBV"""'], {}), "(temp_ohlcv_df, 'OBV')\n", (1488, 1510), False, 'from realtime_talib import Indicator\n'), ((1585, 1621), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""TRIX"""', '(20)'], {}), "(temp_ohlcv_df, 'TRIX', 20)\n", (1594, 1621), False, 'from realtime_talib import Indicator\n'), ((1671, 1705), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""MOM"""', '(1)'], {}), "(temp_ohlcv_df, 'MOM', 1)\n", (1680, 1705), False, 'from realtime_talib import Indicator\n'), ((1739, 1773), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""MOM"""', '(3)'], {}), "(temp_ohlcv_df, 'MOM', 3)\n", (1748, 1773), False, 'from realtime_talib import Indicator\n'), ((1841, 1876), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""ADX"""', '(14)'], {}), "(temp_ohlcv_df, 'ADX', 14)\n", (1850, 1876), False, 'from realtime_talib import Indicator\n'), ((1911, 1946), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""ADX"""', '(20)'], {}), "(temp_ohlcv_df, 'ADX', 20)\n", (1920, 1946), False, 'from realtime_talib import Indicator\n'), ((2000, 2037), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""WILLR"""', '(14)'], {}), "(temp_ohlcv_df, 'WILLR', 14)\n", (2009, 2037), False, 'from realtime_talib import Indicator\n'), ((2102, 2136), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""RSI"""', '(6)'], {}), "(temp_ohlcv_df, 'RSI', 6)\n", (2111, 2136), False, 'from realtime_talib import Indicator\n'), ((2171, 2206), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""RSI"""', '(12)'], {}), "(temp_ohlcv_df, 'RSI', 12)\n", (2180, 2206), False, 'from realtime_talib import Indicator\n'), ((2546, 2582), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""MA"""', '(6)', '(1)'], {}), "(temp_ohlcv_df, 'MA', 6, 1)\n", (2555, 2582), False, 'from realtime_talib import Indicator\n'), ((2617, 2654), 'realtime_talib.Indicator', 'Indicator', (['temp_ohlcv_df', '"""MA"""', '(12)', '(1)'], {}), "(temp_ohlcv_df, 'MA', 12, 1)\n", (2626, 2654), False, 'from realtime_talib import Indicator\n'), ((5074, 5092), 'scipy.stats.boxcox', 'boxcox', (['df[header]'], {}), '(df[header])\n', (5080, 5092), False, 'from scipy.stats import boxcox\n'), ((815, 858), 'dateutil.parser.parse', 'dp.parse', (["temp_ohlcv_df.iloc[index]['Date']"], {}), "(temp_ohlcv_df.iloc[index]['Date'])\n", (823, 858), True, 'import dateutil.parser as dp\n')] |
#!/usr/bin/env python
# coding: utf-8
"""
Script to train a resnet
to determine if a Stokes-I radio cutout
contains a giant radio galaxy candidate.
Copyright (c) 2022 <NAME>
See LICENSE.md in root directory for full BSD-3 license.
Adapted from
Author: <NAME>
License: BSD
Source: https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
"""
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import WeightedRandomSampler
import numpy as np
import pandas as pd
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import shutil
from collections import Counter
from astropy.coordinates import SkyCoord
import astropy.units as u
from datetime import datetime
import socket
# # Load Data
dataset_name = 'cutouts_res6arcsec_destsize350arcsec_nfields100'
model_name='resnet101'
data_inspection=False
start = time.time()
print('-'*80)
print('Giants Resnet training script')
print('-'*80)
hostname = socket.gethostname()
print("Data- and save-paths set based on host:", hostname)
if hostname.startswith('lgm4'):
base_path = '/data1/mostertrij/data/giants'
elif hostname.endswith('liacs.nl'):
base_path = '/data/mostertrij/data/giants'
elif hostname.startswith('kafka'):
base_path = '/home/rafael/data/mostertrij/data/giants'
else:
print("Edit this script to include the correct paths for your machine:", hostname)
quit()
data_dir = os.path.join(base_path, dataset_name)
trained_dir = os.path.join(base_path, 'trained_models')
os.makedirs(trained_dir,exist_ok=True)
print("Assuming dataset is located at:", data_dir)
print("Saving trained models at:", trained_dir)
# Data augmentation and normalization for training
# Just normalization for validation
print("\nLoad data")
image_dimension_before_rotation = 400
image_dimension = int(np.floor(image_dimension_before_rotation/np.sqrt(2)))
print("Image dimension before and after rotation in pixels:", image_dimension_before_rotation,
image_dimension)
"""
torchvision.transforms.Normalize(mean, std, inplace=False)[source]
Normalize a tensor image with mean and standard deviation.
This transform does not support PIL Image.
Given mean: (mean[1],...,mean[n]) and std: (std[1],..,std[n]) for n channels,
this transform will normalize each channel of the input torch.
*Tensor i.e., output[channel] = (input[channel] - mean[channel]) / std[channel]
"""
data_mean = [0.2460, 0.6437, 0.4650]
data_std = [0.1285, 0.1169, 0.0789]
data_transforms = {
'train': transforms.Compose([
transforms.RandomHorizontalFlip(),
# Transforms for the giants
transforms.RandomVerticalFlip(),
transforms.RandomRotation((-180,180),expand=False),
transforms.Resize(image_dimension_before_rotation),
transforms.CenterCrop(image_dimension),
#transforms.RandomResizedCrop(image_dimension),
# #interpolation=<InterpolationMode.NEAREST: 'nearest'>, ),
#transforms.RandomGrayscale(p=1), # For BEES only!
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
transforms.Normalize(data_mean, data_std )
]),
'val': transforms.Compose([
transforms.Resize(image_dimension_before_rotation),
transforms.CenterCrop(image_dimension),
#transforms.RandomGrayscale(p=1), # For BEES only!
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
transforms.Normalize(data_mean, data_std )
]),
}
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
# total_num_images = np.sum([len(image_datasets[x])
# for x in ['train', 'val']])
# weights = {x: total_num_images/len(image_datasets[x])
# for x in ['train', 'val']}
target_list = [t for _, t in image_datasets['train'].samples]
target_dict = Counter(target_list)
print(target_dict)
class_weights = [1/target_dict[t] for t in target_list]
target_list = torch.tensor(target_list)
weighted_sampler = WeightedRandomSampler(
weights=class_weights,
num_samples=len(image_datasets['train']),
replacement=True)
dataloaders = {'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size=4,
sampler=weighted_sampler,num_workers=4),
'val': torch.utils.data.DataLoader(image_datasets['val'], batch_size=4,
shuffle=True, num_workers=4)}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
inp = data_std * inp + data_mean
inp = np.clip(inp, 0, 1)
plt.figure(figsize=(15,5))
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.savefig(0.001) # pause a bit so that plots are updated
if data_inspection:
print(f"Showing training input examples (data_inspection={data_inspection})")
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
else:
print(f"Not showing training input examples (data_inspection={data_inspection})")
# # Training the model
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
torch.save(model.state_dict(), os.path.join(trained_dir,
f'model_weights_{model_name}_{dataset_name}_{datetime.today().date().isoformat()}.pth'))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
# In[8]:
def visualize_model(model, num_images=6):
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure(figsize=(8,8))
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloaders['val']):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot(num_images//2, 2, images_so_far)
ax.axis('off')
ax.set_title('predicted: {}'.format(class_names[preds[j]]))
imshow(inputs.cpu().data[j])
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
if model_name == 'resnet101':
print("\nCreating a resnet101 model and load pretrained weights")
model_ft = models.resnet101(pretrained=True)
else:
print("\nCreating a resnet18 model and load pretrained weights")
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
# Here the size of each output sample is set to 2.
# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=10, gamma=0.5)
# # Train
print("\nTrain model")
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=50)
print("\nSave final model")
torch.save(model_ft.state_dict(), os.path.join(trained_dir,
f'final_model_weights_{model_name}_{dataset_name}_{datetime.now().isoformat()}.pth'))
print(f"Done. Time taken: {time.time()-start:.1f} sec.")
| [
"numpy.clip",
"numpy.sqrt",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torchvision.models.resnet18",
"torch.cuda.is_available",
"torch.sum",
"datetime.datetime.today",
"torchvision.utils.make_grid",
"matplotlib.pyplot.imshow",
"torch.set_grad_enabled",
"torchvision.transforms.ToTensor",
"soc... | [((979, 990), 'time.time', 'time.time', ([], {}), '()\n', (988, 990), False, 'import time\n'), ((1070, 1090), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1088, 1090), False, 'import socket\n'), ((1525, 1562), 'os.path.join', 'os.path.join', (['base_path', 'dataset_name'], {}), '(base_path, dataset_name)\n', (1537, 1562), False, 'import os\n'), ((1577, 1618), 'os.path.join', 'os.path.join', (['base_path', '"""trained_models"""'], {}), "(base_path, 'trained_models')\n", (1589, 1618), False, 'import os\n'), ((1619, 1658), 'os.makedirs', 'os.makedirs', (['trained_dir'], {'exist_ok': '(True)'}), '(trained_dir, exist_ok=True)\n', (1630, 1658), False, 'import os\n'), ((4124, 4144), 'collections.Counter', 'Counter', (['target_list'], {}), '(target_list)\n', (4131, 4144), False, 'from collections import Counter\n'), ((4234, 4259), 'torch.tensor', 'torch.tensor', (['target_list'], {}), '(target_list)\n', (4246, 4259), False, 'import torch\n'), ((9502, 9524), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', '(2)'], {}), '(num_ftrs, 2)\n', (9511, 9524), True, 'import torch.nn as nn\n'), ((9570, 9591), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (9589, 9591), True, 'import torch.nn as nn\n'), ((9780, 9838), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer_ft'], {'step_size': '(10)', 'gamma': '(0.5)'}), '(optimizer_ft, step_size=10, gamma=0.5)\n', (9799, 9838), False, 'from torch.optim import lr_scheduler\n'), ((4423, 4535), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["image_datasets['train']"], {'batch_size': '(4)', 'sampler': 'weighted_sampler', 'num_workers': '(4)'}), "(image_datasets['train'], batch_size=4, sampler=\n weighted_sampler, num_workers=4)\n", (4450, 4535), False, 'import torch\n'), ((4571, 4669), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["image_datasets['val']"], {'batch_size': '(4)', 'shuffle': '(True)', 'num_workers': '(4)'}), "(image_datasets['val'], batch_size=4, shuffle=\n True, num_workers=4)\n", (4598, 4669), False, 'import torch\n'), ((5024, 5042), 'numpy.clip', 'np.clip', (['inp', '(0)', '(1)'], {}), '(inp, 0, 1)\n', (5031, 5042), True, 'import numpy as np\n'), ((5047, 5074), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (5057, 5074), True, 'import matplotlib.pyplot as plt\n'), ((5078, 5093), 'matplotlib.pyplot.imshow', 'plt.imshow', (['inp'], {}), '(inp)\n', (5088, 5093), True, 'import matplotlib.pyplot as plt\n'), ((5149, 5167), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(0.001)'], {}), '(0.001)\n', (5160, 5167), True, 'import matplotlib.pyplot as plt\n'), ((5443, 5478), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['inputs'], {}), '(inputs)\n', (5470, 5478), False, 'import torchvision\n'), ((5739, 5750), 'time.time', 'time.time', ([], {}), '()\n', (5748, 5750), False, 'import time\n'), ((8301, 8327), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (8311, 8327), True, 'import matplotlib.pyplot as plt\n'), ((9164, 9197), 'torchvision.models.resnet101', 'models.resnet101', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (9180, 9197), False, 'from torchvision import datasets, models, transforms\n'), ((9288, 9320), 'torchvision.models.resnet18', 'models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (9303, 9320), False, 'from torchvision import datasets, models, transforms\n'), ((3680, 3705), 'os.path.join', 'os.path.join', (['data_dir', 'x'], {}), '(data_dir, x)\n', (3692, 3705), False, 'import os\n'), ((4837, 4862), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4860, 4862), False, 'import torch\n'), ((5128, 5144), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5137, 5144), True, 'import matplotlib.pyplot as plt\n'), ((7898, 7909), 'time.time', 'time.time', ([], {}), '()\n', (7907, 7909), False, 'import time\n'), ((8337, 8352), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8350, 8352), False, 'import torch\n'), ((1967, 1977), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1974, 1977), True, 'import numpy as np\n'), ((2636, 2669), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2667, 2669), False, 'from torchvision import datasets, models, transforms\n'), ((2715, 2746), 'torchvision.transforms.RandomVerticalFlip', 'transforms.RandomVerticalFlip', ([], {}), '()\n', (2744, 2746), False, 'from torchvision import datasets, models, transforms\n'), ((2757, 2809), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(-180, 180)'], {'expand': '(False)'}), '((-180, 180), expand=False)\n', (2782, 2809), False, 'from torchvision import datasets, models, transforms\n'), ((2817, 2867), 'torchvision.transforms.Resize', 'transforms.Resize', (['image_dimension_before_rotation'], {}), '(image_dimension_before_rotation)\n', (2834, 2867), False, 'from torchvision import datasets, models, transforms\n'), ((2877, 2915), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_dimension'], {}), '(image_dimension)\n', (2898, 2915), False, 'from torchvision import datasets, models, transforms\n'), ((3111, 3132), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3130, 3132), False, 'from torchvision import datasets, models, transforms\n'), ((3218, 3259), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['data_mean', 'data_std'], {}), '(data_mean, data_std)\n', (3238, 3259), False, 'from torchvision import datasets, models, transforms\n'), ((3310, 3360), 'torchvision.transforms.Resize', 'transforms.Resize', (['image_dimension_before_rotation'], {}), '(image_dimension_before_rotation)\n', (3327, 3360), False, 'from torchvision import datasets, models, transforms\n'), ((3370, 3408), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_dimension'], {}), '(image_dimension)\n', (3391, 3408), False, 'from torchvision import datasets, models, transforms\n'), ((3477, 3498), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3496, 3498), False, 'from torchvision import datasets, models, transforms\n'), ((3584, 3625), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['data_mean', 'data_std'], {}), '(data_mean, data_std)\n', (3604, 3625), False, 'from torchvision import datasets, models, transforms\n'), ((8559, 8580), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (8568, 8580), False, 'import torch\n'), ((7147, 7178), 'torch.sum', 'torch.sum', (['(preds == labels.data)'], {}), '(preds == labels.data)\n', (7156, 7178), False, 'import torch\n'), ((8684, 8730), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(num_images // 2)', '(2)', 'images_so_far'], {}), '(num_images // 2, 2, images_so_far)\n', (8695, 8730), True, 'import matplotlib.pyplot as plt\n'), ((10195, 10206), 'time.time', 'time.time', ([], {}), '()\n', (10204, 10206), False, 'import time\n'), ((6636, 6676), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (6658, 6676), False, 'import torch\n'), ((6753, 6774), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (6762, 6774), False, 'import torch\n'), ((10132, 10146), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10144, 10146), False, 'from datetime import datetime\n'), ((7817, 7833), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (7831, 7833), False, 'from datetime import datetime\n')] |
#!/usr/bin/python -u
#
# create STAR files for "production" validator
#
#
#
from __future__ import absolute_import
import sys
import os
import sqlite3
import ConfigParser
import re
import pprint
if __package__ is None :
__package__ = "nmr-star-dictionary-scripts"
sys.path.append( os.path.abspath( os.path.join( os.path.split( __file__ )[0], ".." ) ) )
from scripts import BaseClass as BaseClass, quote4star as quote4star
else :
from . import BaseClass, quote4star
#
#
class ValidatorWriter( BaseClass ) :
"""Create dictionary file for interactive' validator"""
TEXTTYPE = "TEXT" # "VARCHAR(10000)" # DB type for text fields -- needed to be varchar for oracle/older postgres
# mode is an index into a (6-) character string of flags. so we can have different ones.
# never used anything other than 0 & 1
#
#MODE = 1 # BMRB annotation
#MODE = 0 # BMRB released
#
# we don't use mode 0 anymore either as there's different code for releasing entries now
# filename for mode 1 is validict.3 -- for histerical raisins
# can set self._mode if need to, but you'll need to rename the output file afterwards
#
DICTMODE = 1
OUTFILE = "validict.3.str"
# tbles to print out
#
TABLES = ["DATUMTYPES", "INFO", "SFCATS", "SFMANENUM", "STARCH", "TAGDEPS",
"TAGMANENUM", "TAGRELS", "TAGS", "VALENUMS", "VALTYPENUM" ]
# main
#
#
@classmethod
def create_validator_dictionary( cls, props, dburl = None, verbose = False ) :
obj = cls( verbose = verbose )
obj.config = props
outdir = os.path.realpath( props.get( "validict", "output_dir" ) )
if not os.path.isdir( outdir ) :
os.makedirs( outdir )
obj.connect()
obj.create_tables()
obj.attach( url = dburl )
obj.make_info()
obj.load_sfcats()
obj.load_tags()
obj.fix_loopmandatory()
# obj._verbose = True
obj.load_overrides()
# obj._verbose = False
obj.load_parent_child()
obj.fix_experiment_names()
obj.update_sf_links()
obj.load_datum_types()
obj.load_starch_table()
obj.update_enums()
obj.detach()
if len( obj.errors ) > 0 :
for e in obj.errors :
sys.stderr.write( e + "\n" )
obj.print_dictionary()
return obj
#
#
def __init__( self, *args, **kwargs ) :
super( self.__class__, self ).__init__( *args, **kwargs )
self._curs = None
self._mode = self.DICTMODE
self.errors = []
# we attach the "main" db as a sub-schema
#
def connect( self ) :
self._db = sqlite3.connect( ":memory:" )
self._curs = self._db.cursor()
self._curs2 = self._db.cursor()
def attach( self, url ) :
if url is None :
assert isinstance( self._props, ConfigParser.SafeConfigParser )
url = self._props.get( "dictionary", "sqlite3.file" )
self._curs.execute( "attach '%s' as star" % (url,) )
# self._db.isolation_level = None # -- do I need this anymore?
# commit and detach main database
#
def detach( self ) :
self._db.commit()
self._curs.execute( "detach star" )
####################################################################################################
#
#
#
def create_tables( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".create_tables()\n" )
ddl = self._props.get( "validict", "scriptfile" )
with open( ddl, "rU" ) as fin :
script = "".join( i for i in fin )
self._curs.executescript( script )
####################################################################################################
# fill version table
# after create_tables() and attaching main DB
#
def make_info( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".make_info()\n" )
self._curs.execute( "delete from info" )
self._curs.execute( "select defaultvalue from star.dict where originaltag='_Entry.NMR_STAR_version'" )
row = self._curs.fetchone()
if row is None :
raise Exception( "Error: no dictionary version" )
self._curs.execute( "insert into info (dmode, version, dictflag) values (:mod,:vers,'Y')",
{ "mod" : self.DICTMODE, "vers" : str( row[0] ).strip() } )
####################################################################################################
# Load saveframe categories table
#
def load_sfcats( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".load_sfcats()\n" )
qry = "select dictionaryseq from star.dict where originalcategory=:cat order by dictionaryseq limit 1"
sql = "insert into sfcats (id,sfcat,uniq,mandatory) values (:id,:cat,:uniq,:man)"
params = {}
self._curs.execute( "select sfcategory,validateflgs,aditreplicable from aditcatgrp" )
while True :
params.clear()
row = self._curs.fetchone()
if row is None : break
# this is for sorting the saveframes in the "proper" tag order: order's different (wrong) in the source
#
params["cat"] = row[0]
self._curs2.execute( qry, params )
if self.verbose :
sys.stdout.write( qry + "\n" )
pprint.pprint( params )
seqrow = self._curs2.fetchone()
if seqrow is None :
raise Exception( "No dictionary sequence for category %s" % (row[0],) )
if (seqrow is None) or (str( seqrow[0] ).strip() == "") :
raise Exception( "Empty dictionary sequence for category %s" % (row[0],) )
params["id"] = str( seqrow[0] ).strip()
params["man"] = row[1].strip().upper()[self._mode:self._mode + 1]
if row[2].strip().lower()[0:1] == "y" : params["uniq"] = "N"
else : params["uniq"] = "Y"
if self.verbose :
sys.stdout.write( sql + "\n" )
pprint.pprint( params )
self._curs2.execute( sql, params )
####################################################################################################
# Load tags table
#
def load_tags( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".load_tags()\n" )
sql = "insert into tags (seq,sfcat,tagname,tagcat,dbtable,dbcolumn,dbtype,dbnotnull," \
+ "dbpk,dbfktable,dbfkcolumn,dbfkgroup,valtype,valsize,mandatory,tagdepflag," \
+ "enumclosedflag,rowidxflag,localidflag,sfidflag,entryidflag,sflabelflag," \
+ "sfcatflag,sflinkflag,loopflag,loopmandatory,datumcount,metadata,deleteflag," \
+ "aditdefault,aditauto) values " \
+ "(:seq,:sfcat,:tag,:table,NULL,NULL,:dbtype,:notnull,:pk,:fktable,:fkcol,:fkgroup," \
+ ":type,:size,:man,NULL,:enumclosed,:rowidx,:localid,:sfid,:entryid,:sfname,:sfcatflag," \
+ ":sflink,:loop,NULL,:datumcnt,:metadata,:delete,:defval,:aditauto)"
qry = "select dictionaryseq,originalcategory,originaltag,tagcategory,dbtype,dbnullable," \
+ "primarykey,foreigntable,foreigncolumn,foreignkeygroup,validateflgs," \
+ "itemenumclosedflg,rowindexflg,lclidflg,sfidflg,entryidflg,sfnameflg," \
+ "sfcategoryflg,sfpointerflg,loopflag,datumcountflgs,metadataflgs,tagdeleteflgs," \
+ "lclsfidflg,defaultvalue,aditautoinsert from star.dict order by dictionaryseq"
varchar_pat = re.compile( r"char(?:\((\d+)\))?$", re.IGNORECASE )
params = {}
self._curs.execute( qry )
while True :
params.clear()
row = self._curs.fetchone()
if row is None : break
params["seq"] = row[0]
params["sfcat"] = row[1]
params["tag"] = row[2]
params["table"] = row[3]
params["pk"] = row[6]
params["fktable"] = row[7]
params["fkcol"] = row[8]
params["fkgroup"] = row[9]
# size only relevant for strings
# convert infomix types to postgres
#
params["dbtype"] = row[4].lower()
params["size"] = None
params["type"] = "STRING"
m = varchar_pat.search( params["dbtype"] )
if m :
params["size"] = m.group( 1 )
if not params["size"].isdigit() :
raise Exception( "Error: value size is not a number for %s" % (row[2],) )
elif params["dbtype"].find( "text" ) >= 0 : params["dbtype"] = self.TEXTTYPE
elif params["dbtype"].find( "integer" ) >= 0 : params["type"] = "INTEGER"
elif params["dbtype"].find( "float" ) >= 0 : params["type"] = "FLOAT"
elif params["dbtype"].find( "real" ) >= 0 : params["type"] = "FLOAT"
elif params["dbtype"].find( "date" ) >= 0 :
params["type"] = "DATE"
params["dbtype"] = "DATE"
# NOT NULL: either not null or primary key
params["notnull"] = None
if (row[5] is not None) and (row[5].strip().lower()[0:1] == "n") : params["notnull"] = "N"
if (row[6] is not None) and (row[6].strip().lower()[0:1] == "y") : params["notnull"] = "N"
if row[10] is None :
raise Exception( "Error: no mandatory flags for %s" % (row[2],) )
params["man"] = row[10].strip().upper()[self._mode:self._mode + 1]
# Flags
# closed enumeration
#
if row[11] is None : params["enumclosed"] = "N"
elif row[11].strip().lower()[0:1] == "y" : params["enumclosed"] = "Y"
else : params["enumclosed"] = "N"
# row index
#
params["rowidx"] = "N"
if (row[12] is not None) and (row[12].strip().lower()[0:1] == "y") :
params["rowidx"] = "Y"
# local id (key) for the table - not used
# global sf id
#
params["sfid"] = "N"
if (row[14] is not None) and (row[14].strip().lower()[0:1] == "y") :
params["sfid"] = "Y"
# entry id
#
params["entryid"] = "N"
if (row[15] is not None) and (row[15].strip().lower()[0:1] == "y") :
params["entryid"] = "Y"
# saveframe name (framecode)
#
params["sfname"] = "N"
if (row[16] is not None) and (row[16].strip().lower()[0:1] == "y") :
params["sfname"] = "Y"
# saveframe category flag
#
params["sfcatflag"] = "N"
if (row[17] is not None) and (row[17].strip().lower()[0:1] == "y") :
params["sfcatflag"] = "Y"
# not 'Y' for _label tags
# set default, fix later
#
params["sflink"] = "N"
# saveframe pointer (framecode with $ in front)
#
if (row[18] is not None) and (row[18].strip().lower()[0:1] == "y") :
params["type"] = "FRAMECODE"
# loop tag
#
params["loop"] = "N"
if (row[19] is not None) and (row[19].strip().lower()[0:1] == "y") :
params["loop"] = "Y"
# datum count
# these tags are for generating _Datum. loop
#
params["datumcnt"] = "N"
if (row[20] is not None) and (row[20].strip().lower()[0:1] == "y") :
params["datumcnt"] = "Y"
# metadata
# "interactive" validator does not load large data tables: too slow
#
params["metadata"] = "N"
if (row[21] is not None) and (row[21].strip().lower()[0:1] == "y") :
params["metadata"] = "Y"
# tag delete
# this is used for wide loops where most tags are not filled in. noramlly all loop tags are printed.
# added on annotators request
#
params["delete"] = "N"
if (row[22] is not None) and (row[22].strip().lower()[0:1] == "y") :
params["delete"] = "Y"
# local sf id
#
params["localid"] = "N"
if (row[23] is not None) and (row[23].strip().lower()[0:1] == "y") :
# spec. case: exclude entry IDs in Entry from "insert local ids" editing function -- its local id
# is entry id and not the saveframe number
if (row[2] != "_Entry.ID") and (row[2].find( ".Entry_ID" ) < 0) :
params["localid"] = "Y"
# default value
# Entry ID is a unique-enough string used w/ search and replace
#
params["defval"] = None
if row[2].find( ".Entry_ID" ) >= 0 : params["defval"] = "NEED_ACC_NUM"
elif row[24] is not None :
params["defval"] = row[24].strip()
if params["defval"] in ("?", ".") : params["defval"] = None
# autoinsert codes
#
# code 8 is saveframe label tag that has a matching _ID tag w/ code 7.
# it is "real data". other autoinsert codes are for automatically generated values that "aren't real"
# becasue we can re-create them anytime
#
params["aditauto"] = "N"
if row[25] is not None :
if not str( params["aditauto"] ).isdigit() : pass
if row[25] > 0 :
if row[25] != 8 :
params["aditauto"] = "Y"
if self.verbose :
sys.stdout.write( sql + "\n" )
pprint.pprint( params )
self._curs2.execute( sql, params )
# SF link flag
#
sql = "update tags set sflinkflag='Y' where tagname=:tag"
self._curs.execute( "select tagname from tags where valtype='FRAMECODE'" )
while True :
row = self._curs.fetchone()
if row is None : break
tag = row[0].replace( "_label", "_ID" )
self._curs2.execute( sql, { "tag" : tag } )
# fixup, just in case
#
self._curs2.execute( "update tags set aditauto='Y' where aditdefault is not null" )
####################################################################################################
# If mandatory is V or M, select mandatory from sfcats where sfcat = ?
# if sfcat is optional, reset to R or C resp. I.e.
# "mandatory if saveframe exists" (R,C) vs. "mandatory always (implies: saveframe must exist)" (V,M)
#
def fix_loopmandatory( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".fix_loopmandatory()\n" )
sql = "update tags set mandatory=:man where seq=:seq"
qry = "select mandatory from sfcats where sfcat=:sfcat"
self._curs.execute( "select seq,sfcat,mandatory,tagname from tags where mandatory='V' or mandatory='M'" )
params = {}
while True :
params.clear()
row = self._curs.fetchone()
if row is None : break
params["seq"] = row[0]
params["sfcat"] = row[1]
if self.verbose :
pprint.pprint( row )
sys.stdout.write( qry + "\n" )
pprint.pprint( params )
self._curs2.execute( qry, params )
sfrow = self._curs2.fetchone()
if sfrow is None :
raise Exception( "Error: no saveframe category for tag # %s", row[0] )
if sfrow[0] == "O" :
params["man"] = row[2]
if params["man"] == "V" : params["man"] = "R"
elif params["man"] == "M" : params["man"] = "C"
self._curs2.execute( sql, params )
####################################################################################################
#
# Mandatory overrides
#
def load_overrides( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".load_overrides()\n" )
ovrsql = "insert into tagdeps (ctlseq,ctlvalue,seq,mandatory) values (:ctseq,:ctval,:seq,:man)"
tagsql = "update tags set tagdepflag='Y' where seq=:seq"
qry = "select t1.dictionaryseq,v.ctlvalue,t2.dictionaryseq,v.validateflags,t1.originaltag " \
+ "from star.validationlinks v " \
+ "join star.dict t1 on t1.originalcategory=v.ctlsfcategory and t1.originaltag=v.ctltag " \
+ "join star.dict t2 on t2.originalcategory=v.depsfcategory and t2.originaltag=v.deptag"
self._curs.execute( "select count(*) from star.validationlinks" )
row = self._curs.fetchone()
if row[0] < 1 :
raise Exception( "empty validationlinks table" )
self._curs.execute( "select count(*) from star.dict" )
row = self._curs.fetchone()
if row[0] < 1 :
raise Exception( "empty dict table" )
params = {}
if self._verbose :
sys.stdout.write( qry )
sys.stdout.write( "\n" )
self._curs.execute( qry )
while True :
params.clear()
row = self._curs.fetchone()
if row is None : break
if row[1] is not None :
if row[1].strip() == "*" : continue # ADIT wildcard, not used by validator
tag = row[4].strip()
if tag in ("_Entry_interview.View_mode","_Entry_interview.PDB_deposition",
"_Entry_interview.BMRB_deposition") :
continue # ADIT view-only tags
# let's not do that for now
# if (tag == "_Entity.Number_of_monomers") and (row[1] == "polymer") : # Eldon's software can't 'V' this one
# mandatory = "V"
# else :
params["man"] = row[3].strip().upper()[self._mode:self._mode+1]
params["ctseq"] = row[0]
params["ctval"] = row[1]
params["seq"] = row[2]
if self._verbose :
sys.stdout.write( ovrsql )
pprint.pprint( params )
rc = self._curs2.execute( ovrsql, params )
if self._verbose :
sys.stdout.write( "-- %s rows inserted\n" % (rc,) )
if self._verbose :
sys.stdout.write( tagsql )
pprint.pprint( params )
rc = self._curs2.execute( tagsql, params )
if self._verbose :
sys.stdout.write( "-- %s rows updated\n" % (rc,) )
####################################################################################################
#
# Tag relationships
# derived from foreign keys with no regard/support for compound keys
#
def load_parent_child( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".load_parent_child()\n" )
sql = "insert into tagrels (chldseq,prntseq) values (:childseq,:parentseq)"
self._curs.execute( "select t1.dictionaryseq,t2.dictionaryseq from dict t1 " \
+ "join dict t2 on t2.tagcategory=t1.foreigntable and t2.tagfield=t1.foreigncolumn " \
+ "where t1.foreigntable is not null and t1.foreigncolumn is not null " \
+ "order by t2.dictionaryseq" )
while True :
row = self._curs.fetchone()
if row is None : break
self._curs2.execute( sql, { "childseq" : row[0], "parentseq" : row[1] } )
####################################################################################################
#
# Turn off "enumclosed" flag for tags whose parent is _Experiment.Name
#
# Must run after load_parent_child()
#
def fix_experiment_names( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".fix_experiment_names()\n" )
sql = "update tags set enumclosedflag='N' where seq=:seq"
self._curs.execute( "select r.chldseq from tagrels r join tags t on t.seq=r.prntseq " \
+ "where t.tagname='_Experiment.Name'" )
while True :
row = self._curs.fetchone()
if row is None : break
self._curs2.execute( sql, { "seq": row[0] } )
####################################################################################################
#
# Saveframe link tags
#
def update_sf_links( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".update_sflinks()\n" )
# match _label and _ID tags
#
sql = "update tags set sflinkflag='Y' where tagname=:tag"
qry = "select seq from tags t join tagrels r on r.chldseq=t.seq where t.tagname=:tag"
self._curs.execute( "select seq,tagname from tags where valtype='FRAMECODE'" )
while True :
row = self._curs.fetchone()
if row is None : break
if row[1].find( "_label" ) < 0 :
self.errors.append( "tag %s does not end in '_label'" % (row[1],) )
continue
idtag = row[1].replace( "_label", "_ID" )
self._curs2.execute( qry, { "tag" : idtag } )
qrow = self._curs2.fetchone()
if qrow is None :
self.errors.append( "tag %s not found in related tags table. Missing foreign key?" % (idtag,) )
continue
self._curs2.execute( sql, { "tag" : idtag } )
# add _label to .Sf_framecode parent-child links
#
sql = "insert into tagrels (prntseq,chldseq) values (:parent,:child)"
qry = "select t1.tagcat from tags t1 join tagrels r on r.prntseq=t1.seq join tags t2 on t2.seq=r.chldseq " \
+ "where t2.tagname=:tag"
qry1 = "select seq from tags where tagname=:tag"
qry2 = "select prntseq,chldseq from tagrels where prntseq=:parent and chldseq=:child"
self._curs.execute( "select seq,tagname from tags where valtype='FRAMECODE'" )
while True :
row = self._curs.fetchone()
if row is None : break
idtag = row[1].replace( "_label", "_ID" )
self._curs2.execute( qry, { "tag" : idtag } )
qrow = self._curs2.fetchone()
if qrow is None :
self.errors.append( "parent tag for %s (%s) not found" % (idtag,row[1],) )
continue
fctag = "_" + qrow[0] + ".Sf_framecode"
self._curs2.execute( qry1, { "tag" : fctag } )
qrow = self._curs2.fetchone()
if qrow is None :
self.errors.append( "framecode tag %s (%s) not found" % (fctag,row[1],) )
continue
# only add if not already there
#
self._curs2.execute( qry2, { "parent" : qrow[0], "child" : row[0] } )
qrow = self._curs2.fetchone()
if qrow is None :
self._curs2.execute( sql, { "parent" : qrow[0], "child" : row[0] } )
####################################################################################################
#
# Datum types
# This was supposed to drive table template generator on the website: tables whose tablegen flag is "y"
# can be generated by that code. The list is in the properties file.
#
def load_datum_types( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".load_datum_types()\n" )
cats = self._props.get( "validict", "datum.categories" ).split()
sql = "insert into datumtypes (tagcat,datumtype,tablegen) values (:table,:datum,:flag)"
self._curs.execute( "select distinct tagcategory,datumcountflgs from star.dict " \
+ "where datumcountflgs is not null" )
while True :
row = self._curs.fetchone()
if row is None : break
flag = "N"
if row[0] in cats : flag = "Y"
self._curs2.execute( sql, { "table" : row[0], "datum" : row[1], "flag" : flag } )
####################################################################################################
#
# this one's supposed to drive STARch PHP on the website. not that hard-coding it here is any
# less work than hard-coding it there.
#
def load_starch_table( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".load_starch_table()\n" )
sql = "insert into starch (tagname,displname,displseq,rowidx,seqid,compidxid," \
+ "compid,atomid,atomtype,isotope,ambicode,val,minval,maxval,err,author," \
+ "tablegen,groupid) values (:tag,:label,:order,:idx,'N','N','N','N','N','N','N','N','N'," \
+ "'N','N','N','N',0)"
qry = "select tagname,seq,rowidxflag from tags where metadata<>'Y' and tagname like :tag " \
+ "order by seq"
curs3 = self._db.cursor()
self._curs.execute( "select distinct tagcat from datumtypes where tablegen='Y'" )
while True :
row = self._curs.fetchone()
if row is None : break
tagname = "_" + row[0] + ".%%"
self._curs2.execute( qry, { "tag" : tagname } )
while True :
qrow = self._curs2.fetchone()
if qrow is None : break
pos = qrow[0].find( "." )
if pos >= 0 : taglabel = qrow[0][pos + 1:].replace( "_", " " )
else : taglabel = "FIXME"
curs3.execute( sql, { "tag" : qrow[0], "label" : taglabel, "order" : qrow[1], "idx" : qrow[2] } )
self._curs.execute( "update starch set seqid='Y' where tagname like '%.Seq_ID%'" )
self._curs.execute( "update starch set seqid='Y' where tagname like '%.Auth_seq_ID%'" )
self._curs.execute( "update starch set author='Y' where tagname like '%.Auth_seq_ID%'" )
self._curs.execute( "update starch set compidxid='Y' where tagname like '%.Comp_index_ID%'" )
self._curs.execute( "update starch set compid='Y' where tagname like '%.Comp_ID%'" )
self._curs.execute( "update starch set compid='Y' where tagname like '%.Auth_comp_ID%'" )
self._curs.execute( "update starch set author='Y' where tagname like '%.Auth_comp_ID%'" )
self._curs.execute( "update starch set atomid='Y' where tagname like '%.Atom_ID%'" )
self._curs.execute( "update starch set atomid='Y' where tagname like '%.Auth_atom_ID%'" )
self._curs.execute( "update starch set author='Y' where tagname like '%.Auth_atom_ID%'" )
self._curs.execute( "update starch set atomtype='Y' where tagname like '%.Atom_type%'" )
self._curs.execute( "update starch set isotope='Y' where tagname like '%.Atom_isotope_number%'" )
self._curs.execute( "update starch set ambicode='Y' where tagname like '%.Ambiguity_code%'" )
for i in range( 1, 7 ) :
self._curs.execute( "update starch set groupid=:id where tagname like :tag",
{ "id" : i, "tag" : ("%%_ID_%d" % (i,)) } )
self._curs.execute( "update starch set val='Y' where tagname like '%.Val'" )
self._curs.execute( "update starch set val='Y' where tagname like '%._val'" )
self._curs.execute( "update starch set minval='Y' where tagname like '%.Val_min'" )
self._curs.execute( "update starch set minval='Y' where tagname like '%._val_min'" )
self._curs.execute( "update starch set maxval='Y' where tagname like '%.Val_max'" )
self._curs.execute( "update starch set maxval='Y' where tagname like '%._val_max'" )
self._curs.execute( "update starch set err='Y' where tagname like '%.Val_err'" )
self._curs.execute( "update starch set err='Y' where tagname like '%._val_err'" )
####################################################################################################
#
# enumerations
#
def update_enums( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".update_enums()\n" )
sql = "insert into valenums(seq,val) " \
+ "select seq,val from enumerations where val<>'?' and val<>'.' and val is not NULL"
self._curs.execute( sql )
####################################################################################################
# output
#
#
#
def print_table( self, table, out = sys.stdout ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".print_table(%s)\n" % (table,) )
self._curs.execute( "select count(*) from " + table )
row = self._curs.fetchone()
if row[0] < 1 :
raise Exception( "Empty %s table" % (table,) )
out.write( " save_%s\n" % (table,) )
out.write( " loop_\n" )
formats = []
self._curs.execute( "select * from " + table )
for i in self._curs.description :
out.write( " _%s\n" % (i[0],) )
sql = "select max( length( %s ) ) from %s" % (i[0], table)
self._curs2.execute( sql )
row = self._curs2.fetchone()
if row is None :
raise Exception( "Error: no field width for %s" % (i[0],) )
fmt = "%"
if row[0] > 0 : width = row[0] + 2
else : width = 3
fmt += "-%ds " % (width,)
formats.append( fmt )
out.write( "\n" )
while True :
row = self._curs.fetchone()
if row is None : break
for i in range( len( row ) ) :
out.write( formats[i] % (quote4star( row[i] ),) )
out.write( "\n" )
out.write( "\n" )
out.write( " stop_\n" )
out.write( " save_\n\n" )
# printout
#
def print_dictionary( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".print()\n" )
outdir = os.path.realpath( self._props.get( "validict", "output_dir" ) )
if not os.path.isdir( outdir ) :
raise IOError( "Directory not found: %s" % (outdir,) )
outfile = os.path.join( outdir, self.OUTFILE )
version = "3.1"
self._curs.execute( "select version from info" )
row = self._curs.fetchone()
if row is None :
raise Exception( "Error: no version in the dictionary!" )
version = row[0]
with open( outfile, "w" ) as out :
out.write( "# this dictionary version is for the Java validator used by the annotators\n" )
out.write( "# and ADIT-NMR post-processor\n" )
out.write( "#\n" )
out.write( "data_%s\n\n" % (version,) )
for table in self.TABLES :
self.print_table( table, out )
####################################################################################################
#
if __name__ == "__main__" :
props = ConfigParser.SafeConfigParser()
props.read( sys.argv[1] )
dbfile = props.get( "dictionary", "sqlite3.file" )
if not os.path.exists( dbfile ) :
raise IOError( "File not found: %s (create dictionary first?)" % (dbfile,) )
db = ValidatorWriter.create_validator_dictionary( props, dburl = dbfile, verbose = True )
| [
"os.path.exists",
"sqlite3.connect",
"os.makedirs",
"re.compile",
"ConfigParser.SafeConfigParser",
"scripts.quote4star",
"os.path.join",
"os.path.split",
"sys.stderr.write",
"os.path.isdir",
"pprint.pprint",
"sys.stdout.write"
] | [((30286, 30317), 'ConfigParser.SafeConfigParser', 'ConfigParser.SafeConfigParser', ([], {}), '()\n', (30315, 30317), False, 'import ConfigParser\n'), ((2656, 2683), 'sqlite3.connect', 'sqlite3.connect', (['""":memory:"""'], {}), "(':memory:')\n", (2671, 2683), False, 'import sqlite3\n'), ((7617, 7668), 're.compile', 're.compile', (['"""char(?:\\\\((\\\\d+)\\\\))?$"""', 're.IGNORECASE'], {}), "('char(?:\\\\((\\\\d+)\\\\))?$', re.IGNORECASE)\n", (7627, 7668), False, 'import re\n'), ((29473, 29507), 'os.path.join', 'os.path.join', (['outdir', 'self.OUTFILE'], {}), '(outdir, self.OUTFILE)\n', (29485, 29507), False, 'import os\n'), ((30414, 30436), 'os.path.exists', 'os.path.exists', (['dbfile'], {}), '(dbfile)\n', (30428, 30436), False, 'import os\n'), ((1636, 1657), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (1649, 1657), False, 'import os\n'), ((1674, 1693), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (1685, 1693), False, 'import os\n'), ((3414, 3478), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.create_tables()\\n')"], {}), "(self.__class__.__name__ + '.create_tables()\\n')\n", (3430, 3478), False, 'import sys\n'), ((3908, 3968), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.make_info()\\n')"], {}), "(self.__class__.__name__ + '.make_info()\\n')\n", (3924, 3968), False, 'import sys\n'), ((4630, 4692), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.load_sfcats()\\n')"], {}), "(self.__class__.__name__ + '.load_sfcats()\\n')\n", (4646, 4692), False, 'import sys\n'), ((6361, 6421), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.load_tags()\\n')"], {}), "(self.__class__.__name__ + '.load_tags()\\n')\n", (6377, 6421), False, 'import sys\n'), ((14241, 14309), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.fix_loopmandatory()\\n')"], {}), "(self.__class__.__name__ + '.fix_loopmandatory()\\n')\n", (14257, 14309), False, 'import sys\n'), ((15576, 15641), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.load_overrides()\\n')"], {}), "(self.__class__.__name__ + '.load_overrides()\\n')\n", (15592, 15641), False, 'import sys\n'), ((16598, 16619), 'sys.stdout.write', 'sys.stdout.write', (['qry'], {}), '(qry)\n', (16614, 16619), False, 'import sys\n'), ((16634, 16656), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (16650, 16656), False, 'import sys\n'), ((18358, 18426), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.load_parent_child()\\n')"], {}), "(self.__class__.__name__ + '.load_parent_child()\\n')\n", (18374, 18426), False, 'import sys\n'), ((19311, 19382), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.fix_experiment_names()\\n')"], {}), "(self.__class__.__name__ + '.fix_experiment_names()\\n')\n", (19327, 19382), False, 'import sys\n'), ((19951, 20016), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.update_sflinks()\\n')"], {}), "(self.__class__.__name__ + '.update_sflinks()\\n')\n", (19967, 20016), False, 'import sys\n'), ((22797, 22864), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.load_datum_types()\\n')"], {}), "(self.__class__.__name__ + '.load_datum_types()\\n')\n", (22813, 22864), False, 'import sys\n'), ((23753, 23821), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.load_starch_table()\\n')"], {}), "(self.__class__.__name__ + '.load_starch_table()\\n')\n", (23769, 23821), False, 'import sys\n'), ((27353, 27416), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.update_enums()\\n')"], {}), "(self.__class__.__name__ + '.update_enums()\\n')\n", (27369, 27416), False, 'import sys\n'), ((27806, 27881), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.print_table(%s)\\n' % (table,))"], {}), "(self.__class__.__name__ + '.print_table(%s)\\n' % (table,))\n", (27822, 27881), False, 'import sys\n'), ((29205, 29261), 'sys.stdout.write', 'sys.stdout.write', (["(self.__class__.__name__ + '.print()\\n')"], {}), "(self.__class__.__name__ + '.print()\\n')\n", (29221, 29261), False, 'import sys\n'), ((29361, 29382), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (29374, 29382), False, 'import os\n'), ((2268, 2294), 'sys.stderr.write', 'sys.stderr.write', (["(e + '\\n')"], {}), "(e + '\\n')\n", (2284, 2294), False, 'import sys\n'), ((5370, 5398), 'sys.stdout.write', 'sys.stdout.write', (["(qry + '\\n')"], {}), "(qry + '\\n')\n", (5386, 5398), False, 'import sys\n'), ((5417, 5438), 'pprint.pprint', 'pprint.pprint', (['params'], {}), '(params)\n', (5430, 5438), False, 'import pprint\n'), ((6058, 6086), 'sys.stdout.write', 'sys.stdout.write', (["(sql + '\\n')"], {}), "(sql + '\\n')\n", (6074, 6086), False, 'import sys\n'), ((6105, 6126), 'pprint.pprint', 'pprint.pprint', (['params'], {}), '(params)\n', (6118, 6126), False, 'import pprint\n'), ((13228, 13256), 'sys.stdout.write', 'sys.stdout.write', (["(sql + '\\n')"], {}), "(sql + '\\n')\n", (13244, 13256), False, 'import sys\n'), ((13275, 13296), 'pprint.pprint', 'pprint.pprint', (['params'], {}), '(params)\n', (13288, 13296), False, 'import pprint\n'), ((14815, 14833), 'pprint.pprint', 'pprint.pprint', (['row'], {}), '(row)\n', (14828, 14833), False, 'import pprint\n'), ((14852, 14880), 'sys.stdout.write', 'sys.stdout.write', (["(qry + '\\n')"], {}), "(qry + '\\n')\n", (14868, 14880), False, 'import sys\n'), ((14899, 14920), 'pprint.pprint', 'pprint.pprint', (['params'], {}), '(params)\n', (14912, 14920), False, 'import pprint\n'), ((17596, 17620), 'sys.stdout.write', 'sys.stdout.write', (['ovrsql'], {}), '(ovrsql)\n', (17612, 17620), False, 'import sys\n'), ((17639, 17660), 'pprint.pprint', 'pprint.pprint', (['params'], {}), '(params)\n', (17652, 17660), False, 'import pprint\n'), ((17766, 17815), 'sys.stdout.write', 'sys.stdout.write', (["('-- %s rows inserted\\n' % (rc,))"], {}), "('-- %s rows inserted\\n' % (rc,))\n", (17782, 17815), False, 'import sys\n'), ((17867, 17891), 'sys.stdout.write', 'sys.stdout.write', (['tagsql'], {}), '(tagsql)\n', (17883, 17891), False, 'import sys\n'), ((17910, 17931), 'pprint.pprint', 'pprint.pprint', (['params'], {}), '(params)\n', (17923, 17931), False, 'import pprint\n'), ((18037, 18085), 'sys.stdout.write', 'sys.stdout.write', (["('-- %s rows updated\\n' % (rc,))"], {}), "('-- %s rows updated\\n' % (rc,))\n", (18053, 18085), False, 'import sys\n'), ((322, 345), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (335, 345), False, 'import os\n'), ((28963, 28981), 'scripts.quote4star', 'quote4star', (['row[i]'], {}), '(row[i])\n', (28973, 28981), True, 'from scripts import BaseClass as BaseClass, quote4star as quote4star\n')] |
from panini import app as panini_app
from panini.middleware.debug_middleware import DebugMiddleware
app = panini_app.App(
service_name="debug_middleware_example",
host="127.0.0.1",
port=4222,
)
message = {
"key1": "value1",
"key2": 2,
"key3": 3.0,
"key4": [1, 2, 3, 4],
"key5": {"1": 1, "2": 2, "3": 3, "4": 4, "5": 5},
"key6": {"subkey1": "1", "subkey2": 2, "3": 3, "4": 4, "5": 5},
"key7": None,
}
@app.task()
async def publish():
for _ in range(10):
await app.request(subject="some.publish.subject", message=message)
@app.listen("some.publish.subject")
async def receive_messages(msg):
return {"success": True}
if __name__ == "__main__":
app.add_middleware(DebugMiddleware, log_level="info")
app.start()
| [
"panini.app.App"
] | [((107, 195), 'panini.app.App', 'panini_app.App', ([], {'service_name': '"""debug_middleware_example"""', 'host': '"""127.0.0.1"""', 'port': '(4222)'}), "(service_name='debug_middleware_example', host='127.0.0.1',\n port=4222)\n", (121, 195), True, 'from panini import app as panini_app\n')] |
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score, roc_auc_score
from sklearn.preprocessing import RobustScaler
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RepeatedKFold
from sklearn.impute import SimpleImputer
import functools
import pandas as pd
import math
from src.model.lupts import LUPTS, StatLUPTS, LogisticLUPTS, LogisticStatLUPTS
from src.model.baseline import Baseline, LogisticBaseline
from src.plotutils import method_color, method_marker, set_mpl_default_settings
cols_sel = ['MMSE', 'PTGENDER', 'APOE4', 'AGE', 'PTEDUCAT', 'FDG',
'ABETA', 'TAU', 'PTAU', 'CDRSB', 'ADAS11', 'ADAS13', 'ADASQ4', 'RAVLT_immediate',
'RAVLT_learning', 'RAVLT_forgetting', 'RAVLT_perc_forgetting', 'LDELTOTAL',
'TRABSCOR', 'FAQ', 'MOCA', 'EcogPtMem', 'EcogPtLang', 'EcogPtVisspat', 'EcogPtPlan',
'EcogPtOrgan', 'EcogPtDivatt', 'EcogPtTotal', 'EcogSPMem', 'EcogSPLang', 'EcogSPVisspat',
'EcogSPPlan', 'EcogSPOrgan', 'EcogSPDivatt', 'EcogSPTotal',
'Ventricles', 'Hippocampus', 'WholeBrain', 'Entorhinal', 'Fusiform', 'MidTemp', 'ICV']
cols_categorical = ['PTGENDER', 'APOE4']
def quickADNI(set_path, task, priv_points, nan_threshold = 0.7, seed = 42):
#Set target based on task
if task == 'MCIAD' or task == 'AD':
target = 'AD'
elif task == 'CNMCI':
target = 'MCI'
elif task == 'MMSE':
target = 'MMSE'
#Data from subjects present at measurements bl, m12, m24, m36, m48
#Selection: What data are to be used for training/evaluating (1/3 privileged points)
data_viscodes = ['bl', 'm12', 'm24', 'm36', 'm48']
if priv_points == 1:
selection_viscodes = ['bl', 'm24', 'm48']
elif priv_points == 3:
selection_viscodes = data_viscodes
else:
raise ValueError('priv_points invalid value: ' + str(priv_points))
#Read data.
D = pd.read_csv(set_path)
D['AD'] = D['DX']=='Dementia'
D['MCI'] = D['DX']=='MCI'
D.loc[D['DX'].isna(), ['AD', 'MCI']] = np.nan
D.loc[:,'ABETA'] = D.loc[:,'ABETA'].replace('>1700', 1700, regex=True) \
.replace('<900', 900, regex=True) \
.replace('<200', 200, regex=True).astype(np.float32)
D.loc[:,'TAU'] = D.loc[:,'TAU'].replace('>1300', 1300, regex=True) \
.replace('<80', 80, regex=True).astype(np.float32)
D.loc[:,'PTAU'] = D.loc[:,'PTAU'].replace('>120', 120, regex=True) \
.replace('<8', 8, regex=True).astype(np.float32)
D = D.loc[:,['VISCODE', 'RID', 'MCI', 'AD'] + cols_sel]
D = pd.get_dummies(D, columns=cols_categorical)
#Drop features with more than nan_threshold% of the observations missing
to_be_removed = []
for code in data_viscodes:
count = len(D[D['VISCODE'] == code])
l = D[D['VISCODE'] == code].isna().sum()
for i, e in enumerate(l):
if nan_threshold < e/count:
if D.columns[i] not in to_be_removed:
to_be_removed += [D.columns[i]]
D = D.drop(to_be_removed, axis=1)
#Start to packet data into X, Y
frames = {}
for code in data_viscodes:
if code == data_viscodes[-1]:
frames[code] = D[D['VISCODE'] == code].dropna(subset=[target])
else:
frames[code] = D[D['VISCODE'] == code]
#Subjects present at all 'data_viscodes' measurements
I = get_rids(frames, task, data_viscodes)
data = {}
for code in selection_viscodes:
data[code] = frames[code][frames[code]['RID'].isin(I)]
print(task)
if task != 'MMSE':
print('Number of subjects: '+str(len(I)))
print('Number of positives at last time step: '+str(len(data[selection_viscodes[-1]][data[selection_viscodes[-1]][target] == 1].index)))
print('Number of negatives at last time step: '+str(len(data[selection_viscodes[-1]][data[selection_viscodes[-1]][target] == 0].index)))
else:
print('Number of subjects: '+str(len(I)))
features = [e for e in D.columns if e not in ['RID', 'VISCODE', 'MCI', 'AD']]
X = np.zeros((len(I), len(selection_viscodes)-1, len(features)))
data[selection_viscodes[-1]] = data[selection_viscodes[-1]].sort_values(by=['RID'])
Y = data[selection_viscodes[-1]][target].values
feature_index = {}
for j, code in enumerate(selection_viscodes[0:len(selection_viscodes)-1]):
data[code] = data[code].sort_values(by=['RID'])
data[code] = data[code].loc[:,features]
for feature in features:
feature_index[feature] = data[code].columns.get_loc(feature)
X[:,j,:] = data[code].values
data_size = len(X)
models = {}
#Set models to based on task regression/classification
if task != 'MMSE':
models['Baseline'] = LogisticBaseline(cv_search=True, folds=5, random_state = seed)
models['LuPTS'] = LogisticLUPTS(cv_search=True, folds=5, random_state = seed)
if priv_points == 3:
models['Stat-LuPTS'] = LogisticStatLUPTS(cv_search=True, folds=5, random_state = seed)
else:
models['Baseline'] = Baseline()
models['LuPTS'] = LUPTS()
if priv_points == 3:
models['Stat-LuPTS'] = StatLUPTS()
step = 20
bottom = 80
top = math.floor(data_size*0.5)
top = top - (top % step)
#Range of training sample sizes
tr_sample_sizes = range(bottom, top, step)
results = {}
np.random.seed(seed)
rkf = RepeatedKFold(n_splits=2, n_repeats=50, random_state=seed)
#Main loop
for sample_size in tr_sample_sizes:
results[sample_size] = {}
tmp_results = {}
for model_key in models.keys():
tmp_results[model_key] = []
#Splits, 2x50
for i, (I_tr, I_ts) in enumerate(rkf.split(X)):
sampled_I_tr = np.random.choice(I_tr, sample_size, replace=False)
training_data = X[sampled_I_tr,:,:].copy()
test_data = X[I_ts,:,:].copy()
#Impute missing values
for ixx, code in enumerate(selection_viscodes[0:len(selection_viscodes)-1]):
for j in range(training_data.shape[2]):
if all(np.isnan(training_data[:,ixx,j])):
print(j)
training_data[:,ixx,j] = np.mean(training_data[:,ixx-1,j])
imputer = SimpleImputer()
training_data[:,ixx,:] = imputer.fit_transform(training_data[:,ixx,:])
if ixx == 0:
test_data[:,ixx,:] = imputer.transform(test_data[:,ixx,:])
l_training_data = training_data.copy()
l_test_data = test_data.copy()
scaler = RobustScaler()
lupi_scaler = RobustScaler()
#Scale data for baseline
training_data[:,0,:] = scaler.fit_transform(training_data[:,0,:])
test_data[:,0,:] = scaler.transform(test_data[:,0,:])
#Scale data for LuPTS models, using observations over all time points per feature.
l_training_data = lupi_scaler.fit_transform(l_training_data.\
reshape((-1,X.shape[2]))).reshape((len(l_training_data), X.shape[1], X.shape[2]))
l_test_data= lupi_scaler.transform(l_test_data.reshape((-1,X.shape[2])))\
.reshape((len(I_ts), X.shape[1], X.shape[2]))
#Fit and evaluate models
for model_key in models.keys():
if (model_key == 'LuPTS') or (model_key == 'Stat-LuPTS'):
models[model_key].fit(l_training_data, Y[sampled_I_tr])
else:
models[model_key].fit(training_data, Y[sampled_I_tr])
if task != 'MMSE':
if (model_key == 'LuPTS') or (model_key == 'Stat-LuPTS'):
tmp_results[model_key] += [roc_auc_score(Y[I_ts], models[model_key].predict_proba(l_test_data)[:,1])]
else:
tmp_results[model_key] += [roc_auc_score(Y[I_ts], models[model_key].predict_proba(test_data)[:,1])]
else:
if (model_key == 'LuPTS') or (model_key == 'Stat-LuPTS'):
tmp_results[model_key] += [r2_score(Y[I_ts], models[model_key].predict(l_test_data))]
else:
tmp_results[model_key] += [r2_score(Y[I_ts], models[model_key].predict(test_data))]
#Record results over iterations
for model_key in models.keys():
results[sample_size][model_key] = [np.mean(tmp_results[model_key]), np.std(tmp_results[model_key])]
return results
def get_rids(frames, task, codes):
if task == 'AD' or task == 'MMSE':
pass
elif task == 'CNMCI':
#Select patients with a negative AD diagnosis at last time step
#Select patients with CN status at baseline.
frames[codes[-1]] = frames[codes[-1]][(frames[codes[-1]]['AD'] == 0)]
frames[codes[0]] = frames[codes[0]][((frames[codes[0]]['MCI'] == 0) & (frames[codes[0]]['AD'] == 0))]
elif task == 'MCIAD':
#Select patients who are NOT CN at last time step.
#Select patients with MCI at baseline.
frames[codes[-1]] = frames[codes[-1]][((frames[codes[-1]]['AD'] == 1) | (frames[codes[-1]]['MCI'] == 1))]
frames[codes[0]] = frames[codes[0]][((frames[codes[0]]['MCI'] == 1) & (frames[codes[0]]['AD'] == 0))]
patient_ID = {}
for code in codes:
patient_ID[code] = frames[code]['RID'].unique()
I = functools.reduce(lambda a, b: np.intersect1d(a, b), [patient_ID[k] for k in patient_ID.keys()])
return I
def plot_result_dict(results, ylabel, title):
set_mpl_default_settings()
fig = plt.figure(figsize=(6,6))
outer_keys = list(results.keys())
model_keys = list(results[outer_keys[0]].keys())
for model in model_keys:
mean = np.array([results[size][model][0] for size in outer_keys])
std = np.array([results[size][model][1] for size in outer_keys])
plt.plot(outer_keys, mean, color=method_color(model), marker=method_marker(model))
plt.fill_between(outer_keys, mean-std, mean+std, color=method_color(model), alpha=0.2)
plt.xlabel('Number of training samples')
plt.ylabel(ylabel)
plt.grid()
plt.title(title)
plt.legend(model_keys)
return fig
| [
"src.plotutils.set_mpl_default_settings",
"matplotlib.pyplot.grid",
"pandas.read_csv",
"math.floor",
"matplotlib.pyplot.ylabel",
"src.model.lupts.LogisticStatLUPTS",
"numpy.array",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"src.model.baseline.LogisticBaseline",
"numpy.random.seed",
"sklearn.mo... | [((2145, 2166), 'pandas.read_csv', 'pd.read_csv', (['set_path'], {}), '(set_path)\n', (2156, 2166), True, 'import pandas as pd\n'), ((2870, 2913), 'pandas.get_dummies', 'pd.get_dummies', (['D'], {'columns': 'cols_categorical'}), '(D, columns=cols_categorical)\n', (2884, 2913), True, 'import pandas as pd\n'), ((5554, 5581), 'math.floor', 'math.floor', (['(data_size * 0.5)'], {}), '(data_size * 0.5)\n', (5564, 5581), False, 'import math\n'), ((5716, 5736), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5730, 5736), True, 'import numpy as np\n'), ((5747, 5805), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': '(2)', 'n_repeats': '(50)', 'random_state': 'seed'}), '(n_splits=2, n_repeats=50, random_state=seed)\n', (5760, 5805), False, 'from sklearn.model_selection import RepeatedKFold\n'), ((10011, 10037), 'src.plotutils.set_mpl_default_settings', 'set_mpl_default_settings', ([], {}), '()\n', (10035, 10037), False, 'from src.plotutils import method_color, method_marker, set_mpl_default_settings\n'), ((10049, 10075), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (10059, 10075), True, 'import matplotlib.pyplot as plt\n'), ((10552, 10592), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of training samples"""'], {}), "('Number of training samples')\n", (10562, 10592), True, 'import matplotlib.pyplot as plt\n'), ((10597, 10615), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (10607, 10615), True, 'import matplotlib.pyplot as plt\n'), ((10620, 10630), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (10628, 10630), True, 'import matplotlib.pyplot as plt\n'), ((10635, 10651), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (10644, 10651), True, 'import matplotlib.pyplot as plt\n'), ((10661, 10683), 'matplotlib.pyplot.legend', 'plt.legend', (['model_keys'], {}), '(model_keys)\n', (10671, 10683), True, 'import matplotlib.pyplot as plt\n'), ((5076, 5136), 'src.model.baseline.LogisticBaseline', 'LogisticBaseline', ([], {'cv_search': '(True)', 'folds': '(5)', 'random_state': 'seed'}), '(cv_search=True, folds=5, random_state=seed)\n', (5092, 5136), False, 'from src.model.baseline import Baseline, LogisticBaseline\n'), ((5165, 5222), 'src.model.lupts.LogisticLUPTS', 'LogisticLUPTS', ([], {'cv_search': '(True)', 'folds': '(5)', 'random_state': 'seed'}), '(cv_search=True, folds=5, random_state=seed)\n', (5178, 5222), False, 'from src.model.lupts import LUPTS, StatLUPTS, LogisticLUPTS, LogisticStatLUPTS\n'), ((5392, 5402), 'src.model.baseline.Baseline', 'Baseline', ([], {}), '()\n', (5400, 5402), False, 'from src.model.baseline import Baseline, LogisticBaseline\n'), ((5429, 5436), 'src.model.lupts.LUPTS', 'LUPTS', ([], {}), '()\n', (5434, 5436), False, 'from src.model.lupts import LUPTS, StatLUPTS, LogisticLUPTS, LogisticStatLUPTS\n'), ((10216, 10274), 'numpy.array', 'np.array', (['[results[size][model][0] for size in outer_keys]'], {}), '([results[size][model][0] for size in outer_keys])\n', (10224, 10274), True, 'import numpy as np\n'), ((10289, 10347), 'numpy.array', 'np.array', (['[results[size][model][1] for size in outer_keys]'], {}), '([results[size][model][1] for size in outer_keys])\n', (10297, 10347), True, 'import numpy as np\n'), ((5289, 5350), 'src.model.lupts.LogisticStatLUPTS', 'LogisticStatLUPTS', ([], {'cv_search': '(True)', 'folds': '(5)', 'random_state': 'seed'}), '(cv_search=True, folds=5, random_state=seed)\n', (5306, 5350), False, 'from src.model.lupts import LUPTS, StatLUPTS, LogisticLUPTS, LogisticStatLUPTS\n'), ((5501, 5512), 'src.model.lupts.StatLUPTS', 'StatLUPTS', ([], {}), '()\n', (5510, 5512), False, 'from src.model.lupts import LUPTS, StatLUPTS, LogisticLUPTS, LogisticStatLUPTS\n'), ((6117, 6167), 'numpy.random.choice', 'np.random.choice', (['I_tr', 'sample_size'], {'replace': '(False)'}), '(I_tr, sample_size, replace=False)\n', (6133, 6167), True, 'import numpy as np\n'), ((6979, 6993), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (6991, 6993), False, 'from sklearn.preprocessing import RobustScaler\n'), ((7020, 7034), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (7032, 7034), False, 'from sklearn.preprocessing import RobustScaler\n'), ((9871, 9891), 'numpy.intersect1d', 'np.intersect1d', (['a', 'b'], {}), '(a, b)\n', (9885, 9891), True, 'import numpy as np\n'), ((6651, 6666), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {}), '()\n', (6664, 6666), False, 'from sklearn.impute import SimpleImputer\n'), ((8832, 8863), 'numpy.mean', 'np.mean', (['tmp_results[model_key]'], {}), '(tmp_results[model_key])\n', (8839, 8863), True, 'import numpy as np\n'), ((8865, 8895), 'numpy.std', 'np.std', (['tmp_results[model_key]'], {}), '(tmp_results[model_key])\n', (8871, 8895), True, 'import numpy as np\n'), ((10398, 10417), 'src.plotutils.method_color', 'method_color', (['model'], {}), '(model)\n', (10410, 10417), False, 'from src.plotutils import method_color, method_marker, set_mpl_default_settings\n'), ((10426, 10446), 'src.plotutils.method_marker', 'method_marker', (['model'], {}), '(model)\n', (10439, 10446), False, 'from src.plotutils import method_color, method_marker, set_mpl_default_settings\n'), ((10511, 10530), 'src.plotutils.method_color', 'method_color', (['model'], {}), '(model)\n', (10523, 10530), False, 'from src.plotutils import method_color, method_marker, set_mpl_default_settings\n'), ((6474, 6508), 'numpy.isnan', 'np.isnan', (['training_data[:, ixx, j]'], {}), '(training_data[:, ixx, j])\n', (6482, 6508), True, 'import numpy as np\n'), ((6591, 6628), 'numpy.mean', 'np.mean', (['training_data[:, ixx - 1, j]'], {}), '(training_data[:, ixx - 1, j])\n', (6598, 6628), True, 'import numpy as np\n')] |
import os
import pandas as pd
import re
import subprocess
df = pd.read_csv("analysis_output/base_image_version_count.csv")
print(df.head())
df = df[:25].copy()
java_version = []
for i in range(len(df)):
try:
run_cmd = "docker run " + df["base-image:version"][i] + " java -version"
result = subprocess.check_output(run_cmd, stderr=subprocess.STDOUT, shell=True)
result = result.decode("utf-8")
if "openjdk version" in result:
java_version.append(re.findall(r"openjdk version.*\"", result)[0])
elif "java version" in result:
java_version.append(re.findall(r"java version.*\"", result)[0])
else:
java_version.append("")
except subprocess.CalledProcessError as exc:
print("ERROR CODE", exc.returncode, exc.output)
java_version.append("")
df["java_version"] = java_version
print(df)
df.to_csv(r'analysis_output/base_image_version_count_java.csv', index=False)
| [
"subprocess.check_output",
"re.findall",
"pandas.read_csv"
] | [((64, 123), 'pandas.read_csv', 'pd.read_csv', (['"""analysis_output/base_image_version_count.csv"""'], {}), "('analysis_output/base_image_version_count.csv')\n", (75, 123), True, 'import pandas as pd\n'), ((297, 367), 'subprocess.check_output', 'subprocess.check_output', (['run_cmd'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), '(run_cmd, stderr=subprocess.STDOUT, shell=True)\n', (320, 367), False, 'import subprocess\n'), ((459, 501), 're.findall', 're.findall', (['"""openjdk version.*\\\\\\""""', 'result'], {}), '(\'openjdk version.*\\\\"\', result)\n', (469, 501), False, 'import re\n'), ((562, 601), 're.findall', 're.findall', (['"""java version.*\\\\\\""""', 'result'], {}), '(\'java version.*\\\\"\', result)\n', (572, 601), False, 'import re\n')] |
#!/usr/bin/python
#
# HRLAnalysis(TM) Software License - Version 1.0 - August 27th, 2013
#
# Permission is hereby granted, free of charge, to any person or
# organization obtaining a copy of the software and accompanying
# documentation covered by this license (the "Software") to use,
# reproduce, display, distribute, execute, and transmit the
# Software, and to prepare derivative works of the Software, and
# to permit third-parties to whom the Software is furnished to do
# so, all subject to the following:
#
# The copyright notices in the Software and this entire statement,
# including the above license grant, this restriction and the
# following disclaimer, must be included in all copies of the
# Software, in whole or in part, and all derivative works of the
# Software, unless such copies or derivative works are solely in
# the form of machine-executable object code generated by a source
# language processor.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
# NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
# ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
# OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE, INCLUDING BUT NOT LIMITED TO THE
# COMPATIBILITY OF THIS LICENSE WITH OTHER SOFTWARE LICENSES.
#
import biggles
import numpy
class spikePlotterBiggles:
"""wrapper class for plotting the hrlAnalysis results."""
def __init__(self,name,startTime,endTime,startIdx,endIdx):
self.rows = 0
self.r = []
self.bPlotRaster = False
self.bPlotMean = False
self.bPlotCOV = False
self.name = name
self.startTime = startTime
self.endTime = endTime
self.startIdx = startIdx
self.endIdx = endIdx
def plotRaster(self,times,spikes):
r = biggles.FramedPlot()
r.xrange = self.startTime, self.endTime + 10
r.yrange = self.startIdx, self.endIdx
r.xlabel = "Time (ms)"
r.ylabel = "Cell Index"
r.add(biggles.Points(times,spikes,type="filled circle"))
self.r.append(r);
self.rows += 1
self.bPlotRaster = True
def plotRasterNewCells(self,times,spikes,startIdx,endIdx):
r = biggles.FramedPlot()
r.xrange = self.startTime, self.endTime + 10
r.yrange = startIdx, endIdx
r.xlabel = "Time (ms)"
r.ylabel = "Cell Index"
r.add(biggles.Points(times,spikes,type="filled circle"))
self.r.append(r);
self.rows += 1
self.bPlotRaster = True
def plotWindowRate(self,rates):
self.mean = biggles.FramedPlot()
self.mean.xrange = 0, len(rates) + 1
self.mean.yrange = 0, max(rates)
self.mean.xlabel = "Window"
self.mean.ylabel = "Fequency (Hz)"
self.mean.add(biggles.Curve(numpy.arange( 0, len(rates)),rates))
self.rows += 1
self.bPlotMean = True
def plotCOV(self,cells,COV):
self.cov = biggles.FramedPlot()
self.cov.xrange = self.startIdx, self.endIdx + 1
self.cov.yrange = 0, max(COV)
self.cov.xlabel = "Cell Index"
self.cov.ylabel = "COV"
self.cov.add(biggles.Points(cells,COV,type="filled circle"))
self.rows += 1
self.bPlotCOV = True
def plotCellRates(self,cells,rates):
pass
def plotSpikeBins(self,freqs,counts):
pass
def show(self):
self.p.show()
def savePlot(self,fileName):
currRow = 0
Table = biggles.Table(self.rows, 1)
if self.bPlotRaster:
for rasterPlot in self.r:
Table[currRow,0] = rasterPlot
currRow += 1
if self.bPlotMean:
Table[currRow,0] = self.mean
currRow += 1
if self.bPlotCOV:
Table[currRow,0] = self.cov
currRow += 1
Table.aspect_ratio = 0.5
Table.write_img(1600,800,fileName)
#Table.write_eps(fileName)
def closePlot(self):
pass
| [
"biggles.Points",
"biggles.Table",
"biggles.FramedPlot"
] | [((2157, 2177), 'biggles.FramedPlot', 'biggles.FramedPlot', ([], {}), '()\n', (2175, 2177), False, 'import biggles\n'), ((2563, 2583), 'biggles.FramedPlot', 'biggles.FramedPlot', ([], {}), '()\n', (2581, 2583), False, 'import biggles\n'), ((2969, 2989), 'biggles.FramedPlot', 'biggles.FramedPlot', ([], {}), '()\n', (2987, 2989), False, 'import biggles\n'), ((3351, 3371), 'biggles.FramedPlot', 'biggles.FramedPlot', ([], {}), '()\n', (3369, 3371), False, 'import biggles\n'), ((3936, 3963), 'biggles.Table', 'biggles.Table', (['self.rows', '(1)'], {}), '(self.rows, 1)\n', (3949, 3963), False, 'import biggles\n'), ((2354, 2405), 'biggles.Points', 'biggles.Points', (['times', 'spikes'], {'type': '"""filled circle"""'}), "(times, spikes, type='filled circle')\n", (2368, 2405), False, 'import biggles\n'), ((2750, 2801), 'biggles.Points', 'biggles.Points', (['times', 'spikes'], {'type': '"""filled circle"""'}), "(times, spikes, type='filled circle')\n", (2764, 2801), False, 'import biggles\n'), ((3559, 3607), 'biggles.Points', 'biggles.Points', (['cells', 'COV'], {'type': '"""filled circle"""'}), "(cells, COV, type='filled circle')\n", (3573, 3607), False, 'import biggles\n')] |
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import View
from django.views.generic import TemplateView
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404
from django.urls import reverse
from django.utils.html import escape
from django.core.paginator import Paginator
from application import views as ctrl
from application import forms
from server_secrets.hookModules import hookModules
analysisOptions = list()
for module in hookModules:
if hasattr(module, 'getAnalysisOptions'):
for entry in module.getAnalysisOptions():
analysisOptions.append(entry)
analysisOptions.sort(key=lambda item: (item.get('priority',100), item.get('text','')))
#
# Trivial views
#
class SoonView(TemplateView):
template_name = "soon.html"
class LegalTosView(TemplateView):
template_name = "legal/tos.html"
class LegalPrivView(TemplateView):
template_name = "legal/privacy.html"
class HelpView(TemplateView):
template_name = "help.html"
#
# ABSTRACT VIEWS
#
class TemplateViewLoggedIn(TemplateView, LoginRequiredMixin):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class UserPartEditFormView(TemplateViewLoggedIn):
''' Edits OneToOne fields - such as profiles '''
form = None
pagetitle = '??? Edit'
template_name = "forms/form.html"
on_success = 'index'
on_success_args = []
on_success_kwargs = {}
add_ip = False
add_user = False
'''def get_obj(self, bl, pk, ppk): pass'''
def get_redirection(self, user, model):
return reverse(
self.on_success,
None,
self.on_success_args,
self.on_success_kwargs
)
def build_form(self, bl=None, req_data=None, pk='0', ppk='0'):
obj = self.get_obj(bl,pk,ppk)
if self.add_ip:
obj.ip = bl._ip
if self.add_user:
obj.user = bl.user
return self.form(req_data,instance=obj)
def get(self,request,pk='0', ppk='0'):
rd = request.GET
if len(rd) == 0:
rd = None
return render(request,self.template_name,{
'title': self.pagetitle,
'form': self.build_form(ctrl.BusinessLogic(request), rd, pk, ppk),
})
def post(self,request,pk='0', ppk='0'):
bl = ctrl.BusinessLogic(request)
form = self.build_form(bl,request.POST, pk, ppk)
err = ''
saved = False
try:
if form.is_valid():
form.save()
saved = True
return HttpResponseRedirect(self.get_redirection(bl.user, form.instance))
except Exception as e:
err = escape(str(e)).strip().replace('\n','\n<br>\n')
return render(request, self.template_name,{
'title': self.pagetitle,
'form': form,
'err': err,
})
class CrudListView(TemplateViewLoggedIn):
''' Displays ForeingKey fields - such as lists '''
pagetitle = '???'
template_name = "forms/crudlist.html"
'''
pagetitle = '???'
template_name = "forms/crudlist.html"
item_template = None
edit_url_label = None
delete_url_label = None
def get_list_items(self, bl, ppk): pass
'''
def get(self,request,ppk='0'):
items = self.get_list_items(ctrl.BusinessLogic(request), ppk)
ll = ['0']
if ppk!='0': ll=[ppk,'0']
return render(request,self.template_name,{
'title': self.pagetitle,
'items': items,
'current_pk': ppk,
'item_template': self.item_template,
'addlink': reverse(self.edit_url_label,None,ll),
'delete_url_label': self.delete_url_label,
'edit_url_label': self.edit_url_label,
})
class CrudDeleteView(TemplateViewLoggedIn):
''' Deletes ForeingKey fields - such as list items '''
pagetitle = '???'
'''
pagetitle = '???'
def get_redirection(self, user, model): pass
def get_model(self, bl, pk): pass
'''
def get(self,request,pk='0'):
bl = ctrl.BusinessLogic(request)
redir = self.get_redirection(bl.user, None)
if pk!='0':
model = self.get_model(bl,pk)
redir = self.get_redirection(bl.user, model)
model.delete()
return HttpResponseRedirect(redir)
class CrudEditView(UserPartEditFormView):
''' Edits/adds ForeingKey fields - such as list items '''
pagetitle = '???'
template_name = "forms/form.html"
on_success = None
add_ip = True
add_user = True
manager = None
def get_obj(self, bl, pk, ppk):
if pk=='0':
obj = self.manager()
if ppk!='0':
self.insert_parent(obj, bl, ppk)
return obj
else: return self.manager.objects.get(pk=pk, user__pk=bl.user.pk)
#
# CONCRETE VIEWS
#
class HomeView(TemplateView):
template_name = "index.html"
def get(self,request):
bl = ctrl.BusinessLogic(request)
d = {}
d['bl'] = bl
d['loginform'] = False
if not bl.logged_in:
d['loginform'] = forms.AuthenticationForm()
else:
d['corpora'] = ctrl.models.Corpus.objects.filter(user__id=bl.user.id)
d['corpora'] = sorted(d['corpora'], key=lambda a: a.modifiedWithChild)[::-1]
return render(
request,
self.template_name,
d
)
class SettingsView(TemplateViewLoggedIn):
template_name = "settings/index.html"
class CorpusView(CrudListView):
template_name = "corpus/view.html"
pagetitle = 'Corpus'
item_template = 'forms/listitem_corpus_document.html'
edit_url_label = 'document_edt'
delete_url_label = 'document_del'
def get_list_items(self, bl, ppk): return ctrl.models.Document.objects.filter(corpus__pk=ppk, user__pk=bl.user.pk).order_by('title')
class CorpusDelView(CrudDeleteView):
def get_model(self, bl, pk): return ctrl.models.Corpus.objects.get(pk=pk, user__pk=bl.user.pk)
def get_redirection(self, user, model): return reverse('index')
class CorpusEdtView(CrudEditView):
template_name = "corpus/form.html"
pagetitle = 'Corpus'
on_success = 'index'
form = forms.CorpusForm
manager = ctrl.models.Corpus
class CorpusXplView(TemplateView):
def post(self,request,pk='0', ppk='0'):
return self.get(request,pk,ppk)
def get(self,request,pk='0', ppk='0'):
return HttpResponseRedirect(reverse('analysis', None, [pk]))
class DocumentDelView(CrudDeleteView):
def get_model(self, bl, pk): return ctrl.models.Document.objects.get(pk=pk, user__pk=bl.user.pk)
def get_redirection(self, user, model):
if model is None: return reverse('index')
else: return reverse('corpus', None, [model.corpus.pk])
class DocumentEdtView(CrudEditView):
template_name = 'corpus/docform.html'
pagetitle = 'Document'
on_success = 'corpus'
form = forms.DocumentForm
manager = ctrl.models.Document
def insert_parent(self, obj, bl, ppk):
exc = ctrl.models.Corpus.objects.get(user__pk=bl.user.pk, pk=ppk)
obj.corpus = exc
def get_redirection(self, user, model):
self.on_success_args = [model.corpus.pk]
return super().get_redirection(user,model)
class AnalysisView(TemplateView):
template_name = 'analysis.html'
def get(self, request, pk='0'):
return render(request, self.template_name, {
'corpus_pk': pk,
'tools': analysisOptions,
})
import json
from urllib.request import urlopen
class ServerStatsView(TemplateView):
template_name = 'server-stats.html'
def get(self, request):
stats = json.loads(ServerStatsJsonGetter(request).content.decode())
processed_stats = dict()
processed_stats['workers_total'] = 0
processed_stats['workers_busy'] = 0
processed_stats['workers_accepting_connections'] = 0
processed_stats['threads_total'] = 0
processed_stats['threads_busy'] = 0
processed_stats['requests_processed'] = 0
processed_stats['requests_processing'] = 0
for worker in stats['workers']:
processed_stats['workers_total']+=1
processed_stats['workers_accepting_connections']+= int(bool(worker['accepting']))
processed_stats['workers_busy']+=int(worker['status']=='busy')
for thread in worker['cores']:
processed_stats['threads_total']+=1
processed_stats['threads_busy']+=int(bool(thread['in_request']))
processed_stats['requests_processed']+=worker['requests']
processed_stats['requests_processing']=processed_stats['threads_busy']
processed_stats['workers_busy_pct'] = 100*processed_stats['workers_busy']/processed_stats['workers_total']
processed_stats['workers_avail_pct'] = 100*processed_stats['workers_accepting_connections']/processed_stats['workers_total']
processed_stats['threads_busy_pct'] = 100*processed_stats['threads_busy']/processed_stats['threads_total']
return render(request, self.template_name, {
'stats':processed_stats,
})
def ServerStatsJsonGetter(request):
with urlopen('http://127.0.0.1:14549') as urlstream:
return HttpResponse(
json.dumps(
json.loads(urlstream.read().decode()),
indent=4
),
content_type='application/json'
)
| [
"django.shortcuts.render",
"django.http.HttpResponseRedirect",
"application.forms.AuthenticationForm",
"application.views.models.Document.objects.get",
"django.utils.decorators.method_decorator",
"application.views.BusinessLogic",
"application.views.models.Document.objects.filter",
"django.urls.revers... | [((2468, 2500), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {}), '(login_required)\n', (2484, 2500), False, 'from django.utils.decorators import method_decorator\n'), ((3005, 3081), 'django.urls.reverse', 'reverse', (['self.on_success', 'None', 'self.on_success_args', 'self.on_success_kwargs'], {}), '(self.on_success, None, self.on_success_args, self.on_success_kwargs)\n', (3012, 3081), False, 'from django.urls import reverse\n'), ((3752, 3779), 'application.views.BusinessLogic', 'ctrl.BusinessLogic', (['request'], {}), '(request)\n', (3770, 3779), True, 'from application import views as ctrl\n'), ((4180, 4272), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'title': self.pagetitle, 'form': form, 'err': err}"], {}), "(request, self.template_name, {'title': self.pagetitle, 'form': form,\n 'err': err})\n", (4186, 4272), False, 'from django.shortcuts import render\n'), ((5522, 5549), 'application.views.BusinessLogic', 'ctrl.BusinessLogic', (['request'], {}), '(request)\n', (5540, 5549), True, 'from application import views as ctrl\n'), ((5763, 5790), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['redir'], {}), '(redir)\n', (5783, 5790), False, 'from django.http import HttpResponseRedirect\n'), ((6427, 6454), 'application.views.BusinessLogic', 'ctrl.BusinessLogic', (['request'], {}), '(request)\n', (6445, 6454), True, 'from application import views as ctrl\n'), ((6807, 6845), 'django.shortcuts.render', 'render', (['request', 'self.template_name', 'd'], {}), '(request, self.template_name, d)\n', (6813, 6845), False, 'from django.shortcuts import render\n'), ((7421, 7479), 'application.views.models.Corpus.objects.get', 'ctrl.models.Corpus.objects.get', ([], {'pk': 'pk', 'user__pk': 'bl.user.pk'}), '(pk=pk, user__pk=bl.user.pk)\n', (7451, 7479), True, 'from application import views as ctrl\n'), ((7531, 7547), 'django.urls.reverse', 'reverse', (['"""index"""'], {}), "('index')\n", (7538, 7547), False, 'from django.urls import reverse\n'), ((8046, 8106), 'application.views.models.Document.objects.get', 'ctrl.models.Document.objects.get', ([], {'pk': 'pk', 'user__pk': 'bl.user.pk'}), '(pk=pk, user__pk=bl.user.pk)\n', (8078, 8106), True, 'from application import views as ctrl\n'), ((8520, 8579), 'application.views.models.Corpus.objects.get', 'ctrl.models.Corpus.objects.get', ([], {'user__pk': 'bl.user.pk', 'pk': 'ppk'}), '(user__pk=bl.user.pk, pk=ppk)\n', (8550, 8579), True, 'from application import views as ctrl\n'), ((8871, 8956), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'corpus_pk': pk, 'tools': analysisOptions}"], {}), "(request, self.template_name, {'corpus_pk': pk, 'tools': analysisOptions}\n )\n", (8877, 8956), False, 'from django.shortcuts import render\n'), ((10550, 10613), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'stats': processed_stats}"], {}), "(request, self.template_name, {'stats': processed_stats})\n", (10556, 10613), False, 'from django.shortcuts import render\n'), ((10682, 10715), 'urllib.request.urlopen', 'urlopen', (['"""http://127.0.0.1:14549"""'], {}), "('http://127.0.0.1:14549')\n", (10689, 10715), False, 'from urllib.request import urlopen\n'), ((4757, 4784), 'application.views.BusinessLogic', 'ctrl.BusinessLogic', (['request'], {}), '(request)\n', (4775, 4784), True, 'from application import views as ctrl\n'), ((6580, 6606), 'application.forms.AuthenticationForm', 'forms.AuthenticationForm', ([], {}), '()\n', (6604, 6606), False, 'from application import forms\n'), ((6648, 6702), 'application.views.models.Corpus.objects.filter', 'ctrl.models.Corpus.objects.filter', ([], {'user__id': 'bl.user.id'}), '(user__id=bl.user.id)\n', (6681, 6702), True, 'from application import views as ctrl\n'), ((7933, 7964), 'django.urls.reverse', 'reverse', (['"""analysis"""', 'None', '[pk]'], {}), "('analysis', None, [pk])\n", (7940, 7964), False, 'from django.urls import reverse\n'), ((8184, 8200), 'django.urls.reverse', 'reverse', (['"""index"""'], {}), "('index')\n", (8191, 8200), False, 'from django.urls import reverse\n'), ((8222, 8264), 'django.urls.reverse', 'reverse', (['"""corpus"""', 'None', '[model.corpus.pk]'], {}), "('corpus', None, [model.corpus.pk])\n", (8229, 8264), False, 'from django.urls import reverse\n'), ((5063, 5101), 'django.urls.reverse', 'reverse', (['self.edit_url_label', 'None', 'll'], {}), '(self.edit_url_label, None, ll)\n', (5070, 5101), False, 'from django.urls import reverse\n'), ((7252, 7324), 'application.views.models.Document.objects.filter', 'ctrl.models.Document.objects.filter', ([], {'corpus__pk': 'ppk', 'user__pk': 'bl.user.pk'}), '(corpus__pk=ppk, user__pk=bl.user.pk)\n', (7287, 7324), True, 'from application import views as ctrl\n'), ((3641, 3668), 'application.views.BusinessLogic', 'ctrl.BusinessLogic', (['request'], {}), '(request)\n', (3659, 3668), True, 'from application import views as ctrl\n')] |
#MenuTitle: CopyCat
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
A tool for comparing two fonts.
"""
import sys, os, traceback
import importlib
from collections import OrderedDict
class BaseTestClass:
def setFonts(self, font1, font2):
self.font1 = font1
self.font2 = font2
def make_font_to_font_test(self, masterName1, masterName2, returnDetails=True, returnDocString=True, excludeGlyphs=[]):
results = self._parseTestMethods("font__", returnDetails, returnDocString, **dict(font1=self.font1, font2=self.font2))
master_id_1 = None
for master in self.font1.masters:
if master.name == masterName1:
master_id_1 = master.id
assert master_id_1 is not None, str(master_id_1)
master_id_2 = None
for master in self.font2.masters:
if master.name == masterName2:
master_id_2 = master.id
assert master_id_2 is not None, str(master_id_2)
for glyph1 in self.font1.glyphs:
if glyph1 is None: continue
glyph2 = self.font2.glyphForUnicode_(glyph1.unicode)
if glyph2 is None: continue
if glyph1.name in excludeGlyphs: continue
if glyph2.name in excludeGlyphs: continue
layer1 = glyph1.layers[master_id_1]
if layer1 is None: continue
layer2 = glyph2.layers[master_id_2]
if layer2 is None: continue
results += self._parseTestMethods("layer__", returnDetails, returnDocString, **dict(layer1=layer1, layer2=layer2))
return results
def parseLayerTestMethodsOnSpecificLayers(self, returnDetails, returnDocString, layer1, layer2):
self._parseTestMethods("layer__", returnDetails, returnDocString, **dict(layer1=layer1, layer2=layer2))
def parseFontTestMethods(self, returnDetails, returnDocString, font1, font2):
self._parseTestMethods("font__", returnDetails, returnDocString, **dict(font1=font1, font2=font2))
def _parseTestMethods(self, prefix, returnDetails, returnDocString, **kwargs):
method_list = [func for func in dir(self) if callable(getattr(self, func))]
resultList = []
for method_name in method_list:
if method_name.startswith(prefix):
abortTest = False
method = getattr(self, method_name)
try:
resultData = method(kwargs)
except:
print(traceback.format_exc())
print(f"\n\nErr: <{prefix[:-1]}> test <{method_name[len(prefix):]}>: \n\tcouldn't parse test with parameters:")
for k, v in kwargs.items():
print(f"\t\t {k} = {v}")
print("\t\tTest will be skipped.")
returnData = {}
abortTest = True
# I'm not sure about worning implementation, maybe I should introduce some logger object
try:
assert isinstance(resultData, dict)
except:
print(f"\n\nErr: <{prefix[:-1]}> test <{method_name[len(prefix):]}>:\n\t test doesn't return dict type.\n\t\tTest will be skipped.")
abortTest = True
try:
boolResult = resultData.get("boolResult")
assert isinstance(boolResult, bool)
except:
print(f"\n\nErr: <{prefix[:-1]}> test <{method_name[len(prefix):]}>:\n\t return dict doesn't contain proper \"boolResult\" key\n\t (key returns {boolResult}).\n\t\tTest will be skipped.")
abortTest = True
if abortTest: continue
testResult = OrderedDict([("method_name", method_name),("boolResult", boolResult)])
if returnDetails:
for k in sorted(list(resultData.keys())):
if k == "boolResult": continue
testResult[k] = resultData[k]
if returnDocString:
testResult["__doc__"] = method.__doc__
resultList.append(testResult)
return resultList
def main():
# testCase
from parsers.printOneLinerResultParser import PrintOneLinerResultParser
resultParser = PrintOneLinerResultParser(profileName="simple")
fonts = Glyphs.fonts
masterName = "Medium"
resultParser.make_font_to_font_test(fonts[0], fonts[1], masterName, masterName, returnDetails=True, returnDocString=True, excludeGlyphs=[])
if __name__ == '__main__':
main() | [
"traceback.format_exc",
"collections.OrderedDict",
"parsers.printOneLinerResultParser.PrintOneLinerResultParser"
] | [((3661, 3708), 'parsers.printOneLinerResultParser.PrintOneLinerResultParser', 'PrintOneLinerResultParser', ([], {'profileName': '"""simple"""'}), "(profileName='simple')\n", (3686, 3708), False, 'from parsers.printOneLinerResultParser import PrintOneLinerResultParser\n'), ((3205, 3276), 'collections.OrderedDict', 'OrderedDict', (["[('method_name', method_name), ('boolResult', boolResult)]"], {}), "([('method_name', method_name), ('boolResult', boolResult)])\n", (3216, 3276), False, 'from collections import OrderedDict\n'), ((2229, 2251), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2249, 2251), False, 'import sys, os, traceback\n')] |
import glob
import pathlib
import unittest as ut
import btorrent
class TestTorrent(ut.TestCase):
def test_torrent(self):
path = pathlib.Path('tests/files/test_0.torrent')
tt_0 = btorrent.Torrent(btorrent.TorrentFile.from_file(path))
tt_1 = btorrent.Torrent(path)
for ilvl, lvl in enumerate(tt_0.announce_list):
for itrk, tracker in enumerate(lvl):
self.assertEqual(tracker.tracker_addr,
tt_1.announce_list[ilvl][itrk].tracker_addr)
| [
"btorrent.Torrent",
"btorrent.TorrentFile.from_file",
"pathlib.Path"
] | [((143, 185), 'pathlib.Path', 'pathlib.Path', (['"""tests/files/test_0.torrent"""'], {}), "('tests/files/test_0.torrent')\n", (155, 185), False, 'import pathlib\n'), ((271, 293), 'btorrent.Torrent', 'btorrent.Torrent', (['path'], {}), '(path)\n', (287, 293), False, 'import btorrent\n'), ((218, 254), 'btorrent.TorrentFile.from_file', 'btorrent.TorrentFile.from_file', (['path'], {}), '(path)\n', (248, 254), False, 'import btorrent\n')] |
import glob
import inspect
import logging
import os
import shutil
import sys
import yaml
from pathlib import Path
from typing import List
import multiply_data_access.data_access_component
from multiply_core.models import get_forward_models
from multiply_core.observations import INPUT_TYPES
from multiply_core.variables import get_registered_variables
from multiply_post_processing import get_available_indicators, get_post_processor_creators, PostProcessorType
from multiply_prior_engine.vegetation_prior_creator import SUPPORTED_VARIABLES as POSSIBLE_USER_PRIORS
from vm_support import set_earth_data_authentication, set_mundi_authentication
from .model import Job
logging.getLogger().setLevel(logging.INFO)
CALVALUS_DIR = os.path.abspath(os.path.join(inspect.getfile(Job), os.pardir, os.pardir, os.pardir, os.pardir, 'calvalus-instances'))
sys.path.insert(0, CALVALUS_DIR)
# check out with git clone -b share https://github.com/bcdev/calvalus-instances
# and add the calvalus-instances as content root to project structure
import share.bin.pmserver as pmserver
MULTIPLY_DIR_NAME = '.multiply'
MULTIPLY_CONFIG_FILE_NAME = 'multiply_config.yaml'
MULTIPLY_PLATFORM_PYTHON_CONFIG_KEY = 'platform-env'
WORKING_DIR_CONFIG_KEY = 'working_dir'
WORKFLOWS_DIRS_CONFIG_KEY = 'workflows_dirs'
SCRIPTS_DIRS_CONFIG_KEY = 'scripts_dirs'
def _get_config() -> dict:
home_dir = str(Path.home())
multiply_home_dir = '{0}/{1}'.format(home_dir, MULTIPLY_DIR_NAME)
if not os.path.exists(multiply_home_dir):
os.mkdir(multiply_home_dir)
path_to_multiply_config_file = '{0}/{1}'.format(multiply_home_dir, MULTIPLY_CONFIG_FILE_NAME)
if os.path.exists(path_to_multiply_config_file):
with open(path_to_multiply_config_file, 'r') as multiply_config_file:
multiply_config = yaml.safe_load(multiply_config_file)
return multiply_config
return {
WORKING_DIR_CONFIG_KEY: f'{multiply_home_dir}/multiply',
WORKFLOWS_DIRS_CONFIG_KEY: [],
SCRIPTS_DIRS_CONFIG_KEY: []
}
class ServiceContext:
def __init__(self):
self._jobs = {}
self.data_access_component = multiply_data_access.data_access_component.DataAccessComponent()
self.pm_server = pmserver.PMServer()
self._python_dist = sys.executable
config = _get_config()
if MULTIPLY_PLATFORM_PYTHON_CONFIG_KEY in config.keys():
self._python_dist = config[MULTIPLY_PLATFORM_PYTHON_CONFIG_KEY]
if WORKING_DIR_CONFIG_KEY in config.keys():
self.set_working_dir(config[WORKING_DIR_CONFIG_KEY])
if WORKFLOWS_DIRS_CONFIG_KEY in config.keys():
for workflows_dir in config[WORKFLOWS_DIRS_CONFIG_KEY]:
logging.info(f'adding workflows dir {workflows_dir}')
self.add_workflows_path(workflows_dir)
if SCRIPTS_DIRS_CONFIG_KEY in config.keys():
for scripts_dir in config[SCRIPTS_DIRS_CONFIG_KEY]:
logging.info(f'adding scripts dir {scripts_dir}')
self.add_scripts_path(scripts_dir)
path_to_lib_dir = os.path.abspath(os.path.join(CALVALUS_DIR, 'share/lib'))
path_to_bin_dir = os.path.abspath(os.path.join(CALVALUS_DIR, 'share/bin'))
sys.path.insert(0, path_to_lib_dir)
sys.path.insert(0, path_to_bin_dir)
path = os.environ['PATH']
os.environ['PATH'] = f'{path_to_bin_dir}:{path}'
@staticmethod
def get_available_forward_models() -> List[dict]:
dict_list = []
forward_models = get_forward_models()
for model in forward_models:
dict_list.append({
"id": model.id,
"name": model.name,
"description": model.description,
"modelAuthors": model.authors,
"modelUrl": model.url,
"inputType": model.model_data_type,
"type": model.inference_engine_type,
"requiredPriors": model.required_priors,
"variables": model.variables
})
return dict_list
@staticmethod
def get_available_input_types() -> List[dict]:
input_types = []
for input_type in INPUT_TYPES:
input_types.append({"id": input_type, "name": INPUT_TYPES[input_type]["input_data_type_name"],
"timeRange": INPUT_TYPES[input_type]["timeRange"]})
return input_types
@staticmethod
def get_available_variables() -> List[dict]:
dict_list = []
variables = get_registered_variables()
for variable in variables:
dict_list.append({
"id": variable.short_name,
"name": variable.display_name,
"unit": variable.unit,
"description": variable.description,
"valueRange": variable.range,
"mayBeUserPrior": variable.short_name in POSSIBLE_USER_PRIORS,
"applications": variable.applications
})
return dict_list
@staticmethod
def get_available_post_processor_indicators() -> List[dict]:
dict_list = []
indicators = get_available_indicators()
for indicator in indicators:
dict_list.append({
"id": indicator.short_name,
"name": indicator.display_name,
"unit": indicator.unit,
"description": indicator.description,
"valueRange": indicator.range,
"mayBeUserPrior": False,
"applications": indicator.applications
})
return dict_list
@staticmethod
def get_available_post_processors() -> List[dict]:
dict_list = []
post_processor_creators = get_post_processor_creators()
for post_processor_creator in post_processor_creators:
indicator_descriptions = post_processor_creator.get_indicator_descriptions()
indicator_names = []
for indicator_description in indicator_descriptions:
indicator_names.append(indicator_description.short_name)
type = 0
if post_processor_creator.get_type() == PostProcessorType.EO_DATA_POST_PROCESSOR:
type = 1
dict_list.append({
"name": post_processor_creator.get_name(),
"description": post_processor_creator.get_description(),
"type": type,
"inputTypes": post_processor_creator.get_required_input_data_types(),
"indicators": indicator_names,
})
return dict_list
@staticmethod
def set_earth_data_authentication(username: str, password: str):
set_earth_data_authentication(username, password)
@staticmethod
def set_mundi_authentication(access_key_id: str, secret_access_key: str):
set_mundi_authentication(access_key_id, secret_access_key)
def set_working_dir(self, working_dir: str):
# todo remove previous working dirs
self._working_dir = working_dir
sys.path.insert(0, working_dir)
os.environ['PATH'] += f':{working_dir}'
@property
def working_dir(self) -> str:
return self._working_dir
@staticmethod
def add_workflows_path(workflows_path: str):
sys.path.insert(0, workflows_path)
os.environ['PATH'] += f':{workflows_path}'
def add_scripts_path(self, scripts_path: str):
sys.path.insert(0, scripts_path)
scripts = glob.glob(f'{scripts_path}/*.py')
for script in scripts:
read_file = open(script, 'r+')
content = read_file.read()
content = content.replace('{PYTHON}', self._python_dist)
read_file.close()
write_file = open(script, 'w')
write_file.write(content)
write_file.close()
os.environ['PATH'] += f':{scripts_path}'
def get_job(self, id: str):
for job in self.pm_server.queue:
if job.request['requestId'] == id:
return job
def clear(self, type: str):
if type == 'cache':
self.data_access_component.clear_caches()
elif type == 'working':
working_dirs = glob.glob('/data/working_dirs/*')
for working_dir in working_dirs:
shutil.rmtree(working_dir)
elif type == 'archive':
archive_dirs = glob.glob('/data/archive/*')
for archive_dir in archive_dirs:
shutil.rmtree(archive_dir)
elif type == 'aux':
aux_files = glob.glob('/data/auxiliary/**', recursive=True)
for aux_file in aux_files:
if os.path.isfile(aux_file) and not aux_file.endswith('bucket_info.json'):
os.remove(aux_file)
| [
"logging.getLogger",
"sys.path.insert",
"multiply_core.models.get_forward_models",
"pathlib.Path.home",
"share.bin.pmserver.PMServer",
"logging.info",
"os.remove",
"os.path.exists",
"multiply_post_processing.get_post_processor_creators",
"multiply_post_processing.get_available_indicators",
"insp... | [((848, 880), 'sys.path.insert', 'sys.path.insert', (['(0)', 'CALVALUS_DIR'], {}), '(0, CALVALUS_DIR)\n', (863, 880), False, 'import sys\n'), ((1649, 1693), 'os.path.exists', 'os.path.exists', (['path_to_multiply_config_file'], {}), '(path_to_multiply_config_file)\n', (1663, 1693), False, 'import os\n'), ((671, 690), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (688, 690), False, 'import logging\n'), ((759, 779), 'inspect.getfile', 'inspect.getfile', (['Job'], {}), '(Job)\n', (774, 779), False, 'import inspect\n'), ((1379, 1390), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (1388, 1390), False, 'from pathlib import Path\n'), ((1473, 1506), 'os.path.exists', 'os.path.exists', (['multiply_home_dir'], {}), '(multiply_home_dir)\n', (1487, 1506), False, 'import os\n'), ((1516, 1543), 'os.mkdir', 'os.mkdir', (['multiply_home_dir'], {}), '(multiply_home_dir)\n', (1524, 1543), False, 'import os\n'), ((2234, 2253), 'share.bin.pmserver.PMServer', 'pmserver.PMServer', ([], {}), '()\n', (2251, 2253), True, 'import share.bin.pmserver as pmserver\n'), ((3242, 3277), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path_to_lib_dir'], {}), '(0, path_to_lib_dir)\n', (3257, 3277), False, 'import sys\n'), ((3286, 3321), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path_to_bin_dir'], {}), '(0, path_to_bin_dir)\n', (3301, 3321), False, 'import sys\n'), ((3534, 3554), 'multiply_core.models.get_forward_models', 'get_forward_models', ([], {}), '()\n', (3552, 3554), False, 'from multiply_core.models import get_forward_models\n'), ((4537, 4563), 'multiply_core.variables.get_registered_variables', 'get_registered_variables', ([], {}), '()\n', (4561, 4563), False, 'from multiply_core.variables import get_registered_variables\n'), ((5159, 5185), 'multiply_post_processing.get_available_indicators', 'get_available_indicators', ([], {}), '()\n', (5183, 5185), False, 'from multiply_post_processing import get_available_indicators, get_post_processor_creators, PostProcessorType\n'), ((5754, 5783), 'multiply_post_processing.get_post_processor_creators', 'get_post_processor_creators', ([], {}), '()\n', (5781, 5783), False, 'from multiply_post_processing import get_available_indicators, get_post_processor_creators, PostProcessorType\n'), ((6721, 6770), 'vm_support.set_earth_data_authentication', 'set_earth_data_authentication', (['username', 'password'], {}), '(username, password)\n', (6750, 6770), False, 'from vm_support import set_earth_data_authentication, set_mundi_authentication\n'), ((6876, 6934), 'vm_support.set_mundi_authentication', 'set_mundi_authentication', (['access_key_id', 'secret_access_key'], {}), '(access_key_id, secret_access_key)\n', (6900, 6934), False, 'from vm_support import set_earth_data_authentication, set_mundi_authentication\n'), ((7077, 7108), 'sys.path.insert', 'sys.path.insert', (['(0)', 'working_dir'], {}), '(0, working_dir)\n', (7092, 7108), False, 'import sys\n'), ((7315, 7349), 'sys.path.insert', 'sys.path.insert', (['(0)', 'workflows_path'], {}), '(0, workflows_path)\n', (7330, 7349), False, 'import sys\n'), ((7461, 7493), 'sys.path.insert', 'sys.path.insert', (['(0)', 'scripts_path'], {}), '(0, scripts_path)\n', (7476, 7493), False, 'import sys\n'), ((7512, 7545), 'glob.glob', 'glob.glob', (['f"""{scripts_path}/*.py"""'], {}), "(f'{scripts_path}/*.py')\n", (7521, 7545), False, 'import glob\n'), ((1803, 1839), 'yaml.safe_load', 'yaml.safe_load', (['multiply_config_file'], {}), '(multiply_config_file)\n', (1817, 1839), False, 'import yaml\n'), ((3110, 3149), 'os.path.join', 'os.path.join', (['CALVALUS_DIR', '"""share/lib"""'], {}), "(CALVALUS_DIR, 'share/lib')\n", (3122, 3149), False, 'import os\n'), ((3193, 3232), 'os.path.join', 'os.path.join', (['CALVALUS_DIR', '"""share/bin"""'], {}), "(CALVALUS_DIR, 'share/bin')\n", (3205, 3232), False, 'import os\n'), ((2725, 2778), 'logging.info', 'logging.info', (['f"""adding workflows dir {workflows_dir}"""'], {}), "(f'adding workflows dir {workflows_dir}')\n", (2737, 2778), False, 'import logging\n'), ((2967, 3016), 'logging.info', 'logging.info', (['f"""adding scripts dir {scripts_dir}"""'], {}), "(f'adding scripts dir {scripts_dir}')\n", (2979, 3016), False, 'import logging\n'), ((8241, 8274), 'glob.glob', 'glob.glob', (['"""/data/working_dirs/*"""'], {}), "('/data/working_dirs/*')\n", (8250, 8274), False, 'import glob\n'), ((8336, 8362), 'shutil.rmtree', 'shutil.rmtree', (['working_dir'], {}), '(working_dir)\n', (8349, 8362), False, 'import shutil\n'), ((8422, 8450), 'glob.glob', 'glob.glob', (['"""/data/archive/*"""'], {}), "('/data/archive/*')\n", (8431, 8450), False, 'import glob\n'), ((8512, 8538), 'shutil.rmtree', 'shutil.rmtree', (['archive_dir'], {}), '(archive_dir)\n', (8525, 8538), False, 'import shutil\n'), ((8591, 8638), 'glob.glob', 'glob.glob', (['"""/data/auxiliary/**"""'], {'recursive': '(True)'}), "('/data/auxiliary/**', recursive=True)\n", (8600, 8638), False, 'import glob\n'), ((8697, 8721), 'os.path.isfile', 'os.path.isfile', (['aux_file'], {}), '(aux_file)\n', (8711, 8721), False, 'import os\n'), ((8789, 8808), 'os.remove', 'os.remove', (['aux_file'], {}), '(aux_file)\n', (8798, 8808), False, 'import os\n')] |
import csv
import random
exercises_filename = "exercises.csv"
target_muscle = "triceps"
surprise_me = False
def get_list_of_exercies(muscle, surprise):
exercises = [d for d in data if d["muscle"].lower() == muscle.lower()]
if len(exercises) == 0:
return []
if surprise:
return [exercises[random.randrange(len(exercises))]["name"]]
exercises = sorted(exercises, key=lambda e: -float(e["rating"]))
return [e["name"] for e in exercises]
with open(exercises_filename) as f:
data = list(csv.DictReader(f))
print(get_list_of_exercies(target_muscle, surprise_me))
| [
"csv.DictReader"
] | [((531, 548), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (545, 548), False, 'import csv\n')] |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="populate_ssm",
version="0.2.0",
author="<NAME>",
author_email="<EMAIL>",
description="Populate AWS SSM Parameter Store from .env File",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/deploymode/populate-ssm",
project_urls={
"Bug Tracker": "https://github.com/deploymode/populate-ssm/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
install_requires=["boto3==1.18.33", "python-dotenv==0.19.0"],
setup_requires=["flake8"],
entry_points={
"console_scripts": [
"populate-ssm = populate_ssm.__main__:main",
],
},
)
| [
"setuptools.find_packages"
] | [((742, 779), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (766, 779), False, 'import setuptools\n')] |
import json
import httplib2
from graphipy.graph.graph_base import BaseNode as Node, BaseEdge as Edge
class Pinterest:
def __init__(self, api):
self.access_token = api["access_token"]
# get a single user info in JSON format by username
def get_single_user(self, username):
url = "https://api.pinterest.com/v1/users/" + username + "/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
return result
# get a single board info in JSON format by board_url
def get_single_board(self, board_url):
url = "https://api.pinterest.com/v1/boards/" + board_url + "/?access_token=" + self.access_token + \
"&fields=id%2Cname%2Curl%2Ccounts%2Ccreated_at%2Ccreator%2Cdescription%2Cimage%2Cprivacy"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
return result
# get a single pin info in JSON format by pin_id
def get_single_pin(self, pin_id):
url = "https://api.pinterest.com/v1/pins/" + pin_id + "/?access_token=" + self.access_token + \
"&fields=id%2Clink%2Cnote%2Curl%2Cattribution%2Cboard%2Ccolor%2Coriginal_link%2Ccounts%2Ccreated_at%2Ccreator%2Cimage%2Cmedia"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
return result
# get all pins on one board in JSON format by board_url
def get_pins_from_board(self, board_url):
url = "https://api.pinterest.com/v1/boards/" + board_url + \
"/pins/?access_token=" + self.access_token + "&fields=id"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
return result
# get the graph for a single user by username
def fetch_pinterest_user_by_username(self, graph, username):
result = self.get_single_user(username)
user = PinterestUser(result["data"])
graph.create_node(user)
# get the graph for a single board by board_url
def fetch_pinterest_board_by_url(self, graph, board_url):
board_result = self.get_single_board(board_url)
board = PinterestBoard(board_result["data"])
graph.create_node(board)
creator_username = board_result["data"]["creator"]["url"].split('/')[3]
user_result = self.get_single_user(creator_username)
user = PinterestUser(user_result["data"])
graph.create_node(user)
graph.create_edge(Edge(board.get_id(), user.get_id(), "CREATED_BY"))
graph.create_edge(Edge(user.get_id(), board.get_id(), "CREATED"))
pin_result = self.get_pins_from_board(board_url)
for pin in pin_result["data"]:
single_pin_result = self.get_single_pin(pin["id"])
single_pin = PinterestPin(single_pin_result["data"])
graph.create_node(single_pin)
graph.create_edge(Edge(board.get_id(), single_pin.get_id(), "HAS"))
graph.create_edge(Edge(single_pin.get_id(), board.get_id(), "ON"))
# get the graph for a single pin by pin_id
def fetch_pinterest_pin_by_id(self, graph, pin_id):
pin_result = self.get_single_pin(pin_id)
pin = PinterestPin(pin_result["data"])
graph.create_node(pin)
creator_username = pin_result["data"]["creator"]["url"].split('/')[3]
user_result = self.get_single_user(creator_username)
user = PinterestUser(user_result["data"])
graph.create_node(user)
graph.create_edge(Edge(pin.get_id(), user.get_id(), "CREATED_BY"))
graph.create_edge(Edge(user.get_id(), pin.get_id(), "CREATED"))
board_url = pin_result["data"]["board"]["url"].split(
'/')[3] + "/" + pin_result["data"]["board"]["url"].split('/')[4]
board_result = self.get_single_board(board_url)
board = PinterestBoard(board_result["data"])
graph.create_node(board)
graph.create_edge(Edge(pin.get_id(), board.get_id(), "ON"))
graph.create_edge(Edge(board.get_id(), pin.get_id(), "HAS"))
# get the graph for mine as user node
def fetch_pinterest_my_usernode(self, graph):
url = "https://api.pinterest.com/v1/me/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
user = PinterestUser(result["data"])
graph.create_node(user)
# get the graph of my boards
def fetch_pinterest_my_boards(self, graph):
url = "https://api.pinterest.com/v1/me/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Cbio%2Caccount_type%2Ccounts%2Ccreated_at%2Cimage%2Cusername"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
user = PinterestUser(result["data"])
graph.create_node(user)
url = "https://api.pinterest.com/v1/me/boards/?access_token=" + self.access_token + \
"&fields=id%2Cname%2Curl%2Ccounts%2Ccreated_at%2Ccreator%2Cdescription%2Cimage%2Cprivacy"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
for myboard in result["data"]:
board = PinterestBoard(myboard)
graph.create_node(board)
graph.create_edge(Edge(board.get_id(), user.get_id(), "CREATED_BY"))
graph.create_edge(Edge(user.get_id(), board.get_id(), "CREATED"))
# get the graph of my pins
def fetch_pinterest_my_pins(self, graph):
url = "https://api.pinterest.com/v1/me/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Cbio%2Caccount_type%2Ccounts%2Ccreated_at%2Cimage%2Cusername"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
user = PinterestUser(result["data"])
graph.create_node(user)
url = "https://api.pinterest.com/v1/me/pins/?access_token=" + self.access_token + \
"&fields=id%2Clink%2Cnote%2Curl%2Cattribution%2Cboard%2Ccolor%2Coriginal_link%2Ccounts%2Ccreated_at%2Ccreator%2Cimage%2Cmedia"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
for mypin in result["data"]:
pin = PinterestPin(mypin)
graph.create_node(pin)
graph.create_edge(Edge(pin.get_id(), user.get_id(), "CREATED_BY"))
graph.create_edge(Edge(user.get_id(), pin.get_id(), "CREATED"))
# get the graph of my followers
def fetch_pinterest_my_followers(self, graph):
url = "https://api.pinterest.com/v1/me/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
user = PinterestUser(result["data"])
graph.create_node(user)
url = "https://api.pinterest.com/v1/me/followers/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
for myfollower in result["data"]:
follower = PinterestUser(myfollower)
graph.create_node(follower)
graph.create_edge(Edge(user.get_id(), follower.get_id(), "FOLLOWED_BY"))
# get the graph of my following users
def fetch_pinterest_my_following_users(self, graph):
url = "https://api.pinterest.com/v1/me/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
user = PinterestUser(result["data"])
graph.create_node(user)
url = "https://api.pinterest.com/v1/me/following/users/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
for myfollowing in result["data"]:
following = PinterestUser(myfollowing)
graph.create_node(following)
graph.create_edge(Edge(user.get_id(), following.get_id(), "FOLLOWING"))
# get the graph of my following boards
def fetch_pinterest_my_following_boards(self, graph):
url = "https://api.pinterest.com/v1/me/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
user = PinterestUser(result["data"])
graph.create_node(user)
url = "https://api.pinterest.com/v1/me/following/boards/?access_token=" + self.access_token + \
"&fields=id%2Cname%2Curl%2Ccounts%2Ccreated_at%2Ccreator%2Cdescription%2Cimage%2Cprivacy"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
for myfollowingboard in result["data"]:
followingboard = PinterestBoard(myfollowingboard)
graph.create_node(followingboard)
graph.create_edge(Edge(user.get_id(), followingboard.get_id(), "FOLLOWING"))
creator_username = myfollowingboard["creator"]["url"].split('/')[3]
creator_result = self.get_single_user(creator_username)
creator = PinterestUser(creator_result["data"])
graph.create_node(creator)
graph.create_edge(Edge(followingboard.get_id(), creator.get_id(), "CREATED_BY"))
graph.create_edge(Edge(creator.get_id(), followingboard.get_id(), "CREATED"))
board_url = myfollowingboard["url"].split(
'/')[3] + "/" + myfollowingboard["url"].split('/')[4]
pin_result = self.get_pins_from_board(board_url)
for pin in pin_result["data"]:
single_pin_result = self.get_single_pin(pin["id"])
single_pin = PinterestPin(single_pin_result["data"])
graph.create_node(single_pin)
graph.create_edge(Edge(followingboard.get_id(), single_pin.get_id(), "HAS"))
graph.create_edge(Edge(single_pin.get_id(), followingboard.get_id(), "ON"))
# User node of Pinterest
class PinterestUser(Node):
def __init__(self, result):
label = result["first_name"] + " " + result["last_name"]
Node.__init__(self, result["id"], label, "user")
self.username = result["username"]
self.first_name = result["first_name"]
self.last_name = result["last_name"]
self.bio = result["bio"]
self.account_type = result["account_type"]
self.url = result["url"]
self.image_url = result["image"]["60x60"]["url"]
self.created_at = result["created_at"]
self.pins_count = result["counts"]["pins"]
self.following_count = result["counts"]["following"]
self.followers_count = result["counts"]["followers"]
self.boards_count = result["counts"]["boards"]
# Board node of Pinterest
class PinterestBoard(Node):
def __init__(self, result):
Node.__init__(self, result["id"], result["name"], "board")
self.name = result["name"]
self.url = result["url"]
self.image_url = result["image"]["60x60"]["url"]
self.created_at = result["created_at"]
self.privacy = result["privacy"]
self.pins_count = result["counts"]["pins"]
self.collaborators_count = result["counts"]["collaborators"]
self.followers_count = result["counts"]["followers"]
self.description = result["description"]
# Pin node of Pinterest
class PinterestPin(Node):
def __init__(self, result):
Node.__init__(self, result["id"], "pin_" + result["id"], "pin")
self.url = result["url"]
self.image_url = result["image"]["original"]["url"]
self.link = result["link"]
self.media = result["media"]["type"]
self.original_link = result["original_link"]
self.created_at = result["created_at"]
self.note = result["note"]
self.color = result["color"]
self.saves = result["counts"]["saves"]
self.comments = result["counts"]["comments"]
| [
"httplib2.Http",
"graphipy.graph.graph_base.BaseNode.__init__"
] | [((536, 551), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (549, 551), False, 'import httplib2\n'), ((1008, 1023), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (1021, 1023), False, 'import httplib2\n'), ((1502, 1517), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (1515, 1517), False, 'import httplib2\n'), ((1907, 1922), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (1920, 1922), False, 'import httplib2\n'), ((4690, 4705), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (4703, 4705), False, 'import httplib2\n'), ((5194, 5209), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (5207, 5209), False, 'import httplib2\n'), ((5605, 5620), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (5618, 5620), False, 'import httplib2\n'), ((6307, 6322), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (6320, 6322), False, 'import httplib2\n'), ((6753, 6768), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (6766, 6768), False, 'import httplib2\n'), ((7451, 7466), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (7464, 7466), False, 'import httplib2\n'), ((7881, 7896), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (7894, 7896), False, 'import httplib2\n'), ((8542, 8557), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (8555, 8557), False, 'import httplib2\n'), ((8978, 8993), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (8991, 8993), False, 'import httplib2\n'), ((9644, 9659), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (9657, 9659), False, 'import httplib2\n'), ((10065, 10080), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (10078, 10080), False, 'import httplib2\n'), ((11623, 11671), 'graphipy.graph.graph_base.BaseNode.__init__', 'Node.__init__', (['self', "result['id']", 'label', '"""user"""'], {}), "(self, result['id'], label, 'user')\n", (11636, 11671), True, 'from graphipy.graph.graph_base import BaseNode as Node, BaseEdge as Edge\n'), ((12352, 12410), 'graphipy.graph.graph_base.BaseNode.__init__', 'Node.__init__', (['self', "result['id']", "result['name']", '"""board"""'], {}), "(self, result['id'], result['name'], 'board')\n", (12365, 12410), True, 'from graphipy.graph.graph_base import BaseNode as Node, BaseEdge as Edge\n'), ((12946, 13009), 'graphipy.graph.graph_base.BaseNode.__init__', 'Node.__init__', (['self', "result['id']", "('pin_' + result['id'])", '"""pin"""'], {}), "(self, result['id'], 'pin_' + result['id'], 'pin')\n", (12959, 13009), True, 'from graphipy.graph.graph_base import BaseNode as Node, BaseEdge as Edge\n')] |
import numpy as np
import itertools
import math
class SubwayFinder:
def find_subways(self, grouped_classifications):
if not all(letter in grouped_classifications for letter in ("S","U","B","W","A","Y")):
print("Can not find all parts of logo")
return []
sorted_classifications = [grouped_classifications[letter] for letter in ("S","U","B","W","A","Y")]
# import pdb; pdb.set_trace()
combinations = list(itertools.product(*sorted_classifications))
result = []
errors = [self._mean_squared_error(combination) for combination in combinations]
combinations_with_errors = sorted(zip(combinations, errors), key=lambda pair: pair[1])
while(len(combinations_with_errors) > 0):
best, _error = self._best_fitted_combination(combinations_with_errors)
if best == None: break
result.append(best)
combinations_with_errors = self._remove_combination(combinations_with_errors, best)
return result
def _mean_squared_error(self, classifications):
y = [np.mean(classification.segment.box[0]) for classification in classifications]
x = [np.mean(classification.segment.box[1]) for classification in classifications]
a, b = np.polyfit(x, y, 1)
prediction = np.vectorize(lambda x: a*x + b)
return np.mean((prediction(x) - y) ** 2)
def _best_fitted_combination(self, combinations_with_errors):
if len(combinations_with_errors) < 1 or combinations_with_errors[0][1] > 100:
return(None, math.inf)
return combinations_with_errors[0]
def _remove_combination(self, combinations, best):
rest_combinations = []
best_segments = [classification.segment for classification in best]
for combination, error in combinations:
combination_segments = [classification.segment for classification in combination]
if (combination_segments[0] != best_segments[0] and
combination_segments[1] != best_segments[1] and
combination_segments[2] != best_segments[2] and
combination_segments[3] != best_segments[3] and
combination_segments[4] != best_segments[4] and
combination_segments[5] != best_segments[5]):
rest_combinations.append((combination, error))
return rest_combinations
| [
"numpy.mean",
"itertools.product",
"numpy.vectorize",
"numpy.polyfit"
] | [((1286, 1305), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (1296, 1305), True, 'import numpy as np\n'), ((1327, 1360), 'numpy.vectorize', 'np.vectorize', (['(lambda x: a * x + b)'], {}), '(lambda x: a * x + b)\n', (1339, 1360), True, 'import numpy as np\n'), ((465, 507), 'itertools.product', 'itertools.product', (['*sorted_classifications'], {}), '(*sorted_classifications)\n', (482, 507), False, 'import itertools\n'), ((1101, 1139), 'numpy.mean', 'np.mean', (['classification.segment.box[0]'], {}), '(classification.segment.box[0])\n', (1108, 1139), True, 'import numpy as np\n'), ((1192, 1230), 'numpy.mean', 'np.mean', (['classification.segment.box[1]'], {}), '(classification.segment.box[1])\n', (1199, 1230), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from autodcf.models._base import AbstractDCF
from datetime import datetime
class DCF(AbstractDCF):
"""Class for flexible DCF.
Note that all _to_sales args take either an iterable or float. If given a float, the DCF will
use this constant across all time periods (ex: if given 0.45 for COGS, COGS will be 45% of sales
for all forecasted periods). If given iterable, the first value will be the value used for the first
year in the forecast and the last value will be the value used in the terminal year.
Args:
company (autodcf.company.Company): Company to do DCF analysis for.
sales_growth (Union[Iterable, float]): Iterable of sales growth numbers to iterate over or constant growth rate.
Values are in order, so first value in iterable applies to next sales period and
last value applies to last sales period in DCF. Note, if you want to have 5% sales growth, use 0.05.
discount_rate (float): Rate at which cash flow should be discounted.
terminal_growth_rate (float): Rate at which sales are estimated to grow after returning to normal profit levels.
window (int): Number of years until company returns to normal profit margins (terminal year).
cogs_to_sales (Union[Iterable, float]): COGS as % of sales.
sga_to_sales (Union[Iterable, float]): SGA as % of sales.
rd_to_sales (Union[Iterable, float]): R&D as % of sales.
da_to_sales (Union[Iterable, float]): Depreciation & amortization as % of sales. Assumes amortization is tax
deductible.
interest_to_sales (Union[Iterable, float]): Interest as % of sales.
tax_rate (float): Tax rate.
capex_to_sales (Union[Iterable, float]): Capex as % of sales.
change_in_nwc_to_change_in_sales (float): Ratio of how much net working capital must change to increase sales by
1 unit.
"""
def __init__(self,
company,
sales_growth,
discount_rate,
terminal_growth_rate,
window,
cogs_to_sales,
sga_to_sales,
rd_to_sales,
da_to_sales,
interest_to_sales,
tax_rate,
capex_to_sales,
change_in_nwc_to_change_in_sales,
terminal_discount_rate=None):
self._company = company
self._sales_growth = sales_growth
self._discount_rate = discount_rate
self._terminal_growth_rate = terminal_growth_rate
self._window = window
self._cogs_to_sales = cogs_to_sales
self._sga_to_sales = sga_to_sales
self._rd_to_sales = rd_to_sales
self._da_to_sales = da_to_sales
self._interest_to_sales = interest_to_sales
self._tax_rate = tax_rate
self._capex_to_sales = capex_to_sales
self._change_in_nwc_to_change_in_sales = change_in_nwc_to_change_in_sales
self._forecast = pd.DataFrame(index=np.arange(-1, self.window + 1))
self._terminal_discount_rate = discount_rate if terminal_discount_rate is None else terminal_discount_rate
@property
def company(self):
"""Company object to do DCF for."""
return self._company
@property
def sales_growth(self):
"""Numpy array of sales growth for each year until end of window."""
return self._sales_growth
@property
def discount_rate(self):
"""Discount rate to discount cash flow at."""
return self._discount_rate
@property
def terminal_discount_rate(self):
"""Discount rate after terminal year."""
return self._terminal_discount_rate
@property
def terminal_growth_rate(self):
"""Rate at which sales are expected to grow perpetually."""
return self._terminal_growth_rate
@property
def window(self):
"""Periods of normal sales growth until terminal growth rate takes over."""
return self._window
@property
def cogs_to_sales(self):
"""Cost of goods sold as a percentage of sales."""
return self._cogs_to_sales
@property
def sga_to_sales(self):
"""Selling, general, and administrative costs as a percentage of sales."""
return self._sga_to_sales
@property
def rd_to_sales(self):
"""Research and development costs as a percentage of sales."""
return self._rd_to_sales
@property
def da_to_sales(self):
"""Depreciation and amortization as a percentage of sales."""
return self._da_to_sales
@property
def interest_to_sales(self):
"""Interest expense as a percentage of sales."""
return self._interest_to_sales
@property
def tax_rate(self):
"""Effective tax rate for company."""
return self._tax_rate
@property
def capex_to_sales(self):
"""Capital expenditures as a percentage of sales."""
return self._capex_to_sales
@property
def change_in_nwc_to_change_in_sales(self):
"""How much net working capital is expected to need to increase for each dollar increase in sales."""
return self._change_in_nwc_to_change_in_sales
def _calculate_sales(self):
"""Calculate sales for window of growth.
Returns:
Numpy array with sales from each period in order.
"""
sales_growth = np.repeat(self.sales_growth, self.window + 1) if isinstance(self.sales_growth,
float) else self.sales_growth
initial_sales = self.company.income_statement.sales
return np.concatenate(([initial_sales], initial_sales * np.cumprod(1 + sales_growth)))
def _multiply_by_sales_percent(self, percent_of_sales):
"""Find values for stat in all periods given percent of sales stat accounts for.
Returns:
Pandas series with statistic multiplied by forecast Sales values.
"""
return self._forecast['Sales'] * percent_of_sales
def _calculate_free_cash_flow(self):
"""Calculate free cash flow for each period.
Returns:
Pandas Series with free cash flow for each period in forecast.
"""
return self._forecast['Net Income'] + self._forecast['D&A'] - self._forecast['Capex'] - self._forecast[
'Change in NWC']
def _discount_cash_flows(self):
"""Discount cash flows at given discount rate."""
discount_factors = np.array([1 / (1 + self.discount_rate) ** i for i in range(self.window + 1)])
return self._forecast.loc[0:, 'FCF'] * discount_factors
def forecast(self):
"""Get pandas dataframe with all info needed to complete forecast.
Returns:
forecast (pd.DataFrame): Pandas data frame with forecasted future income statements and discounted
free cash flows.
"""
self._forecast['Year'] = np.arange(datetime.now().year - 1, datetime.now().year + self.window + 1)
self._forecast['Sales'] = self._calculate_sales()
self._forecast['COGS'] = self._multiply_by_sales_percent(self.cogs_to_sales)
self._forecast['Gross Profit'] = self._forecast['Sales'] - self._forecast['COGS']
self._forecast['SG&A'] = self._multiply_by_sales_percent(self.sga_to_sales)
self._forecast['Operating Profit'] = self._forecast['Gross Profit'] - self._forecast['SG&A']
self._forecast['R&D'] = self._multiply_by_sales_percent(self.rd_to_sales)
self._forecast['EBITDA'] = self._forecast['Operating Profit'] - self._forecast['R&D']
self._forecast['D&A'] = self._multiply_by_sales_percent(self.da_to_sales)
self._forecast['EBIT'] = self._forecast['EBITDA'] - self._forecast['D&A'] # noqa:E501
self._forecast['Interest'] = self._multiply_by_sales_percent(self.interest_to_sales)
self._forecast['EBT'] = self._forecast['EBIT'] - self._forecast['Interest']
self._forecast['Taxes'] = self._forecast['EBT'] * self.tax_rate
self._forecast.loc[-1, 'Taxes'] = self.company.income_statement.tax
self._forecast['Net Income'] = self._forecast['EBT'] - self._forecast['Taxes']
self._forecast['Capex'] = self._multiply_by_sales_percent(self.capex_to_sales)
# ΔSales * ΔNWC/ΔSales = ΔNWC
change_in_sales = np.diff(self._forecast['Sales'])
future_changes_nwc = change_in_sales * self.change_in_nwc_to_change_in_sales
self._forecast['Change in NWC'] = np.concatenate(([0.0], future_changes_nwc))
self._forecast['FCF'] = self._calculate_free_cash_flow()
self._forecast['Discounted FCF'] = self._discount_cash_flows()
return self._forecast
@property
def enterprise_value(self):
"""Enterprise value given by discounted cash flow analysis."""
return self.discounted_window_cash_flow + self.discounted_terminal_cash_flow
@property
def equity_value(self):
"""Returns total equity value of firm."""
return self.enterprise_value - self.company.balance_sheet.net_debt
@property
def equity_value_per_share(self):
"""Equity value divided by total number of shares outstanding."""
return self.equity_value / self.company.fully_diluted_shares
@property
def discounted_terminal_cash_flow(self):
"""Sum of discounted cash flows after window."""
f = self.forecast()
last_fcf = f.loc[self.window, 'Discounted FCF']
terminal_discount_minus_growth = (self.terminal_discount_rate - self.terminal_growth_rate)
tv_discounted_to_window = last_fcf * (1 + self.terminal_growth_rate) / terminal_discount_minus_growth
return tv_discounted_to_window / (1 + self.discount_rate) ** self.window
@property
def discounted_window_cash_flow(self):
"""Add up discounted cash flows from window."""
f = self.forecast()
return f.loc[0:, 'Discounted FCF'].sum()
@property
def absolute_upside_per_share(self):
return self.equity_value_per_share - self.company.price_per_share
@property
def percent_upside_per_share(self):
return self.absolute_upside_per_share / self.company.price_per_share
| [
"numpy.repeat",
"numpy.arange",
"numpy.diff",
"datetime.datetime.now",
"numpy.concatenate",
"numpy.cumprod"
] | [((8472, 8504), 'numpy.diff', 'np.diff', (["self._forecast['Sales']"], {}), "(self._forecast['Sales'])\n", (8479, 8504), True, 'import numpy as np\n'), ((8632, 8675), 'numpy.concatenate', 'np.concatenate', (['([0.0], future_changes_nwc)'], {}), '(([0.0], future_changes_nwc))\n', (8646, 8675), True, 'import numpy as np\n'), ((5489, 5534), 'numpy.repeat', 'np.repeat', (['self.sales_growth', '(self.window + 1)'], {}), '(self.sales_growth, self.window + 1)\n', (5498, 5534), True, 'import numpy as np\n'), ((3074, 3104), 'numpy.arange', 'np.arange', (['(-1)', '(self.window + 1)'], {}), '(-1, self.window + 1)\n', (3083, 3104), True, 'import numpy as np\n'), ((5805, 5833), 'numpy.cumprod', 'np.cumprod', (['(1 + sales_growth)'], {}), '(1 + sales_growth)\n', (5815, 5833), True, 'import numpy as np\n'), ((7074, 7088), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7086, 7088), False, 'from datetime import datetime\n'), ((7099, 7113), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7111, 7113), False, 'from datetime import datetime\n')] |
import json
import re
import requests
from lxml import etree
def get(url: str) -> dict:
"""
title、imgs、videos
"""
data = {}
headers = {
"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25",
"Cookie": "did=web_68e0268146694843a92700d2de49a0a6;"
}
# rewrite the desktop url
temp = re.findall(r'live\.kuaishou\.com/u/\w+/(\w+)', url)
if temp:
url = 'https://c.kuaishou.com/fw/photo/{}'.format(temp[0])
rep = requests.get(url, headers=headers, timeout=10)
if rep.status_code == 200:
tree = etree.HTML(rep.text) # pylint: disable=c-extension-no-member
# title
desc = tree.xpath(r"//meta[@name='description']/@content")
if desc:
data['title'] = desc[0]
# imgs
imgs = tree.xpath(r"//img[@class='play-long-image']/@src")
if imgs:
data['imgs'] = ["https:" + i for i in imgs]
# videos
hide_data = tree.xpath(r"//div[@id='hide-pagedata']/@data-pagedata")
if hide_data:
try:
data_ = json.loads(hide_data[0])
data['videos'] = [data_['video']['srcNoMark']]
data['title'] = data['videoName'] = data_['video']['caption']
except Exception:
pass
return data
return {'msg': 'failed...'}
if __name__ == "__main__":
from pprint import pprint
pprint(get(input("url: ")))
| [
"re.findall",
"lxml.etree.HTML",
"requests.get",
"json.loads"
] | [((429, 483), 're.findall', 're.findall', (['"""live\\\\.kuaishou\\\\.com/u/\\\\w+/(\\\\w+)"""', 'url'], {}), "('live\\\\.kuaishou\\\\.com/u/\\\\w+/(\\\\w+)', url)\n", (439, 483), False, 'import re\n'), ((572, 618), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'timeout': '(10)'}), '(url, headers=headers, timeout=10)\n', (584, 618), False, 'import requests\n'), ((665, 685), 'lxml.etree.HTML', 'etree.HTML', (['rep.text'], {}), '(rep.text)\n', (675, 685), False, 'from lxml import etree\n'), ((1178, 1202), 'json.loads', 'json.loads', (['hide_data[0]'], {}), '(hide_data[0])\n', (1188, 1202), False, 'import json\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-18 09:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dynamic_schemas', '0023_auto_20180118_1030'),
]
operations = [
migrations.RenameModel(
old_name='SchemaHelpUrl',
new_name='SchemaUrl',
),
]
| [
"django.db.migrations.RenameModel"
] | [((300, 370), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""SchemaHelpUrl"""', 'new_name': '"""SchemaUrl"""'}), "(old_name='SchemaHelpUrl', new_name='SchemaUrl')\n", (322, 370), False, 'from django.db import migrations\n')] |
import asyncio
import json
import logging
from typing import List, Set
import websockets
class BrowserWebsocketServer:
"""
The BrowserWebsocketServer manages our connection to our browser extension,
brokering messages between Google Meet and our plugin's EventHandler.
We expect browser tabs (and our websockets) to come and go, and our plugin is
long-lived, so we have a lot of exception handling to do here to keep the
plugin running. Most actions are "best effort".
We also have to handle the possibility of multiple browser websockets at the
same time, e.g. in case the user refreshes their Meet window and we have stale
websockets hanging around, or if we have multiple Meet tabs.
"""
def __init__(self):
"""
Remember to call start() before attempting to use your new instance!
"""
self._logger = logging.getLogger(__name__)
"""
Store all of the connected sockets we have open to the browser extension,
so we can use them to send outbound messages from this plugin to the
extension.
"""
self._ws_clients: Set[websockets.WebSocketServerProtocol] = set()
"""
Any EventHandlers registered to receive inbound events from the browser extension.
"""
self._handlers: List["EventHandler"] = []
def start(self, hostname: str, port: int) -> None:
return websockets.serve(self._message_receive_loop, hostname, port)
async def send_to_clients(self, message: str) -> None:
"""
Send a message from our plugin to the Chrome extension. We broadcast to
any connections we have, in case the user has multiple Meet windows/tabs
open.
"""
if self._ws_clients:
self._logger.info(
f"Broadcasting message to connected browser clients: {message}")
await asyncio.wait([client.send(message) for client in self._ws_clients])
else:
self._logger.warn(
("There were no active browser extension clients to send our"
f" message to! Message: {message}"))
def register_event_handler(self, handler: "EventHandler") -> None:
"""
Register your EventHandler to have it receive callbacks whenever we
get an event over the wire from the browser extension.
"""
self._handlers.append(handler)
def num_connected_clients(self) -> int:
return len(self._ws_clients)
def _register_client(self, ws: websockets.WebSocketServerProtocol) -> None:
self._ws_clients.add(ws)
self._logger.info(
(f"{ws.remote_address} has connected to our browser websocket."
f" We now have {len(self._ws_clients)} active connection(s)."))
async def _unregister_client(self, ws: websockets.WebSocketServerProtocol) -> None:
try:
await ws.close()
except:
self._logger.exception(
"Exception while closing browser webocket connection.")
if ws in self._ws_clients:
self._ws_clients.remove(ws)
self._logger.info(
(f"{ws.remote_address} has disconnected from our browser websocket."
f" We now have {len(self._ws_clients)} active connection(s) remaining."))
async def _message_receive_loop(self, ws: websockets.WebSocketServerProtocol, uri: str) -> None:
"""
Loop of waiting for and processing inbound websocket messages, until the
connection dies. Each connection will create one of these coroutines.
"""
self._register_client(ws)
try:
async for message in ws:
self._logger.info(
f"Received inbound message from browser extension. Message: {message}")
await self._process_inbound_message(message)
except:
self._logger.exception(
"BrowserWebsocketServer encountered an exception while waiting for inbound messages.")
finally:
await self._unregister_client(ws)
if not self._ws_clients:
for handler in self._handlers:
try:
await handler.on_all_browsers_disconnected()
except:
self._logger.exception(
"Connection mananger received an exception from EventHandler!")
async def _process_inbound_message(self, message: str) -> None:
"""
Process one individual inbound websocket message.
"""
try:
parsed_event = json.loads(message)
except:
self._logger.exception(
f"Failed to parse browser websocket message as JSON. Message: {message}")
return
for handler in self._handlers:
try:
await handler.on_browser_event(parsed_event)
except:
self._logger.exception(
"Connection mananger received an exception from EventHandler!")
| [
"logging.getLogger",
"websockets.serve",
"json.loads"
] | [((884, 911), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (901, 911), False, 'import logging\n'), ((1426, 1486), 'websockets.serve', 'websockets.serve', (['self._message_receive_loop', 'hostname', 'port'], {}), '(self._message_receive_loop, hostname, port)\n', (1442, 1486), False, 'import websockets\n'), ((4610, 4629), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (4620, 4629), False, 'import json\n')] |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.stderr.write('''
Warning
Modules in the "future" directory (dmrgscf, fciqmcscf, shciscf, icmspt, xianci)
have been moved to pyscf/pyscf directory. You can still import these modules.
from the "future" directory, and they work the same as before.
To avoid name conflicts with python built-in module "future", this directory
will be deleted in future release.
''')
| [
"sys.stderr.write"
] | [((623, 1004), 'sys.stderr.write', 'sys.stderr.write', (['"""\n\nWarning\n\nModules in the "future" directory (dmrgscf, fciqmcscf, shciscf, icmspt, xianci)\nhave been moved to pyscf/pyscf directory. You can still import these modules.\nfrom the "future" directory, and they work the same as before.\n\nTo avoid name conflicts with python built-in module "future", this directory\nwill be deleted in future release.\n\n"""'], {}), '(\n """\n\nWarning\n\nModules in the "future" directory (dmrgscf, fciqmcscf, shciscf, icmspt, xianci)\nhave been moved to pyscf/pyscf directory. You can still import these modules.\nfrom the "future" directory, and they work the same as before.\n\nTo avoid name conflicts with python built-in module "future", this directory\nwill be deleted in future release.\n\n"""\n )\n', (639, 1004), False, 'import sys\n')] |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import tempfile
import megengine.data as data
import pytest
from basecls.configs import BaseConfig
from basecls.data import ColorAugment, build_dataloader
@pytest.mark.parametrize("train", [True, False])
def test_build_folderloader(train):
with tempfile.TemporaryDirectory() as dataset_path:
subset_name = "train" if train else "val"
data_dict = dict(num_workers=2)
data_dict[f"{subset_name}_path"] = dataset_path
cfg = BaseConfig(data=data_dict)
augments = ColorAugment.build(cfg) if train else None
dataloader = build_dataloader(cfg, train, augments, "folder")
assert isinstance(dataloader, data.DataLoader)
| [
"basecls.configs.BaseConfig",
"tempfile.TemporaryDirectory",
"basecls.data.build_dataloader",
"pytest.mark.parametrize",
"basecls.data.ColorAugment.build"
] | [((242, 289), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""train"""', '[True, False]'], {}), "('train', [True, False])\n", (265, 289), False, 'import pytest\n'), ((335, 364), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (362, 364), False, 'import tempfile\n'), ((542, 568), 'basecls.configs.BaseConfig', 'BaseConfig', ([], {'data': 'data_dict'}), '(data=data_dict)\n', (552, 568), False, 'from basecls.configs import BaseConfig\n'), ((653, 701), 'basecls.data.build_dataloader', 'build_dataloader', (['cfg', 'train', 'augments', '"""folder"""'], {}), "(cfg, train, augments, 'folder')\n", (669, 701), False, 'from basecls.data import ColorAugment, build_dataloader\n'), ((589, 612), 'basecls.data.ColorAugment.build', 'ColorAugment.build', (['cfg'], {}), '(cfg)\n', (607, 612), False, 'from basecls.data import ColorAugment, build_dataloader\n')] |
from utils import BadID, ParsedData
from .utils import get_page, get_file
from bs4 import BeautifulSoup
from loguru import logger
BASE_LINK = "https://ok.ru/profile/{}"
def _get_link(user_id: str) -> str:
if not user_id.isdigit():
raise BadID
if int(user_id) <= 0:
raise BadID
return BASE_LINK.format(user_id)
async def ok(user_id: str) -> ParsedData:
page_link = _get_link(user_id)
page = BeautifulSoup(await get_page(page_link), features="html.parser")
if page.find("h1", {"tsid": "page-not-found"}):
logger.debug(f"404 for {page_link}")
raise BadID
image_element = page.find("img", {"id": "viewImageLinkId"})
return ParsedData(
face=await get_file("https:" + image_element["src"]),
traits={
"name": [" ".join([x.title() for x in image_element["alt"].split()])],
"ok_url": [page_link]
}
)
| [
"loguru.logger.debug"
] | [((560, 596), 'loguru.logger.debug', 'logger.debug', (['f"""404 for {page_link}"""'], {}), "(f'404 for {page_link}')\n", (572, 596), False, 'from loguru import logger\n')] |
import logging
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render
from zapi.forms import LoginForm
logger = logging.getLogger(__name__)
def index(request):
return render(request, "index.html")
def login(request):
if request.method == 'GET':
form = LoginForm()
return render(request, 'login.html', {'form': form})
else:
form = LoginForm(request.POST)
if form.is_valid():
username = request.POST.get('username', '')
password = request.POST.get('password', '')
user = auth.authenticate(username=username, password=password)
if user and user.is_active:
auth.login(request, user)
logger.info("+" * 100)
logger.info(f"({username}) 登陆了")
logger.info("+" * 100)
return HttpResponseRedirect('/')
else:
logger.error("x" * 100)
logger.error(f"({username}) 尝试登录,输入了错误的密码({password})")
logger.error("x" * 100)
return render(request, 'login.html', {'form': form, 'error_msg': "用户名或密码错误"})
else:
return render(request, 'login.html', {'form': form})
@login_required
def logout(request):
logger.info("-" * 100)
logger.info(f"({request.user.username}) 登出了")
logger.info("-" * 100)
auth.logout(request)
return HttpResponseRedirect("login")
| [
"logging.getLogger",
"django.http.HttpResponseRedirect",
"django.shortcuts.render",
"zapi.forms.LoginForm",
"django.contrib.auth.authenticate",
"django.contrib.auth.login",
"django.contrib.auth.logout"
] | [((231, 258), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (248, 258), False, 'import logging\n'), ((292, 321), 'django.shortcuts.render', 'render', (['request', '"""index.html"""'], {}), "(request, 'index.html')\n", (298, 321), False, 'from django.shortcuts import render\n'), ((1476, 1496), 'django.contrib.auth.logout', 'auth.logout', (['request'], {}), '(request)\n', (1487, 1496), False, 'from django.contrib import auth\n'), ((1508, 1537), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""login"""'], {}), "('login')\n", (1528, 1537), False, 'from django.http import HttpResponseRedirect\n'), ((391, 402), 'zapi.forms.LoginForm', 'LoginForm', ([], {}), '()\n', (400, 402), False, 'from zapi.forms import LoginForm\n'), ((418, 463), 'django.shortcuts.render', 'render', (['request', '"""login.html"""', "{'form': form}"], {}), "(request, 'login.html', {'form': form})\n", (424, 463), False, 'from django.shortcuts import render\n'), ((489, 512), 'zapi.forms.LoginForm', 'LoginForm', (['request.POST'], {}), '(request.POST)\n', (498, 512), False, 'from zapi.forms import LoginForm\n'), ((672, 727), 'django.contrib.auth.authenticate', 'auth.authenticate', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (689, 727), False, 'from django.contrib import auth\n'), ((1283, 1328), 'django.shortcuts.render', 'render', (['request', '"""login.html"""', "{'form': form}"], {}), "(request, 'login.html', {'form': form})\n", (1289, 1328), False, 'from django.shortcuts import render\n'), ((784, 809), 'django.contrib.auth.login', 'auth.login', (['request', 'user'], {}), '(request, user)\n', (794, 809), False, 'from django.contrib import auth\n'), ((960, 985), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/"""'], {}), "('/')\n", (980, 985), False, 'from django.http import HttpResponseRedirect\n'), ((1179, 1249), 'django.shortcuts.render', 'render', (['request', '"""login.html"""', "{'form': form, 'error_msg': '用户名或密码错误'}"], {}), "(request, 'login.html', {'form': form, 'error_msg': '用户名或密码错误'})\n", (1185, 1249), False, 'from django.shortcuts import render\n')] |
import numpy as np
import heapq
from typing import Union
class Graph:
def __init__(self, adjacency_mat: Union[np.ndarray, str]):
""" Unlike project 2, this Graph class takes an adjacency matrix as input. `adjacency_mat`
can either be a 2D numpy array of floats or the path to a CSV file containing a 2D numpy array of floats.
In this project, we will assume `adjacency_mat` corresponds to the adjacency matrix of an undirected graph
"""
if type(adjacency_mat) == str:
self.adj_mat = self._load_adjacency_matrix_from_csv(adjacency_mat)
elif type(adjacency_mat) == np.ndarray:
self.adj_mat = adjacency_mat
else:
raise TypeError('Input must be a valid path or an adjacency matrix')
self.mst = None
def _load_adjacency_matrix_from_csv(self, path: str) -> np.ndarray:
with open(path) as f:
return np.loadtxt(f, delimiter=',')
def construct_mst(self):
""" Given `self.adj_mat`, the adjacency matrix of a connected undirected graph, implement Prim's
algorithm to construct an adjacency matrix encoding the minimum spanning tree of `self.adj_mat`.
`self.adj_mat` is a 2D numpy array of floats.
Note that because we assume our input graph is undirected, `self.adj_mat` is symmetric.
Row i and column j represents the edge weight between vertex i and vertex j. An edge weight of zero indicates that no edge exists.
TODO:
This function does not return anything. Instead, store the adjacency matrix
representation of the minimum spanning tree of `self.adj_mat` in `self.mst`.
We highly encourage the use of priority queues in your implementation. See the heapq
module, particularly the `heapify`, `heappop`, and `heappush` functions.
"""
# initiating the visited list and the adjacency matrix object
visited = []
adj_mat = self.adj_mat
# determining how many vertices there are by looking at the shape of the array
vertices = adj_mat.shape[0]
# creating an object to reflect every vertex in the adj_mat
all_vertices = list(range(vertices))
# creating a new matrix for MST to exist
self.mst = np.array([[0 for column in range(vertices)] for row in range(vertices)])
# creating a priority queue to start out with
# it is a list structured as such: [ (edge weight, start node, end node), etc.]
start = 0
queue = []
for i in range(0,vertices):
if adj_mat[start][i] != 0:
element = adj_mat[start][i], start, i
queue.append(tuple(element))
heapq.heapify(queue)
# appending the start node to visited
visited.append(start)
# begin the while statement
while len(visited) != len(all_vertices):
# pop the lowest weight edge from the queue
weight, vertex_start, vertex_end = heapq.heappop(queue)
# if dest vertex not in visited:
# add edge to mst matrix
# add dest vertex to visited list
# add outgoing edges of dest vertex to priority queue
if vertex_end not in visited:
self.mst[vertex_start][vertex_end] = weight
self.mst[vertex_end][vertex_start] = weight
visited.append(vertex_end)
for i in all_vertices:
heapq.heappush(queue, (adj_mat[vertex_end][i], vertex_end, i))
| [
"heapq.heappush",
"numpy.loadtxt",
"heapq.heapify",
"heapq.heappop"
] | [((2752, 2772), 'heapq.heapify', 'heapq.heapify', (['queue'], {}), '(queue)\n', (2765, 2772), False, 'import heapq\n'), ((925, 953), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (935, 953), True, 'import numpy as np\n'), ((3040, 3060), 'heapq.heappop', 'heapq.heappop', (['queue'], {}), '(queue)\n', (3053, 3060), False, 'import heapq\n'), ((3534, 3596), 'heapq.heappush', 'heapq.heappush', (['queue', '(adj_mat[vertex_end][i], vertex_end, i)'], {}), '(queue, (adj_mat[vertex_end][i], vertex_end, i))\n', (3548, 3596), False, 'import heapq\n')] |
#
# Copyright 2021 <NAME>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import json
from scipy.special import erfinv
def wavelength_filter2D(field, lamb, sigma, hipass=False):
nx = field.shape[0]
measure = nx**2
Lx = 1
mu_init = np.sum(field)**2/measure
sigma_init = np.sqrt(np.sum((field - mu_init)**2)/measure)
print('sigma_init=',sigma_init)
qx = np.arange(0,nx, dtype=np.float64)
qx = np.where(qx <= nx//2, qx/Lx, (nx-qx)/Lx)
qx *= 2 * np.pi
qy = np.arange(0,nx//2 +1, dtype=np.float64)
qy*= 2*np.pi/Lx
q2 = (qx**2).reshape(-1,1) + (qy**2).reshape(1,-1)
filt = np.ones_like(q2)
q_s = 2*np.pi/lamb
if (hipass is True):
filt *= (q2 >= q_s ** 2)
else:
filt *= (q2 <= q_s ** 2)
h_qs = np.fft.irfftn( np.fft.rfftn(field) * filt, field.shape)
mu_filt = np.sum(h_qs)/measure
sigma_filt = np.sqrt(np.sum((h_qs - mu_filt)**2)/measure)
print('sigma_filt=',sigma_filt)
print('mu_filt=',mu_filt)
h_qs *= sigma/sigma_filt
mu_scaled = np.sum(h_qs)/measure
sigma_scaled = np.sqrt(np.sum((h_qs - mu_scaled)**2)/measure)
print('sigma_scaled=',sigma_scaled)
return h_qs
def smoothcutoff2D(field, minimum_val, k=10):
measure = np.array(field.shape).prod()
mu0 = np.sum(field)/measure
print('mu0', mu0)
print('cutval=', minimum_val-mu0)
cutfield = half_sigmoid(field-mu0, minimum_val-mu0, k=k)
mu_cutoff = np.sum(cutfield)/measure
sigma_cutoff = np.sqrt(np.sum((cutfield - mu_cutoff)**2)/measure)
print('sigma_cutoff=',sigma_cutoff)
print('minval_cutoff=',np.amin(cutfield)+mu0)
return cutfield + mu0
def half_sigmoid(f, cutoff, k=10):
x = np.asarray(f)
y = np.asarray(x+0.0)
y[np.asarray(x < 0)] = x[np.asarray(x < 0)]*abs(cutoff)/(
abs(cutoff)**k+np.abs(x[np.asarray(x < 0)])**k)**(1/k)
return y
def threshsymm(field, Vf):
measure = np.array(field.shape).prod()
mu = np.sum(field)/measure
sigma = np.sqrt(np.sum((field-mu)**2/measure))
thresh = 2**0.5*erfinv(2*Vf - 1)
thresh_scaled = thresh*sigma + mu
thresh_field = np.ones_like(field)
thresh_field[field < thresh_scaled] = -1
print(np.sum(thresh_field)/measure)
return thresh_field
def threshmatrix(field, Vf):
measure = np.array(field.shape).prod()
vfl = 0.5-Vf/2
vfu = 0.5+Vf/2
print(vfl, vfu)
threshL = 2**0.5*erfinv(2*vfl - 1)
threshU = 2**0.5*erfinv(2*vfu - 1)
print(threshL, threshU)
mu = np.sum(field)/measure
sigma = np.sqrt(np.sum((field-mu)**2/measure))
thresh_field = np.ones_like(field)
threshscL = threshL*sigma + mu
thresh_field[field < threshscL] = -1
threshscU = threshU*sigma + mu
thresh_field[field > threshscU] = -1
print(np.sum(thresh_field)/measure)
return thresh_field
def ACsmooth2D(field, nits, ACwidth=2**0.5):
a = field+0.0
# f=(W/4)*(1-a)^2*(1+a)^2
# da/dx = 2*sqrt(f)/e
# max da/dx = 2sqrt(W/4*1^4)/e = sqrt(W)/e
# L = delta a/(max da/dx) = 2/(sqrt(W)/e) = 2e/sqrt(W)
# use W = 4/ACw, e^2 = ACw -> L = 2sqrt(ACw)/sqrt(4/ACw) = ACw
for n in range(0,nits):
a -= 0.05*(2*(2*a**3-2*a)/ACwidth + ACwidth*(4*a - np.roll(a,1,axis=0) - np.roll(a,-1,axis=0)
- np.roll(a,1,axis=1) - np.roll(a,-1,axis=1)))
return a
def save_params(propdict, fname="struct2D.json"):
jsonfile = open(fname, mode='w')
json.dump(propdict,jsonfile,default=lambda o: "(array)")
jsonfile.close()
| [
"numpy.ones_like",
"numpy.roll",
"numpy.amin",
"numpy.arange",
"numpy.where",
"numpy.asarray",
"scipy.special.erfinv",
"numpy.fft.rfftn",
"numpy.sum",
"numpy.array",
"json.dump"
] | [((1429, 1463), 'numpy.arange', 'np.arange', (['(0)', 'nx'], {'dtype': 'np.float64'}), '(0, nx, dtype=np.float64)\n', (1438, 1463), True, 'import numpy as np\n'), ((1472, 1520), 'numpy.where', 'np.where', (['(qx <= nx // 2)', '(qx / Lx)', '((nx - qx) / Lx)'], {}), '(qx <= nx // 2, qx / Lx, (nx - qx) / Lx)\n', (1480, 1520), True, 'import numpy as np\n'), ((1542, 1585), 'numpy.arange', 'np.arange', (['(0)', '(nx // 2 + 1)'], {'dtype': 'np.float64'}), '(0, nx // 2 + 1, dtype=np.float64)\n', (1551, 1585), True, 'import numpy as np\n'), ((1668, 1684), 'numpy.ones_like', 'np.ones_like', (['q2'], {}), '(q2)\n', (1680, 1684), True, 'import numpy as np\n'), ((2745, 2758), 'numpy.asarray', 'np.asarray', (['f'], {}), '(f)\n', (2755, 2758), True, 'import numpy as np\n'), ((2767, 2786), 'numpy.asarray', 'np.asarray', (['(x + 0.0)'], {}), '(x + 0.0)\n', (2777, 2786), True, 'import numpy as np\n'), ((3170, 3189), 'numpy.ones_like', 'np.ones_like', (['field'], {}), '(field)\n', (3182, 3189), True, 'import numpy as np\n'), ((3637, 3656), 'numpy.ones_like', 'np.ones_like', (['field'], {}), '(field)\n', (3649, 3656), True, 'import numpy as np\n'), ((4466, 4524), 'json.dump', 'json.dump', (['propdict', 'jsonfile'], {'default': "(lambda o: '(array)')"}), "(propdict, jsonfile, default=lambda o: '(array)')\n", (4475, 4524), False, 'import json\n'), ((1890, 1902), 'numpy.sum', 'np.sum', (['h_qs'], {}), '(h_qs)\n', (1896, 1902), True, 'import numpy as np\n'), ((2084, 2096), 'numpy.sum', 'np.sum', (['h_qs'], {}), '(h_qs)\n', (2090, 2096), True, 'import numpy as np\n'), ((2331, 2344), 'numpy.sum', 'np.sum', (['field'], {}), '(field)\n', (2337, 2344), True, 'import numpy as np\n'), ((2490, 2506), 'numpy.sum', 'np.sum', (['cutfield'], {}), '(cutfield)\n', (2496, 2506), True, 'import numpy as np\n'), ((2791, 2808), 'numpy.asarray', 'np.asarray', (['(x < 0)'], {}), '(x < 0)\n', (2801, 2808), True, 'import numpy as np\n'), ((3003, 3016), 'numpy.sum', 'np.sum', (['field'], {}), '(field)\n', (3009, 3016), True, 'import numpy as np\n'), ((3045, 3080), 'numpy.sum', 'np.sum', (['((field - mu) ** 2 / measure)'], {}), '((field - mu) ** 2 / measure)\n', (3051, 3080), True, 'import numpy as np\n'), ((3096, 3114), 'scipy.special.erfinv', 'erfinv', (['(2 * Vf - 1)'], {}), '(2 * Vf - 1)\n', (3102, 3114), False, 'from scipy.special import erfinv\n'), ((3451, 3470), 'scipy.special.erfinv', 'erfinv', (['(2 * vfl - 1)'], {}), '(2 * vfl - 1)\n', (3457, 3470), False, 'from scipy.special import erfinv\n'), ((3490, 3509), 'scipy.special.erfinv', 'erfinv', (['(2 * vfu - 1)'], {}), '(2 * vfu - 1)\n', (3496, 3509), False, 'from scipy.special import erfinv\n'), ((3545, 3558), 'numpy.sum', 'np.sum', (['field'], {}), '(field)\n', (3551, 3558), True, 'import numpy as np\n'), ((3587, 3622), 'numpy.sum', 'np.sum', (['((field - mu) ** 2 / measure)'], {}), '((field - mu) ** 2 / measure)\n', (3593, 3622), True, 'import numpy as np\n'), ((1296, 1309), 'numpy.sum', 'np.sum', (['field'], {}), '(field)\n', (1302, 1309), True, 'import numpy as np\n'), ((1346, 1376), 'numpy.sum', 'np.sum', (['((field - mu_init) ** 2)'], {}), '((field - mu_init) ** 2)\n', (1352, 1376), True, 'import numpy as np\n'), ((1835, 1854), 'numpy.fft.rfftn', 'np.fft.rfftn', (['field'], {}), '(field)\n', (1847, 1854), True, 'import numpy as np\n'), ((1936, 1965), 'numpy.sum', 'np.sum', (['((h_qs - mu_filt) ** 2)'], {}), '((h_qs - mu_filt) ** 2)\n', (1942, 1965), True, 'import numpy as np\n'), ((2132, 2163), 'numpy.sum', 'np.sum', (['((h_qs - mu_scaled) ** 2)'], {}), '((h_qs - mu_scaled) ** 2)\n', (2138, 2163), True, 'import numpy as np\n'), ((2292, 2313), 'numpy.array', 'np.array', (['field.shape'], {}), '(field.shape)\n', (2300, 2313), True, 'import numpy as np\n'), ((2542, 2577), 'numpy.sum', 'np.sum', (['((cutfield - mu_cutoff) ** 2)'], {}), '((cutfield - mu_cutoff) ** 2)\n', (2548, 2577), True, 'import numpy as np\n'), ((2652, 2669), 'numpy.amin', 'np.amin', (['cutfield'], {}), '(cutfield)\n', (2659, 2669), True, 'import numpy as np\n'), ((2965, 2986), 'numpy.array', 'np.array', (['field.shape'], {}), '(field.shape)\n', (2973, 2986), True, 'import numpy as np\n'), ((3245, 3265), 'numpy.sum', 'np.sum', (['thresh_field'], {}), '(thresh_field)\n', (3251, 3265), True, 'import numpy as np\n'), ((3343, 3364), 'numpy.array', 'np.array', (['field.shape'], {}), '(field.shape)\n', (3351, 3364), True, 'import numpy as np\n'), ((3819, 3839), 'numpy.sum', 'np.sum', (['thresh_field'], {}), '(thresh_field)\n', (3825, 3839), True, 'import numpy as np\n'), ((2814, 2831), 'numpy.asarray', 'np.asarray', (['(x < 0)'], {}), '(x < 0)\n', (2824, 2831), True, 'import numpy as np\n'), ((4338, 4360), 'numpy.roll', 'np.roll', (['a', '(-1)'], {'axis': '(1)'}), '(a, -1, axis=1)\n', (4345, 4360), True, 'import numpy as np\n'), ((2879, 2896), 'numpy.asarray', 'np.asarray', (['(x < 0)'], {}), '(x < 0)\n', (2889, 2896), True, 'import numpy as np\n'), ((4316, 4337), 'numpy.roll', 'np.roll', (['a', '(1)'], {'axis': '(1)'}), '(a, 1, axis=1)\n', (4323, 4337), True, 'import numpy as np\n'), ((4275, 4297), 'numpy.roll', 'np.roll', (['a', '(-1)'], {'axis': '(0)'}), '(a, -1, axis=0)\n', (4282, 4297), True, 'import numpy as np\n'), ((4253, 4274), 'numpy.roll', 'np.roll', (['a', '(1)'], {'axis': '(0)'}), '(a, 1, axis=0)\n', (4260, 4274), True, 'import numpy as np\n')] |
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
def assert_dataframes_equals(expected, actual):
assert expected.shape==actual.shape
assert set(expected.columns) == set(actual.columns)
columns_order = list(expected.columns)
a = actual[columns_order].sort_values(by=list(actual.columns)).reset_index(drop=True)
e = expected[columns_order].sort_values(by=list(actual.columns)).reset_index(drop=True)
assert_frame_equal(e, a, check_dtype=False)
def get_expected_and_actual(df):
actual_query_string = df.df_sql_convert_table.get_sql_string()
actual_columns = df.df_sql_convert_table.columns
datetime_columns = [c for c in actual_columns.keys() if actual_columns[c].dtype == 'DATETIME']
df_actual = pd.read_sql_query(actual_query_string, pytest.sql_connection, parse_dates=datetime_columns)
df_expected = df.df_pandas
return df_expected, df_actual
def assert_(df):
df_expected, df_actual = get_expected_and_actual(df)
# i = df_expected.new_value != df_actual.new_value
# a=df_expected[i][:3]
# b=df_expected[i][:3]
assert_dataframes_equals(df_expected, df_actual)
| [
"pandas.read_sql_query",
"pandas.testing.assert_frame_equal"
] | [((455, 498), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['e', 'a'], {'check_dtype': '(False)'}), '(e, a, check_dtype=False)\n', (473, 498), False, 'from pandas.testing import assert_frame_equal\n'), ((770, 866), 'pandas.read_sql_query', 'pd.read_sql_query', (['actual_query_string', 'pytest.sql_connection'], {'parse_dates': 'datetime_columns'}), '(actual_query_string, pytest.sql_connection, parse_dates=\n datetime_columns)\n', (787, 866), True, 'import pandas as pd\n')] |
from os import getcwd
import re
def parseIndex(rule: str):
idxRegex = re.search(r"(\d+): ", rule)
index = int(idxRegex.groups()[0])
rule = rule[idxRegex.span()[1]:]
return (index, rule)
def parsePattern(pattern: str, idxToPat: dict):
# check 1: is this a final substring?
regex = re.search(r"\"(.*?)\"", pattern)
if regex:
return regex.groups()[0] # return the single character
# check 2: is this a logical or?
regex = re.search(r"(.*) \| (.*)", pattern)
if regex:
pattern1 = regex.groups()[0]
pattern2 = regex.groups()[1]
return f"(?:{parsePattern(pattern1, idxToPat)}|{parsePattern(pattern2, idxToPat)})"
# check 3: is this a list of numbers?
regex = re.search(r"([\d ]*)", pattern)
if regex:
numbers = list(map(int, regex.groups()[0].split(" ")))
retVal = "(?:"
for number in numbers:
retVal += parsePattern(idxToPat[number], idxToPat)
return retVal + ")"
else:
return None
def checkMessage(message: str, rString: str) -> bool:
result = re.search(r"".join(rString), message)
if result:
span = result.span()
return len(message) == (span[1] - span[0])
return False
def main():
with open(f"{getcwd()}/2020/day19/input.txt", "r") as file:
part = file.read().split("\n\n")
rules = part[0].split("\n")
messages = part[1].split("\n")
indexToPattern = {}
indexToRString = {}
for rule in rules:
idxParsed = parseIndex(rule)
indexToPattern[idxParsed[0]] = idxParsed[1]
for key in indexToPattern.keys():
indexToRString[key] = parsePattern(indexToPattern[key], indexToPattern)
matches = 0
for message in messages:
if checkMessage(message, indexToRString[0]):
matches += 1
print(matches)
if __name__ == "__main__":
main()
| [
"os.getcwd",
"re.search"
] | [((81, 108), 're.search', 're.search', (['"""(\\\\d+): """', 'rule'], {}), "('(\\\\d+): ', rule)\n", (90, 108), False, 'import re\n'), ((325, 358), 're.search', 're.search', (['"""\\\\"(.*?)\\\\\\""""', 'pattern'], {}), '(\'\\\\"(.*?)\\\\"\', pattern)\n', (334, 358), False, 'import re\n'), ((493, 528), 're.search', 're.search', (['"""(.*) \\\\| (.*)"""', 'pattern'], {}), "('(.*) \\\\| (.*)', pattern)\n", (502, 528), False, 'import re\n'), ((773, 804), 're.search', 're.search', (['"""([\\\\d ]*)"""', 'pattern'], {}), "('([\\\\d ]*)', pattern)\n", (782, 804), False, 'import re\n'), ((1337, 1345), 'os.getcwd', 'getcwd', ([], {}), '()\n', (1343, 1345), False, 'from os import getcwd\n')] |
""" entry point for execution
"""
import os
import sys
import argparse
from workflow.common.config import cfg
def parse_args():
"""get main arguments from config file and command line
"""
parser = argparse.ArgumentParser(description="Project name")
parser.add_argument(
"-c", "--config-file",
default="configs/example.yaml",
help="path to config file",
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
# load config from file and command-line arguments
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def main(config):
pass
if __name__ == "__main__":
config = parse_args()
main(config)
| [
"workflow.common.config.cfg.merge_from_file",
"workflow.common.config.cfg.freeze",
"workflow.common.config.cfg.merge_from_list",
"argparse.ArgumentParser"
] | [((218, 269), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Project name"""'}), "(description='Project name')\n", (241, 269), False, 'import argparse\n'), ((669, 706), 'workflow.common.config.cfg.merge_from_file', 'cfg.merge_from_file', (['args.config_file'], {}), '(args.config_file)\n', (688, 706), False, 'from workflow.common.config import cfg\n'), ((711, 741), 'workflow.common.config.cfg.merge_from_list', 'cfg.merge_from_list', (['args.opts'], {}), '(args.opts)\n', (730, 741), False, 'from workflow.common.config import cfg\n'), ((746, 758), 'workflow.common.config.cfg.freeze', 'cfg.freeze', ([], {}), '()\n', (756, 758), False, 'from workflow.common.config import cfg\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Tuple
import numpy as np
import pytest
from aicsimageio import exceptions
from aicsimageio.readers.default_reader import DefaultReader
from ..conftest import get_resource_full_path, host
from ..image_container_test_utils import run_image_file_checks
@host
@pytest.mark.parametrize(
"filename, set_scene, expected_shape, expected_dims_order",
[
("example.bmp", "Image:0", (480, 640, 4), "YXS"),
("example.png", "Image:0", (800, 537, 4), "YXS"),
("example.jpg", "Image:0", (452, 400, 3), "YXS"),
("example.gif", "Image:0", (72, 268, 268, 4), "TYXS"),
(
"example_invalid_frame_count.mp4",
"Image:0",
(55, 1080, 1920, 3),
"TYXS",
),
(
"example_valid_frame_count.mp4",
"Image:0",
(72, 272, 272, 3),
"TYXS",
),
pytest.param(
"example.txt",
None,
None,
None,
marks=pytest.mark.raises(exception=exceptions.UnsupportedFileFormatError),
),
pytest.param(
"example.png",
"Image:1",
None,
None,
marks=pytest.mark.raises(exception=IndexError),
),
],
)
def test_default_reader(
filename: str,
host: str,
set_scene: str,
expected_shape: Tuple[int, ...],
expected_dims_order: str,
) -> None:
# Construct full filepath
uri = get_resource_full_path(filename, host)
# Run checks
run_image_file_checks(
ImageContainer=DefaultReader,
image=uri,
set_scene=set_scene,
expected_scenes=("Image:0",),
expected_current_scene="Image:0",
expected_shape=expected_shape,
expected_dtype=np.dtype(np.uint8),
expected_dims_order=expected_dims_order,
expected_channel_names=None,
expected_physical_pixel_sizes=(None, None, None),
expected_metadata_type=dict,
)
def test_ffmpeg_header_fail() -> None:
with pytest.raises(IOError):
# Big Buck Bunny
DefaultReader("https://archive.org/embed/archive-video-files/test.mp4")
| [
"numpy.dtype",
"pytest.mark.raises",
"pytest.raises",
"aicsimageio.readers.default_reader.DefaultReader"
] | [((2099, 2121), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (2112, 2121), False, 'import pytest\n'), ((2156, 2227), 'aicsimageio.readers.default_reader.DefaultReader', 'DefaultReader', (['"""https://archive.org/embed/archive-video-files/test.mp4"""'], {}), "('https://archive.org/embed/archive-video-files/test.mp4')\n", (2169, 2227), False, 'from aicsimageio.readers.default_reader import DefaultReader\n'), ((1842, 1860), 'numpy.dtype', 'np.dtype', (['np.uint8'], {}), '(np.uint8)\n', (1850, 1860), True, 'import numpy as np\n'), ((1065, 1132), 'pytest.mark.raises', 'pytest.mark.raises', ([], {'exception': 'exceptions.UnsupportedFileFormatError'}), '(exception=exceptions.UnsupportedFileFormatError)\n', (1083, 1132), False, 'import pytest\n'), ((1271, 1311), 'pytest.mark.raises', 'pytest.mark.raises', ([], {'exception': 'IndexError'}), '(exception=IndexError)\n', (1289, 1311), False, 'import pytest\n')] |
# python3 code
# import the opencv library
import cv2
import tkinter as tk
class Webcam_Capture:
def __init__(self):
# define a video capture object
self.vid = cv2.VideoCapture(0)
while(True):
# Capture the video frame
# by frame
ret, frame = self.vid.read()
# Display the resulting frame
cv2.imshow('frame', frame)
# the 'q' button is set as the
# quitting button you may use any
# desired button of your choice
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# After the loop release the cap object
self.vid.release()
# Destroy all the windows
cv2.destroyAllWindows()
if __name__ == '__main__':
app = Webcam_Capture()
| [
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.imshow"
] | [((182, 201), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (198, 201), False, 'import cv2\n'), ((791, 814), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (812, 814), False, 'import cv2\n'), ((416, 442), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (426, 442), False, 'import cv2\n'), ((606, 620), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (617, 620), False, 'import cv2\n')] |
import collections
import numpy as np
class Vectorizer(object):
def __init__(self):
self.mapping = {}
self.inverse_mapping = {}
self.embedding_size = 0
def vectorize_string(self, s):
vec = np.empty(len(s))
for i in range(0,len(s)):
char = s[i]
if char in self.mapping:
vec[i] = self.mapping[char]
else:
vec[i] = self.embedding_size
self.mapping[char] = self.embedding_size
self.inverse_mapping[self.embedding_size] = char
self.embedding_size += 1
return vec
def devectorize(self, v):
"""
Devectorizes a vector into a a string
"""
s = ""
for ident in v:
s += self.inverse_mapping[ident]
return s
def vectorize_corpus(corpus):
"""
corpus: A list of strings we want to vectorize
->
vectors: A list of lists that represent vectorized
representations of the strings
vectorizer: A vectorizer that can be used to vectorize and devectorize
the strings
"""
vectorizer = Vectorizer()
# vectors = np.array([])
index = 1
mapping = {}
inverse_mapping = {}
count = 0.0
vectors = []
for i in range(0,len(corpus)):
s = ""
for char in corpus[i, :]:
s += char
vectorized = vectorizer.vectorize_string(corpus[i, :])
# print(vectorized)
vectors.append(vectorized)
return np.array(vectors), vectorizer
| [
"numpy.array"
] | [((1519, 1536), 'numpy.array', 'np.array', (['vectors'], {}), '(vectors)\n', (1527, 1536), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
readme = open('README.md').read()
requirements = open('requirements.txt').readlines()
VERSION = open('VERSION').read().strip()
setup(
name='statsdecor',
version=VERSION,
description='A set of decorators and helper methods '
'for adding statsd metrics to applications.',
long_description=readme + '\n\n',
long_description_content_type='text/markdown',
author='Fresh<NAME>',
author_email='<EMAIL>',
url='https://github.com/freshbooks/statsdecor',
packages=find_packages(exclude=['test*']),
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords='statsd, stats',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests'
)
| [
"distutils.core.find_packages"
] | [((682, 714), 'distutils.core.find_packages', 'find_packages', ([], {'exclude': "['test*']"}), "(exclude=['test*'])\n", (695, 714), False, 'from distutils.core import setup, find_packages\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'robotGUI.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import myFrame as mf
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1100, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.frame = mf.MyFrame(self.centralwidget)
#self.frame = QtWidgets.QFrame(self.centralwidget)
self.frame.setMinimumSize(QtCore.QSize(800, 500))
self.frame.setMaximumSize(QtCore.QSize(800, 500))
self.frame.setAutoFillBackground(False)
self.frame.setFrameShape(QtWidgets.QFrame.Box)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setMidLineWidth(3)
self.frame.setObjectName("frame")
self.gridLayout.addWidget(self.frame, 0, 2, 1, 1)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setMaximumSize(QtCore.QSize(16777215, 25))
font = QtGui.QFont()
font.setFamily("MS Shell Dlg 2")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setMinimumSize(QtCore.QSize(200, 0))
self.textEdit.setObjectName("textEdit")
self.verticalLayout.addWidget(self.textEdit)
self.createButton = QtWidgets.QPushButton(self.centralwidget)
self.createButton.setObjectName("createButton")
self.verticalLayout.addWidget(self.createButton)
self.runButton = QtWidgets.QPushButton(self.centralwidget)
self.runButton.setObjectName("runButton")
self.verticalLayout.addWidget(self.runButton)
self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.sld = QtWidgets.QSlider(self.centralwidget)
self.sld.setProperty("value", 10)
self.sld.setOrientation(QtCore.Qt.Vertical)
self.sld.setObjectName("sld")
self.gridLayout.addWidget(self.sld, 0, 1, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1100, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "Robot List:"))
self.createButton.setText(_translate("MainWindow", "Create Random Domain"))
self.runButton.setText(_translate("MainWindow", "Run Robots"))
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QTextEdit",
"PyQt5.QtGui.QFont",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QSlider",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QGridLayout",
"myFrame.MyFrame",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QStatusBar",
"PyQt5.QtCore.QSi... | [((455, 484), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (472, 484), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((571, 612), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.centralwidget'], {}), '(self.centralwidget)\n', (592, 612), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((688, 718), 'myFrame.MyFrame', 'mf.MyFrame', (['self.centralwidget'], {}), '(self.centralwidget)\n', (698, 718), True, 'import myFrame as mf\n'), ((1234, 1257), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (1255, 1257), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1341, 1377), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1357, 1377), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1457, 1470), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1468, 1470), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1753, 1792), 'PyQt5.QtWidgets.QTextEdit', 'QtWidgets.QTextEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1772, 1792), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1985, 2026), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2006, 2026), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2168, 2209), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2189, 2209), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2404, 2441), 'PyQt5.QtWidgets.QSlider', 'QtWidgets.QSlider', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2421, 2441), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2715, 2745), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (2733, 2745), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2928, 2960), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (2948, 2960), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3112, 3161), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (3149, 3161), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((814, 836), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(800)', '(500)'], {}), '(800, 500)\n', (826, 836), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((873, 895), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(800)', '(500)'], {}), '(800, 500)\n', (885, 895), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1413, 1439), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(16777215)', '(25)'], {}), '(16777215, 25)\n', (1425, 1439), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1831, 1851), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(200)', '(0)'], {}), '(200, 0)\n', (1843, 1851), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2780, 2808), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(1100)', '(21)'], {}), '(0, 0, 1100, 21)\n', (2792, 2808), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
import csv
import os
from botocore.exceptions import ClientError
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import ValidationError
from django.http import Http404, HttpResponseServerError, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.views.generic import FormView, CreateView
from dataworkspace.apps.core.boto3_client import get_s3_client
from dataworkspace.apps.datasets.models import (
ReferenceDataset,
ReferenceDatasetField,
SourceLink,
DataSet,
ReferenceDatasetUploadLog,
ReferenceDatasetUploadLogRecord,
)
from dataworkspace.apps.dw_admin.forms import (
ReferenceDataRowDeleteForm,
ReferenceDataRowDeleteAllForm,
SourceLinkUploadForm,
ReferenceDataRecordUploadForm,
clean_identifier,
)
class ReferenceDataRecordMixin(UserPassesTestMixin):
def test_func(self):
return self.request.user.is_superuser
def _get_reference_dataset(self):
return get_object_or_404(ReferenceDataset, pk=self.kwargs["reference_dataset_id"])
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
reference_dataset = self._get_reference_dataset()
ctx.update(
{
"ref_model": reference_dataset,
"opts": reference_dataset.get_record_model_class()._meta,
"record_id": self.kwargs.get("record_id"),
}
)
return ctx
class ReferenceDatasetAdminEditView(ReferenceDataRecordMixin, FormView):
template_name = "admin/reference_dataset_edit_record.html"
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx["title"] = "{} reference dataset record".format(
"Add" if self.kwargs.get("record_id") is None else "Edit"
)
return ctx
def get_queryset(self):
return self._get_reference_dataset().get_records()
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
reference_dataset = self._get_reference_dataset()
record_id = self.kwargs.get("record_id")
kwargs["initial"] = {"reference_dataset": reference_dataset, "id": record_id}
if record_id is not None:
kwargs["instance"] = get_object_or_404(
reference_dataset.get_record_model_class(),
reference_dataset=reference_dataset,
id=self.kwargs.get("record_id"),
)
return kwargs
def get_form(self, form_class=None):
"""
Dynamically create a model form based on the current state
of the dynamically built record model class
:param form_class:
:return:
"""
reference_dataset = self._get_reference_dataset()
record_model = reference_dataset.get_record_model_class()
field_names = ["reference_dataset"] + [
field.column_name
if field.data_type != ReferenceDatasetField.DATA_TYPE_FOREIGN_KEY
else field.relationship_name
for _, field in reference_dataset.editable_fields.items()
]
class DynamicReferenceDatasetRecordForm(forms.ModelForm):
class Meta:
model = record_model
fields = field_names
include = field_names
widgets = {"reference_dataset": forms.HiddenInput()}
# Add the form fields/widgets
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for _, field in reference_dataset.editable_fields.items():
if field.data_type != ReferenceDatasetField.DATA_TYPE_FOREIGN_KEY:
self.fields[field.column_name] = field.get_form_field()
else:
self.fields[field.relationship_name] = field.get_form_field()
# Add validation for the custom identifier field
setattr(
DynamicReferenceDatasetRecordForm,
"clean_{}".format(reference_dataset.identifier_field.column_name),
clean_identifier,
)
return helpers.AdminForm(
DynamicReferenceDatasetRecordForm(**self.get_form_kwargs()),
list([(None, {"fields": field_names})]),
{},
)
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
def form_valid(self, form):
reference_dataset = self._get_reference_dataset()
try:
reference_dataset.save_record(self.kwargs.get("record_id"), form.form.cleaned_data)
except Exception as e: # pylint: disable=broad-except
form.form.add_error(None, e)
return self.form_invalid(form)
return super().form_valid(form)
def get_success_url(self):
messages.success(
self.request,
"Reference dataset record {} successfully".format(
"updated" if "record_id" in self.kwargs else "added"
),
)
instance = self._get_reference_dataset()
return reverse("admin:datasets_referencedataset_change", args=(instance.id,))
class ReferenceDatasetAdminDeleteView(ReferenceDataRecordMixin, FormView):
form_class = ReferenceDataRowDeleteForm
template_name = "admin/reference_data_delete_record.html"
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx["title"] = "Delete Reference Data Record"
ctx["record"] = self._get_reference_dataset().get_record_by_internal_id(
self.kwargs.get("record_id")
)
return ctx
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
reference_dataset = self._get_reference_dataset()
record_id = self.kwargs.get("record_id")
record = reference_dataset.get_record_by_internal_id(record_id)
if record is None:
raise Http404
kwargs.update({"reference_dataset": reference_dataset, "initial": {"id": record_id}})
return kwargs
def form_valid(self, form):
instance = self._get_reference_dataset()
try:
instance.delete_record(form.cleaned_data["id"])
except Exception as e: # pylint: disable=broad-except
form.add_error(None, e)
return self.form_invalid(form)
return super().form_valid(form)
def get_success_url(self):
messages.success(self.request, "Reference dataset record deleted successfully")
return reverse(
"admin:datasets_referencedataset_change",
args=(self._get_reference_dataset().id,),
)
class ReferenceDatasetAdminDeleteAllView(ReferenceDataRecordMixin, FormView):
template_name = "admin/reference_data_delete_all_records.html"
form_class = ReferenceDataRowDeleteAllForm
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx["reference_dataset"] = self._get_reference_dataset()
ctx["records"] = self._get_reference_dataset().get_records()
return ctx
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({"reference_dataset": self._get_reference_dataset()})
return kwargs
def form_valid(self, form):
instance = self._get_reference_dataset()
try:
instance.delete_all_records()
except Exception as e: # pylint: disable=broad-except
form.add_error(None, e)
return self.form_invalid(form)
return super().form_valid(form)
def get_success_url(self):
messages.success(self.request, "Reference dataset records deleted successfully")
return reverse(
"admin:datasets_referencedataset_change",
args=(self._get_reference_dataset().id,),
)
class ReferenceDatasetAdminUploadView(ReferenceDataRecordMixin, FormView):
template_name = "admin/reference_dataset_upload_records.html"
form_class = ReferenceDataRecordUploadForm
upload_log = None
def get_template_names(self):
if self.kwargs.get("log_id") is not None:
return "admin/reference_dataset_upload_log.html"
return self.template_name
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
if self.kwargs.get("log_id"):
ctx["log"] = ReferenceDatasetUploadLog.objects.get(pk=self.kwargs["log_id"])
return ctx
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
reference_dataset = self._get_reference_dataset()
kwargs.update({"reference_dataset": reference_dataset})
return kwargs
def form_valid(self, form):
reader = csv.DictReader(chunk.decode("utf-8-sig") for chunk in form.cleaned_data["file"])
reader.fieldnames = [x.lower() for x in reader.fieldnames]
reference_dataset = self._get_reference_dataset()
record_model_class = reference_dataset.get_record_model_class()
field_map = {
field.name.lower()
if field.data_type != field.DATA_TYPE_FOREIGN_KEY
else field.relationship_name_for_record_forms.lower(): field
for _, field in reference_dataset.editable_fields.items()
}
self.upload_log = ReferenceDatasetUploadLog.objects.create(
reference_dataset=reference_dataset,
created_by=self.request.user,
updated_by=self.request.user,
)
for row in reader:
log_row = ReferenceDatasetUploadLogRecord(upload_log=self.upload_log, row_data=row)
errors = {}
form_data = {"reference_dataset": reference_dataset}
for _, field in reference_dataset.editable_fields.items():
field_name = (
field.name
if field.data_type != field.DATA_TYPE_FOREIGN_KEY
else field.relationship_name_for_record_forms
)
header_name = field_name.lower()
value = row[header_name]
form_field = field.get_form_field()
if field.data_type == field_map[header_name].DATA_TYPE_FOREIGN_KEY:
# If the column is a "foreign key ensure the linked dataset exists
link_id = None
if value != "":
linked_dataset = field_map[
header_name
].linked_reference_dataset_field.reference_dataset
try:
link_id = linked_dataset.get_record_by_custom_id(value).id
except linked_dataset.get_record_model_class().DoesNotExist:
errors[
header_name
] = "Identifier {} does not exist in linked dataset".format(value)
form_data[field.relationship_name + "_id"] = link_id
else:
# Otherwise validate using the associated form field
try:
form_data[field.column_name] = form_field.clean(value)
except ValidationError as e:
errors[header_name] = str(e)
# Fetch the existing record if it exists
try:
record_id = reference_dataset.get_record_by_custom_id(
form_data.get(reference_dataset.identifier_field.column_name)
).id
except record_model_class.DoesNotExist:
record_id = None
if not errors:
try:
reference_dataset.save_record(record_id, form_data, sync_externally=False)
except Exception as e: # pylint: disable=broad-except
log_row.status = ReferenceDatasetUploadLogRecord.STATUS_FAILURE
log_row.errors = [{"Error": str(e)}]
else:
if record_id is not None:
log_row.status = ReferenceDatasetUploadLogRecord.STATUS_SUCCESS_UPDATED
else:
log_row.status = ReferenceDatasetUploadLogRecord.STATUS_SUCCESS_ADDED
else:
log_row.status = ReferenceDatasetUploadLogRecord.STATUS_FAILURE
log_row.errors = errors
log_row.save()
if reference_dataset.external_database is not None:
reference_dataset.sync_to_external_database(
reference_dataset.external_database.memorable_name
)
return super().form_valid(form)
def get_success_url(self):
messages.success(self.request, "Reference dataset upload completed successfully")
return reverse(
"dw-admin:reference-dataset-record-upload-log",
args=(self._get_reference_dataset().id, self.upload_log.id),
)
class SourceLinkUploadView(UserPassesTestMixin, CreateView): # pylint: disable=too-many-ancestors
model = SourceLink
form_class = SourceLinkUploadForm
template_name = "admin/dataset_source_link_upload.html"
def test_func(self):
return self.request.user.is_superuser
def _get_dataset(self):
return get_object_or_404(DataSet.objects.live(), pk=self.kwargs["dataset_id"])
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
dataset = self._get_dataset()
ctx.update({"dataset": dataset, "opts": dataset._meta})
return ctx
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["initial"] = {"dataset": self._get_dataset()}
return kwargs
def get_form(self, form_class=None):
form = self.get_form_class()(**self.get_form_kwargs())
return helpers.AdminForm(form, list([(None, {"fields": list(form.fields.keys())})]), {})
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.form.is_valid():
return self.form_valid(form.form)
return self.form_invalid(form)
def form_valid(self, form):
source_link = form.save(commit=False)
source_link.link_type = SourceLink.TYPE_LOCAL
source_link.url = os.path.join(
"s3://", "sourcelink", str(source_link.id), form.cleaned_data["file"].name
)
client = get_s3_client()
try:
client.put_object(
Body=form.cleaned_data["file"],
Bucket=settings.AWS_UPLOADS_BUCKET,
Key=source_link.url,
)
except ClientError as ex:
return HttpResponseServerError(
"Error saving file: {}".format(ex.response["Error"]["Message"])
)
source_link.save()
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
messages.success(self.request, "Source link uploaded successfully")
return self._get_dataset().get_admin_edit_url()
| [
"dataworkspace.apps.datasets.models.ReferenceDatasetUploadLog.objects.get",
"django.forms.HiddenInput",
"dataworkspace.apps.core.boto3_client.get_s3_client",
"django.urls.reverse",
"dataworkspace.apps.datasets.models.ReferenceDatasetUploadLogRecord",
"django.shortcuts.get_object_or_404",
"dataworkspace.... | [((1165, 1240), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['ReferenceDataset'], {'pk': "self.kwargs['reference_dataset_id']"}), "(ReferenceDataset, pk=self.kwargs['reference_dataset_id'])\n", (1182, 1240), False, 'from django.shortcuts import get_object_or_404\n'), ((5415, 5485), 'django.urls.reverse', 'reverse', (['"""admin:datasets_referencedataset_change"""'], {'args': '(instance.id,)'}), "('admin:datasets_referencedataset_change', args=(instance.id,))\n", (5422, 5485), False, 'from django.urls import reverse\n'), ((6780, 6859), 'django.contrib.messages.success', 'messages.success', (['self.request', '"""Reference dataset record deleted successfully"""'], {}), "(self.request, 'Reference dataset record deleted successfully')\n", (6796, 6859), False, 'from django.contrib import messages\n'), ((7987, 8072), 'django.contrib.messages.success', 'messages.success', (['self.request', '"""Reference dataset records deleted successfully"""'], {}), "(self.request, 'Reference dataset records deleted successfully'\n )\n", (8003, 8072), False, 'from django.contrib import messages\n'), ((9695, 9842), 'dataworkspace.apps.datasets.models.ReferenceDatasetUploadLog.objects.create', 'ReferenceDatasetUploadLog.objects.create', ([], {'reference_dataset': 'reference_dataset', 'created_by': 'self.request.user', 'updated_by': 'self.request.user'}), '(reference_dataset=\n reference_dataset, created_by=self.request.user, updated_by=self.\n request.user)\n', (9735, 9842), False, 'from dataworkspace.apps.datasets.models import ReferenceDataset, ReferenceDatasetField, SourceLink, DataSet, ReferenceDatasetUploadLog, ReferenceDatasetUploadLogRecord\n'), ((13095, 13180), 'django.contrib.messages.success', 'messages.success', (['self.request', '"""Reference dataset upload completed successfully"""'], {}), "(self.request,\n 'Reference dataset upload completed successfully')\n", (13111, 13180), False, 'from django.contrib import messages\n'), ((14810, 14825), 'dataworkspace.apps.core.boto3_client.get_s3_client', 'get_s3_client', ([], {}), '()\n', (14823, 14825), False, 'from dataworkspace.apps.core.boto3_client import get_s3_client\n'), ((15320, 15387), 'django.contrib.messages.success', 'messages.success', (['self.request', '"""Source link uploaded successfully"""'], {}), "(self.request, 'Source link uploaded successfully')\n", (15336, 15387), False, 'from django.contrib import messages\n'), ((8771, 8834), 'dataworkspace.apps.datasets.models.ReferenceDatasetUploadLog.objects.get', 'ReferenceDatasetUploadLog.objects.get', ([], {'pk': "self.kwargs['log_id']"}), "(pk=self.kwargs['log_id'])\n", (8808, 8834), False, 'from dataworkspace.apps.datasets.models import ReferenceDataset, ReferenceDatasetField, SourceLink, DataSet, ReferenceDatasetUploadLog, ReferenceDatasetUploadLogRecord\n'), ((9929, 10002), 'dataworkspace.apps.datasets.models.ReferenceDatasetUploadLogRecord', 'ReferenceDatasetUploadLogRecord', ([], {'upload_log': 'self.upload_log', 'row_data': 'row'}), '(upload_log=self.upload_log, row_data=row)\n', (9960, 10002), False, 'from dataworkspace.apps.datasets.models import ReferenceDataset, ReferenceDatasetField, SourceLink, DataSet, ReferenceDatasetUploadLog, ReferenceDatasetUploadLogRecord\n'), ((13700, 13722), 'dataworkspace.apps.datasets.models.DataSet.objects.live', 'DataSet.objects.live', ([], {}), '()\n', (13720, 13722), False, 'from dataworkspace.apps.datasets.models import ReferenceDataset, ReferenceDatasetField, SourceLink, DataSet, ReferenceDatasetUploadLog, ReferenceDatasetUploadLogRecord\n'), ((3588, 3607), 'django.forms.HiddenInput', 'forms.HiddenInput', ([], {}), '()\n', (3605, 3607), False, 'from django import forms\n')] |
from basecrawl import BaseCrawl
import praw
import string
import time
from datetime import date
from praw.models import MoreComments
from textprocess import *
from mongo import Mongo
import operator
import re
class RedditCrawl(BaseCrawl):
def __init__(self,config_path = 'config.ini'):
super().__init__('redditcrawl','crawling reddit website',config_path)
self.__client_id = self.config.getValue('Reddit','CLIENT_ID')
self.__client_secret = self.config.getValue('Reddit','CLIENT_SECRET')
self.__redirect_url = 'http://localhost:8080'
self.__user_agent = 'web:sip_gsdm:1 (by /u/jasonyangshadow)'
self.__reddit = praw.Reddit(client_id = self.__client_id, client_secret = self.__client_secret, redirect_url = self.__redirect_url, user_agent = self.__user_agent)
self.__db_reddit = self.config.getValue('Mongo','DB_REDDIT')
def request(self, params = None):
subreddit = self.__reddit.subreddit(params['name'])
ret = {}
lim = 50
if 'limit' in params:
lim = params['limit']
for sub in subreddit.new(limit = lim):
key = sub.permalink
ret[key] = {}
ret[key]['url'] = sub.url
ret[key]['title'] = sub.title
ret[key]['text'] = sub.selftext.strip( '\n').replace("\n","")
ret[key]['time'] = date.fromtimestamp(sub.created).strftime("%Y-%m-%d")
sub.comments.replace_more(limit = None)
ret[key]['comment_num'] = len(sub.comments.list())
ret[key]['comment'] = []
for comment in sub.comments.list():
ret[key]['comment'].append(comment.body.replace('\n','').replace('\t',''))
#ret[key]['sentences'] = re.split('[\.!?]+',ret[key]['text'])
return ret
def resolve(self, data = None):
t = TextProcess()
mongo = Mongo('config.ini')
for k,v in data.items():
sentiment = SentimentAnalysis(v['text'])
if (sentiment is not None and len(sentiment) > 0) and (sentiment[0] == 'neg') and (sentiment[1] > 0.7):
mongo.saveUpdateOne({'url':v['url']},{'$set':{'title':v['title'],'text': v['text'],'time': v['time'], 'comment_num': v['comment_num'], 'comment':v['comment']}}, self.__db_reddit)
#for sen in v['sentences']:
# sen = sen.strip(' \n\r\t')
# s_tmp = SentimentAnalysis(sen)
# if s_tmp is not None and len(s_tmp) > 0 and s_tmp[0] == 'neg':
# sentences_sentiment[sen] = SentimentAnalysis(sen)
#sorted_by_value = sorted(sentences_sentiment.items(), key=lambda x:x[1], reverse=True)
#for key,value in sorted_by_value:
# print('{0} => {1}'.format(key,value))
#print('-'*50)
#print("comment count: %d" % v['comment_num'])
if __name__ == '__main__':
reddit = RedditCrawl('config.ini')
param = {}
param['limit'] = 1000
param['name'] = 'JapanTravel'
while True:
reddit.run(param)
time.sleep(24*3600)
| [
"mongo.Mongo",
"datetime.date.fromtimestamp",
"praw.Reddit",
"time.sleep"
] | [((665, 808), 'praw.Reddit', 'praw.Reddit', ([], {'client_id': 'self.__client_id', 'client_secret': 'self.__client_secret', 'redirect_url': 'self.__redirect_url', 'user_agent': 'self.__user_agent'}), '(client_id=self.__client_id, client_secret=self.__client_secret,\n redirect_url=self.__redirect_url, user_agent=self.__user_agent)\n', (676, 808), False, 'import praw\n'), ((1885, 1904), 'mongo.Mongo', 'Mongo', (['"""config.ini"""'], {}), "('config.ini')\n", (1890, 1904), False, 'from mongo import Mongo\n'), ((3105, 3126), 'time.sleep', 'time.sleep', (['(24 * 3600)'], {}), '(24 * 3600)\n', (3115, 3126), False, 'import time\n'), ((1369, 1400), 'datetime.date.fromtimestamp', 'date.fromtimestamp', (['sub.created'], {}), '(sub.created)\n', (1387, 1400), False, 'from datetime import date\n')] |
from flask import render_template,url_for,request,flash,redirect,abort
from app.main import main
from app.models import User,Blog,Comment
from .. import db, photos
from .forms import UpdateProfile,CreateBlog
from flask_login import login_required,current_user
import secrets
import os
from ..email import mail_message
@main.route('/')
def index():
# quotes = get_quotes()
page = request.args.get('page',1,type =int)
blogs = Blog.query.order_by(Blog.posted.desc()).paginate(page = page)
return render_template('index.html',blogs=blogs)
@main.route('/new_post',methods=['GET','POST'])
@login_required
def new_comic():
form = CreateBlog()
if form.validate_on_submit():
title = form.title.data
user_id = current_user._get_current_object().id
content = form.content.data
blog = Blog(title = title, content = content,user_id=user_id)
blog.save_blog()
return redirect(url_for('main.index'))
return render_template('new_post.html',form = form)
@main.route('/blog/<id>')
def blog(id):
comments = Comment.query.filter_by(blog_id = id).all()
blog = Blog.query.get_or_404(id)
return render_template('coms.html',blog = blog ,comment= comments)
@main.route('/comment/<blog_id>',methods=['GET','POST'])
def comment(blog_id):
blog = Blog.query.get(blog_id)
comment = request.form.get('newcomment')
new_comment = Comment(comment = comment, user_id = current_user._get_current_object().id, blog_id=blog_id)
new_comment.save_comment()
return redirect(url_for('main.blog',id= blog.id))
@main.route('/blog/<blog_id>/delete', methods=['POST'])
@login_required
def del_post(blog_id):
blog = Blog.query.get(blog_id)
if blog.user != current_user:
abort(404)
db.session.delete(blog)
db.session.commit()
flash('Post Deleted Successfully')
return redirect(url_for('main.index'))
@main.route('/profile',methods=['GET','POST'])
def profile():
form = UpdateProfile()
if form.validate_on_submit():
if form.profile_pic.data:
picture_file= save_pic(form.profile_pic.data)
current_user.profile_pic_path = picture_file
current_user.name = form.name.data
current_user.email = form.email.data
current_user.bio = form.bio.data
db.session.commit()
flash('Profile Updated Successfully')
return redirect(url_for('main.profile'))
elif request.method =='GET':
form.bio.data = current_user.bio
profile_pic_path = url_for('static',filename='photos' + current_user.profile_pic_path)
return render_template('profile/profile.html',form=form)
@main.route('/user/<string:username>')
def user_post(username):
user = User.query.filter_by(username=username).first()
blogs = Blog.query.filter_by(user = user ).order_by(Blog.posted.desc()).paginate(page = page)
return render_template('posts.html',blogs=blogs,user=user)
@main.route('/user/<name>/updateprofile',methods=['GET','POST'])
@login_required
def Updateprof(name):
form = UpdateProf()
user = User.query.filter_by(username = name).first()
if user == None:
abort(404)
if form.validate_on_submit():
user.bio = form.bio.data
user.save_user()
return redirect(url_for('.profile',name = name))
return render_template('profile/update.html',form=form)
def save_pic(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_filename = random_hex + f_ext
picture_path = os.path.join('app/static/photos', picture_filename)
output_size = (80,80)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_filename
@main.route('/blog/<blog_id>/update',methods=['GET','POST'])
@login_required
def updateblog(blog_id):
blog = Blog.query.get(blog_id)
if blog.user != current_user:
abort(404)
form = CreateBlog()
if form.validate_on_submit():
blog.title = form.title.data
blog.content = form.content.data
db.session.commit()
flash('Post Successfully Updated')
return redirect(url_for('main.index',id=blog_id))
if request.method == 'GET':
form.title.data = blog.title
form.content.data = blog.content
return render_template('new_post.html',form = form)
# @main.route('/commic/recommed/<int:id>')
# @loginrequired
# def recommed(id):
# commic=Commic.query.get(id)
# form=RecommedationForm()
# if form.validate_on_submit():
# new_recommedtaion=Recommedation(heading=form.heading.data,content=form.content.data)
# new_recommedataion.recommedation_saves()
# return redirect('index.html',commic=commic)
# title=f'make a new recommedation to |commic.id '
# return ('recommend.html',form=form)
| [
"flask.render_template",
"flask.request.args.get",
"secrets.token_hex",
"app.models.Blog.posted.desc",
"flask.flash",
"os.path.splitext",
"os.path.join",
"app.models.Blog",
"flask.request.form.get",
"app.models.Blog.query.get",
"app.main.main.route",
"flask.url_for",
"flask_login.current_use... | [((322, 337), 'app.main.main.route', 'main.route', (['"""/"""'], {}), "('/')\n", (332, 337), False, 'from app.main import main\n'), ((556, 604), 'app.main.main.route', 'main.route', (['"""/new_post"""'], {'methods': "['GET', 'POST']"}), "('/new_post', methods=['GET', 'POST'])\n", (566, 604), False, 'from app.main import main\n'), ((1019, 1043), 'app.main.main.route', 'main.route', (['"""/blog/<id>"""'], {}), "('/blog/<id>')\n", (1029, 1043), False, 'from app.main import main\n'), ((1227, 1284), 'app.main.main.route', 'main.route', (['"""/comment/<blog_id>"""'], {'methods': "['GET', 'POST']"}), "('/comment/<blog_id>', methods=['GET', 'POST'])\n", (1237, 1284), False, 'from app.main import main\n'), ((1583, 1637), 'app.main.main.route', 'main.route', (['"""/blog/<blog_id>/delete"""'], {'methods': "['POST']"}), "('/blog/<blog_id>/delete', methods=['POST'])\n", (1593, 1637), False, 'from app.main import main\n'), ((1902, 1949), 'app.main.main.route', 'main.route', (['"""/profile"""'], {'methods': "['GET', 'POST']"}), "('/profile', methods=['GET', 'POST'])\n", (1912, 1949), False, 'from app.main import main\n'), ((2655, 2692), 'app.main.main.route', 'main.route', (['"""/user/<string:username>"""'], {}), "('/user/<string:username>')\n", (2665, 2692), False, 'from app.main import main\n'), ((2940, 3005), 'app.main.main.route', 'main.route', (['"""/user/<name>/updateprofile"""'], {'methods': "['GET', 'POST']"}), "('/user/<name>/updateprofile', methods=['GET', 'POST'])\n", (2950, 3005), False, 'from app.main import main\n'), ((3752, 3813), 'app.main.main.route', 'main.route', (['"""/blog/<blog_id>/update"""'], {'methods': "['GET', 'POST']"}), "('/blog/<blog_id>/update', methods=['GET', 'POST'])\n", (3762, 3813), False, 'from app.main import main\n'), ((390, 427), 'flask.request.args.get', 'request.args.get', (['"""page"""', '(1)'], {'type': 'int'}), "('page', 1, type=int)\n", (406, 427), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((512, 554), 'flask.render_template', 'render_template', (['"""index.html"""'], {'blogs': 'blogs'}), "('index.html', blogs=blogs)\n", (527, 554), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((972, 1015), 'flask.render_template', 'render_template', (['"""new_post.html"""'], {'form': 'form'}), "('new_post.html', form=form)\n", (987, 1015), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((1128, 1153), 'app.models.Blog.query.get_or_404', 'Blog.query.get_or_404', (['id'], {}), '(id)\n', (1149, 1153), False, 'from app.models import User, Blog, Comment\n'), ((1165, 1222), 'flask.render_template', 'render_template', (['"""coms.html"""'], {'blog': 'blog', 'comment': 'comments'}), "('coms.html', blog=blog, comment=comments)\n", (1180, 1222), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((1316, 1339), 'app.models.Blog.query.get', 'Blog.query.get', (['blog_id'], {}), '(blog_id)\n', (1330, 1339), False, 'from app.models import User, Blog, Comment\n'), ((1354, 1384), 'flask.request.form.get', 'request.form.get', (['"""newcomment"""'], {}), "('newcomment')\n", (1370, 1384), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((1688, 1711), 'app.models.Blog.query.get', 'Blog.query.get', (['blog_id'], {}), '(blog_id)\n', (1702, 1711), False, 'from app.models import User, Blog, Comment\n'), ((1822, 1856), 'flask.flash', 'flash', (['"""Post Deleted Successfully"""'], {}), "('Post Deleted Successfully')\n", (1827, 1856), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((2522, 2590), 'flask.url_for', 'url_for', (['"""static"""'], {'filename': "('photos' + current_user.profile_pic_path)"}), "('static', filename='photos' + current_user.profile_pic_path)\n", (2529, 2590), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((2601, 2651), 'flask.render_template', 'render_template', (['"""profile/profile.html"""'], {'form': 'form'}), "('profile/profile.html', form=form)\n", (2616, 2651), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((2886, 2939), 'flask.render_template', 'render_template', (['"""posts.html"""'], {'blogs': 'blogs', 'user': 'user'}), "('posts.html', blogs=blogs, user=user)\n", (2901, 2939), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((3323, 3372), 'flask.render_template', 'render_template', (['"""profile/update.html"""'], {'form': 'form'}), "('profile/update.html', form=form)\n", (3338, 3372), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((3419, 3439), 'secrets.token_hex', 'secrets.token_hex', (['(8)'], {}), '(8)\n', (3436, 3439), False, 'import secrets\n'), ((3455, 3494), 'os.path.splitext', 'os.path.splitext', (['form_picture.filename'], {}), '(form_picture.filename)\n', (3471, 3494), False, 'import os\n'), ((3556, 3607), 'os.path.join', 'os.path.join', (['"""app/static/photos"""', 'picture_filename'], {}), "('app/static/photos', picture_filename)\n", (3568, 3607), False, 'import os\n'), ((3864, 3887), 'app.models.Blog.query.get', 'Blog.query.get', (['blog_id'], {}), '(blog_id)\n', (3878, 3887), False, 'from app.models import User, Blog, Comment\n'), ((4327, 4370), 'flask.render_template', 'render_template', (['"""new_post.html"""'], {'form': 'form'}), "('new_post.html', form=form)\n", (4342, 4370), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((834, 885), 'app.models.Blog', 'Blog', ([], {'title': 'title', 'content': 'content', 'user_id': 'user_id'}), '(title=title, content=content, user_id=user_id)\n', (838, 885), False, 'from app.models import User, Blog, Comment\n'), ((1547, 1579), 'flask.url_for', 'url_for', (['"""main.blog"""'], {'id': 'blog.id'}), "('main.blog', id=blog.id)\n", (1554, 1579), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((1754, 1764), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (1759, 1764), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((1877, 1898), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (1884, 1898), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((2338, 2375), 'flask.flash', 'flash', (['"""Profile Updated Successfully"""'], {}), "('Profile Updated Successfully')\n", (2343, 2375), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((3152, 3162), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (3157, 3162), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((3930, 3940), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (3935, 3940), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((4113, 4147), 'flask.flash', 'flash', (['"""Post Successfully Updated"""'], {}), "('Post Successfully Updated')\n", (4118, 4147), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((745, 779), 'flask_login.current_user._get_current_object', 'current_user._get_current_object', ([], {}), '()\n', (777, 779), False, 'from flask_login import login_required, current_user\n'), ((938, 959), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (945, 959), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((1073, 1108), 'app.models.Comment.query.filter_by', 'Comment.query.filter_by', ([], {'blog_id': 'id'}), '(blog_id=id)\n', (1096, 1108), False, 'from app.models import User, Blog, Comment\n'), ((2400, 2423), 'flask.url_for', 'url_for', (['"""main.profile"""'], {}), "('main.profile')\n", (2407, 2423), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((2729, 2768), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'username'}), '(username=username)\n', (2749, 2768), False, 'from app.models import User, Blog, Comment\n'), ((3077, 3112), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'name'}), '(username=name)\n', (3097, 3112), False, 'from app.models import User, Blog, Comment\n'), ((3279, 3309), 'flask.url_for', 'url_for', (['""".profile"""'], {'name': 'name'}), "('.profile', name=name)\n", (3286, 3309), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((4172, 4205), 'flask.url_for', 'url_for', (['"""main.index"""'], {'id': 'blog_id'}), "('main.index', id=blog_id)\n", (4179, 4205), False, 'from flask import render_template, url_for, request, flash, redirect, abort\n'), ((459, 477), 'app.models.Blog.posted.desc', 'Blog.posted.desc', ([], {}), '()\n', (475, 477), False, 'from app.models import User, Blog, Comment\n'), ((1440, 1474), 'flask_login.current_user._get_current_object', 'current_user._get_current_object', ([], {}), '()\n', (1472, 1474), False, 'from flask_login import login_required, current_user\n'), ((2833, 2851), 'app.models.Blog.posted.desc', 'Blog.posted.desc', ([], {}), '()\n', (2849, 2851), False, 'from app.models import User, Blog, Comment\n'), ((2789, 2820), 'app.models.Blog.query.filter_by', 'Blog.query.filter_by', ([], {'user': 'user'}), '(user=user)\n', (2809, 2820), False, 'from app.models import User, Blog, Comment\n')] |
"""
test_nc_util.py
Routines to test georef's nc_info.py code
"""
import os
import georef
import georef.nc_util
def test_write_sample_netCDF_file():
"""Test that can write a simple geo-referenced netCDF file"""
test_filename = './test_georef.nc'
try:
assert not os.path.isfile(test_filename)
except AssertionError:
print('test file exists, skipping test: {}'.format(test_filename))
return
georef.nc_util.write_simple_netCDF_file(test_filename, overwrite=True)
os.remove(test_filename)
| [
"os.remove",
"os.path.isfile",
"georef.nc_util.write_simple_netCDF_file"
] | [((437, 507), 'georef.nc_util.write_simple_netCDF_file', 'georef.nc_util.write_simple_netCDF_file', (['test_filename'], {'overwrite': '(True)'}), '(test_filename, overwrite=True)\n', (476, 507), False, 'import georef\n'), ((512, 536), 'os.remove', 'os.remove', (['test_filename'], {}), '(test_filename)\n', (521, 536), False, 'import os\n'), ((286, 315), 'os.path.isfile', 'os.path.isfile', (['test_filename'], {}), '(test_filename)\n', (300, 315), False, 'import os\n')] |
# Generated by Django 4.0.2 on 2022-03-26 14:52
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cpovc_forms', '0002_initial'),
]
operations = [
migrations.AlterField(
model_name='ovchivmanagement',
name='substitution_firstline_date',
field=models.DateTimeField(default=datetime.datetime(2022, 3, 26, 17, 52, 28, 252242)),
),
]
| [
"datetime.datetime"
] | [((405, 455), 'datetime.datetime', 'datetime.datetime', (['(2022)', '(3)', '(26)', '(17)', '(52)', '(28)', '(252242)'], {}), '(2022, 3, 26, 17, 52, 28, 252242)\n', (422, 455), False, 'import datetime\n')] |
#
# Copyright (c) 2014, 2016, 2018, 2020 LexisNexis Risk Data Management Inc.
#
# This file is part of the RadSSH software package.
#
# RadSSH is free software, released under the Revised BSD License.
# You are permitted to use, modify, and redsitribute this software
# according to the Revised BSD License, a copy of which should be
# included with the distribution as file LICENSE.txt
#
'''
Python wrapper for parallel execution shell
===========================================
*** This module should be run, not imported ***
Usage: ```python -m radssh.shell host [...]```
Will read settings from /etc/radssh_config, and supplement with ~/.radssh_config.
Settings may also be provided on the command line, using the form --keyword=value.
'''
import sys
import os
import time
import socket
import pprint
import readline
import atexit
import logging
from . import ssh
from . import config
from .console import RadSSHConsole, monochrome
try:
from . import star_commands as star
import radssh.plugins
except ImportError:
class NullStarCommands(object):
'''Use stub if plugins or star_commands can not be loaded'''
@classmethod
def call(*args, **kwargs):
print('Plugins directory not found - *commands disabled')
star_help = call
star_info = call
commands = {'*help': star_help}
star = NullStarCommands()
# Try using colorama when running on Windows
if sys.platform.startswith('win'):
try:
import colorama
colorama.initialise.init()
except Exception as e:
print('Unable to support ANSI escape sequences via colorama module')
print(e)
sys.exit(1)
# Ensure ~/.ssh directory exists, with sensible permissions
try:
os.mkdir(os.path.expanduser('~/.ssh'), 0o700)
except OSError:
pass
################################################################################
command_listeners = []
def shell(cluster, logdir=None, playbackfile=None, defaults=None):
'''Very basic interactive shell'''
if not defaults:
defaults = config.load_default_settings()
while True:
try:
if playbackfile:
try:
cmd = next(playbackfile)
print('%s %s' % (defaults['shell.prompt'], cmd.strip()))
except StopIteration:
return
else:
try:
cmd = input('%s ' % defaults['shell.prompt'])
except KeyboardInterrupt:
print('\n<Ctrl-C> during input\nUse EOF (<Ctrl-D>) or *exit to exit shell\n')
continue
# Feed command line to any registered listeners from plugins
for feed in command_listeners:
feed_result = feed(cmd)
if feed_result:
if defaults['show_altered_commands'] == 'on':
cluster.console.message('Command modified from "%s" to "%s"' % (cmd, feed_result))
cmd = str(feed_result)
if logdir:
with open(os.path.join(logdir, 'session.commands'), 'a') as f:
f.write('%s\n' % cmd)
args = cmd.split()
if len(args) > 0:
if os.path.basename(args[0]) == 'sudo' and len(args) > 1:
initial_command = os.path.basename(args[1])
else:
initial_command = os.path.basename(args[0])
if initial_command in defaults['commands.forbidden'].split(','):
print('You really don\'t want to run %s without a TTY, do you?' % initial_command)
continue
if initial_command in defaults['commands.restricted'].split(','):
print('STOP! "%s" is listed as a restricted command (Potentially dangerous)' % initial_command)
print('and requires explicit confirmation before running.')
print('Please double check all parameters, just to be sure...')
print(' >>>', cmd)
confirm = input('Enter \'100%\' if completely sure: ')
if confirm != '100%':
continue
if args[0].startswith('#'):
# Comment
continue
if args[0].startswith('*'):
ret = star.call(cluster, logdir, cmd)
cluster.console.join()
if isinstance(ret, ssh.Cluster):
cluster.console.message('Switched cluster from %r to %r' % (cluster, ret))
cluster = ret
continue
r = cluster.run_command(cmd)
if logdir:
cluster.log_result(logdir, encoding=defaults['character_encoding'])
# Quick summary report, if jobs failed
failures = {}
completions = []
completion_time = 0.0
for k, job in r.items():
v = job.result
if job.completed:
if v.return_code == 0:
completions.append(str(k))
completion_time += job.end_time - job.start_time
else:
failures.setdefault(v.return_code, []).append(str(k))
else:
failures.setdefault(None, []).append(str(k))
if failures:
print('\nSummary of return codes:')
for k, v in [(0, completions)] + list(failures.items()):
if len(v) > 5:
print(k, '\t- (%d hosts)' % len(v))
else:
print(k, '\t-', sorted(v))
if completions:
print('Average completion time for %d hosts: %fs' % (len(completions), (completion_time / len(completions))))
except KeyboardInterrupt:
print('Ctrl-C during command preparation - command aborted.')
except EOFError as e:
print(e)
break
print('Shell exiting')
cluster.close_connections()
################################################################################
# Readline/libedit command completion
# Supports *commands, executables (LOCAL), and path (REMOTE) completion
class radssh_tab_handler(object):
'''Class wrapper for readline TAB key completion'''
def __init__(self, cluster, star):
# Need access to the cluster object to get SFTP service
# for remote path completion, and the star command dictionary
# to know what *commands are available.
self.cluster = cluster
self.star = star
try:
self.using_libedit = ('libedit' in readline.__doc__)
except TypeError:
# pyreadline (windows) readline.__doc__ is None (not iterable)
self.using_libedit = False
self.completion_choices = []
readline.set_completer()
readline.set_completer(self.complete)
readline.set_completer_delims(' \t\n/*')
if self.using_libedit:
readline.parse_and_bind('bind ^I rl_complete')
else:
readline.parse_and_bind('tab: complete')
def complete_star_command(self, lead_in, text, state):
if state == 0:
# Rebuild cached list of choices that match
# Reset list to empty (choices = [] would reference local, not persistent list)
del self.completion_choices[:]
for choice in self.star.commands.keys():
if choice.startswith(lead_in):
self.completion_choices.append(choice + ' ')
# Discrepancy with readline/libedit and handling of leading *
if self.using_libedit:
return self.completion_choices[state]
else:
return self.completion_choices[state][1:]
def complete_executable(self, lead_in, text, state):
if state == 0:
del self.completion_choices[:]
for path_dir in os.environ['PATH'].split(os.path.pathsep):
try:
for f in os.listdir(path_dir):
try:
if os.path.isdir(os.path.join(path_dir, f)):
continue
st = os.stat(os.path.join(path_dir, f))
if (st.st_mode & 0o111) and f.startswith(text):
self.completion_choices.append(f + ' ')
except OSError:
continue
except OSError:
continue
self.completion_choices.append(None)
return self.completion_choices[state]
def complete_remote_path(self, lead_in, text, state):
if state == 0:
del self.completion_choices[:]
for t in self.cluster.connections.values():
if t.is_authenticated():
break
else:
print('No authenticated connections')
raise RuntimeError('Tab Completion unavailable')
s = t.open_sftp_client()
parent = os.path.dirname(lead_in)
partial = os.path.basename(lead_in)
if not parent:
parent = './'
for x in s.listdir(parent):
if x.startswith(partial):
full_path = os.path.join(parent, x)
try:
# See if target is a directory, and append '/' if it is
s.chdir(full_path)
x += '/'
full_path += '/'
except Exception:
pass
if self.using_libedit:
self.completion_choices.append(full_path)
else:
self.completion_choices.append(x)
self.completion_choices.append(None)
return self.completion_choices[state]
def complete_local_path(self, lead_in, text, state):
if state == 0:
del self.completion_choices[:]
parent = os.path.dirname(lead_in)
partial = os.path.basename(lead_in)
if not parent:
parent = './'
for x in os.listdir(parent):
if x.startswith(partial):
full_path = os.path.join(parent, x)
if os.path.isdir(full_path):
# See if target is a directory, and append '/' if it is
x += '/'
full_path += '/'
if self.using_libedit:
self.completion_choices.append(full_path)
else:
self.completion_choices.append(x)
self.completion_choices.append(None)
return self.completion_choices[state]
def complete(self, text, state):
buffer = readline.get_line_buffer()
lead_in = buffer[:readline.get_endidx()].split()[-1]
try:
if buffer.startswith('*') and ' ' in buffer:
# See if *command has custom tab completion
star_command = self.star.commands.get(buffer.split()[0], None)
if star_command and star_command.tab_completion:
return star_command.tab_completion(self, buffer, lead_in, text, state)
if lead_in.startswith('*'):
# User needs help completing *command...
return self.complete_star_command(lead_in, text, state)
else:
# Default behavior - remote file path completion
return self.complete_remote_path(lead_in, text, state)
except Exception:
raise
################################################################################
# Workaround for https://github.com/radssh/radssh/issues/32
# Newer GNU Readline library raise false errno value that the Python
# wrapper reraises as IOError. https://bugs.python.org/issue10350 not
# being backported to Python 2.7, so handle it with more code...
def safe_write_history_file(filename):
# To avoid false negative, use stat() to test the file modification times
try:
readline.write_history_file(filename)
except IOError as e:
# Ignore this exception if we wrote out the history file recently
try:
post = os.stat(filename).st_mtime
if post > time.time() - 3:
logging.debug('Ignoring "%s" writing history file', str(e))
except Exception:
raise e
################################################################################
def radssh_shell_main():
args = sys.argv[1:]
defaults = config.load_settings()
# Keep command line options separately, for reuse in sshconfig defaults
cmdline_options = config.command_line_settings(args, defaults.get('user.settings'))
defaults.update(cmdline_options)
if 'socket.timeout' in defaults:
socket.setdefaulttimeout(float(defaults['socket.timeout']))
# Setup Logging
logformat = '%(asctime)s %(levelname)-8s [%(name)s:%(thread)08X] %(message)s'
logdir = os.path.expanduser(time.strftime(defaults.get('logdir', '')))
if logdir:
if not os.path.exists(logdir):
os.mkdir(logdir)
logging.basicConfig(filename=os.path.join(logdir, 'radssh.log'),
format=logformat)
else:
logging.basicConfig(format=logformat)
pass
try:
logging.getLogger().setLevel(getattr(logging, defaults['loglevel'].upper()))
except AttributeError:
raise RuntimeError('RadSSH setting "loglevel" should be set to one of [CRITICAL,ERROR,WARNING,INFO,DEBUG] instead of "%s"', defaults['loglevel'])
logger = logging.getLogger('radssh')
# Make an AuthManager to handle user authentication
a = ssh.AuthManager(defaults['username'],
auth_file=os.path.expanduser(defaults['authfile']),
try_auth_none=(defaults['try_auth_none'] == 'on'))
# Load Plugins to aid in host lookups and add *commands dynamically
loaded_plugins = {}
exe_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
system_plugin_dir = os.path.join(exe_dir, 'plugins')
disable_plugins = defaults['disable_plugins'].split(',')
plugin_dirs = [x for x in defaults['plugins'].split(';') if x]
plugin_dirs.append(system_plugin_dir)
for x in plugin_dirs:
plugin_dir = os.path.abspath(os.path.expanduser(x))
if not os.path.exists(plugin_dir):
continue
for module in sorted(os.listdir(plugin_dir)):
if module.endswith('.py') and not module.startswith('__'):
plugin = module[:-3]
# Skip modules found in more that 1 location, and ones explicitly disabled
if plugin in loaded_plugins or plugin in disable_plugins:
continue
try:
logger.info('Loading plugin module: %s', plugin)
this_plugin = radssh.plugins.load_plugin(os.path.join(plugin_dir, module))
if hasattr(this_plugin, 'settings'):
prefix = 'plugin.%s.' % plugin
user_settings = {}
user_settings = dict([(k[len(prefix):], v) for k, v in defaults.items() if k.startswith(prefix)])
logger.info('Updating settings for plugin %s with: %s', plugin, user_settings)
this_plugin.settings.update(user_settings)
if hasattr(this_plugin, 'init'):
logger.debug('Calling init method for plugin: %s', plugin)
this_plugin.init(defaults=defaults, auth=a, plugins=loaded_plugins, star_commands=star.commands, shell=shell)
if hasattr(this_plugin, 'star_commands'):
logger.debug('Registering *commands for plugin: %s %s', plugin, this_plugin.star_commands.keys())
star.commands.update(this_plugin.star_commands)
if hasattr(this_plugin, 'command_listener'):
command_listeners.append(this_plugin.command_listener)
loaded_plugins[plugin] = this_plugin
except Exception as e:
logger.error('Failed to load plugin (%s): %s', plugin, repr(e))
# Use command line args as connect list, or give user option to supply list now
if not args:
print('No command line arguments given.')
print('You can connect to a number of hosts by hostname or IP')
if loaded_plugins:
print('You can also give symbolic names that can be translated by')
print('the following loaded plugins:')
for module, plugin in loaded_plugins.items():
try:
lookup_doc = plugin.lookup.__doc__
print(module, plugin.__doc__)
print('\t%s' % lookup_doc)
try:
plugin.banner()
except AttributeError:
pass
except AttributeError:
pass
connect_list = input('Enter a list of connection destinations: ').split()
else:
connect_list = args
if not connect_list:
sys.exit(0)
# Do the connections if needed, offer names to plugin lookup() functions
hosts = []
for arg in connect_list:
for helper, resolver in loaded_plugins.items():
if hasattr(resolver, 'lookup'):
try:
cluster = resolver.lookup(arg)
if cluster:
logger.debug('%s expanded by %s', arg, helper)
for label, host, conn in cluster:
if conn:
hosts.append((label, conn))
else:
hosts.append((label, host))
break
except Exception as e:
logger.error('Exception looking up %s via %s: %r', arg, helper, e)
cluster = None
else:
hosts.append((arg, None))
# Almost done with all the preliminary setup steps...
if defaults['loglevel'] not in ('CRITICAL', 'ERROR'):
print('*** Parallel Shell ***')
print('Using AuthManager:', a)
print('Logging to %s' % logdir)
pprint.pprint(defaults, indent=4)
print()
star.star_help()
# Create a RadSSHConsole instance for screen output
job_buffer = int(defaults['stalled_job_buffer'])
console_name = defaults['shell.console']
console = None
if '.' in console_name:
# Try finding formatter as module.function from loaded plugins
logger.info('Attempting to load custom console formatter: %s', console_name)
module_name, function_name = console_name.split('.', 1)
try:
custom_formatter = getattr(loaded_plugins[module_name], function_name)
console = RadSSHConsole(formatter=custom_formatter, retain_recent=job_buffer)
except KeyError:
logger.error('Plugin not loaded for shell.console formatter %s', console_name)
except AttributeError:
logger.error('Plugin formatter not found for shell.console formatter %s', console_name)
except Exception as e:
logger.error('Exception on console formatter %s: %r', console_name, e)
# Fallback to a standard console if plugin provided one did not load
if console is None:
if not sys.stdout.isatty() or console_name == 'monochrome':
console = RadSSHConsole(formatter=monochrome, retain_recent=job_buffer)
else:
console = RadSSHConsole(retain_recent=job_buffer)
# Finally, we are able to create the Cluster
print('Connecting to %d hosts...' % len(hosts))
cluster = ssh.Cluster(hosts, auth=a, console=console, defaults=defaults)
ready, disabled, failed_auth, failed_connect, dropped = cluster.connection_summary()
if defaults['loglevel'] not in ('CRITICAL', 'ERROR'):
star.star_info(cluster, logdir, '', [])
else:
# If cluster is not 100% connected, let user know even if loglevel is not low enough
if any((failed_auth, failed_connect, dropped)):
print('There were problems connecting to some nodes:')
if failed_connect:
print(' %d nodes failed to connect' % failed_connect)
if failed_auth:
print(' %d nodes failed authentication' % failed_auth)
if dropped:
print(' %d dropped connections' % dropped)
print(' Use "*info" for connection details.')
if ready == 1 and disabled + failed_auth + failed_connect + dropped == 0:
# Cluster size of one - check if auto_tty is set
if defaults['auto_tty'] == 'on' and 'star_tty' in loaded_plugins:
print('Auto-invoking *tty for a cluster size of 1')
loaded_plugins['star_tty'].settings['prompt_delay'] = "0.0"
star.call(cluster, logdir, '*tty')
# cluster.console.join()
cluster.close_connections()
raise SystemExit("Session complete")
# Command line history support
if defaults.get('historyfile'):
histfile = os.path.expanduser(defaults['historyfile'])
try:
readline.read_history_file(histfile)
except IOError:
pass
readline.set_history_length(int(os.environ.get('HISTSIZE', 1000)))
if sys.version_info.major == 2:
# Workaround #32 - fix not backported to Python 2.X
atexit.register(safe_write_history_file, histfile)
else:
atexit.register(readline.write_history_file, histfile)
# Add TAB completion for *commands and remote file paths
tab_completion = radssh_tab_handler(cluster, star)
# With the cluster object, start interactive session
shell(cluster=cluster, logdir=logdir, defaults=defaults)
if __name__ == '__main__':
radssh_shell_main()
| [
"logging.getLogger",
"sys.platform.startswith",
"readline.read_history_file",
"sys.exit",
"pprint.pprint",
"readline.get_endidx",
"readline.set_completer",
"os.path.exists",
"readline.parse_and_bind",
"os.listdir",
"colorama.initialise.init",
"os.path.isdir",
"os.mkdir",
"atexit.register",... | [((1436, 1466), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (1459, 1466), False, 'import sys\n'), ((14052, 14079), 'logging.getLogger', 'logging.getLogger', (['"""radssh"""'], {}), "('radssh')\n", (14069, 14079), False, 'import logging\n'), ((14516, 14548), 'os.path.join', 'os.path.join', (['exe_dir', '"""plugins"""'], {}), "(exe_dir, 'plugins')\n", (14528, 14548), False, 'import os\n'), ((1509, 1535), 'colorama.initialise.init', 'colorama.initialise.init', ([], {}), '()\n', (1533, 1535), False, 'import colorama\n'), ((1756, 1784), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.ssh"""'], {}), "('~/.ssh')\n", (1774, 1784), False, 'import os\n'), ((7137, 7161), 'readline.set_completer', 'readline.set_completer', ([], {}), '()\n', (7159, 7161), False, 'import readline\n'), ((7170, 7207), 'readline.set_completer', 'readline.set_completer', (['self.complete'], {}), '(self.complete)\n', (7192, 7207), False, 'import readline\n'), ((7216, 7256), 'readline.set_completer_delims', 'readline.set_completer_delims', (['""" \t\n/*"""'], {}), "(' \\t\\n/*')\n", (7245, 7256), False, 'import readline\n'), ((11179, 11205), 'readline.get_line_buffer', 'readline.get_line_buffer', ([], {}), '()\n', (11203, 11205), False, 'import readline\n'), ((12480, 12517), 'readline.write_history_file', 'readline.write_history_file', (['filename'], {}), '(filename)\n', (12507, 12517), False, 'import readline\n'), ((13713, 13750), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'logformat'}), '(format=logformat)\n', (13732, 13750), False, 'import logging\n'), ((14461, 14490), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (14477, 14490), False, 'import os\n'), ((17677, 17688), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (17685, 17688), False, 'import sys\n'), ((18822, 18855), 'pprint.pprint', 'pprint.pprint', (['defaults'], {'indent': '(4)'}), '(defaults, indent=4)\n', (18835, 18855), False, 'import pprint\n'), ((21755, 21798), 'os.path.expanduser', 'os.path.expanduser', (["defaults['historyfile']"], {}), "(defaults['historyfile'])\n", (21773, 21798), False, 'import os\n'), ((1665, 1676), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1673, 1676), False, 'import sys\n'), ((7300, 7346), 'readline.parse_and_bind', 'readline.parse_and_bind', (['"""bind ^I rl_complete"""'], {}), "('bind ^I rl_complete')\n", (7323, 7346), False, 'import readline\n'), ((7373, 7413), 'readline.parse_and_bind', 'readline.parse_and_bind', (['"""tab: complete"""'], {}), "('tab: complete')\n", (7396, 7413), False, 'import readline\n'), ((9374, 9398), 'os.path.dirname', 'os.path.dirname', (['lead_in'], {}), '(lead_in)\n', (9389, 9398), False, 'import os\n'), ((9421, 9446), 'os.path.basename', 'os.path.basename', (['lead_in'], {}), '(lead_in)\n', (9437, 9446), False, 'import os\n'), ((10364, 10388), 'os.path.dirname', 'os.path.dirname', (['lead_in'], {}), '(lead_in)\n', (10379, 10388), False, 'import os\n'), ((10411, 10436), 'os.path.basename', 'os.path.basename', (['lead_in'], {}), '(lead_in)\n', (10427, 10436), False, 'import os\n'), ((10515, 10533), 'os.listdir', 'os.listdir', (['parent'], {}), '(parent)\n', (10525, 10533), False, 'import os\n'), ((13523, 13545), 'os.path.exists', 'os.path.exists', (['logdir'], {}), '(logdir)\n', (13537, 13545), False, 'import os\n'), ((13559, 13575), 'os.mkdir', 'os.mkdir', (['logdir'], {}), '(logdir)\n', (13567, 13575), False, 'import os\n'), ((14217, 14257), 'os.path.expanduser', 'os.path.expanduser', (["defaults['authfile']"], {}), "(defaults['authfile'])\n", (14235, 14257), False, 'import os\n'), ((14783, 14804), 'os.path.expanduser', 'os.path.expanduser', (['x'], {}), '(x)\n', (14801, 14804), False, 'import os\n'), ((14821, 14847), 'os.path.exists', 'os.path.exists', (['plugin_dir'], {}), '(plugin_dir)\n', (14835, 14847), False, 'import os\n'), ((14899, 14921), 'os.listdir', 'os.listdir', (['plugin_dir'], {}), '(plugin_dir)\n', (14909, 14921), False, 'import os\n'), ((21824, 21860), 'readline.read_history_file', 'readline.read_history_file', (['histfile'], {}), '(histfile)\n', (21850, 21860), False, 'import readline\n'), ((22093, 22143), 'atexit.register', 'atexit.register', (['safe_write_history_file', 'histfile'], {}), '(safe_write_history_file, histfile)\n', (22108, 22143), False, 'import atexit\n'), ((22170, 22224), 'atexit.register', 'atexit.register', (['readline.write_history_file', 'histfile'], {}), '(readline.write_history_file, histfile)\n', (22185, 22224), False, 'import atexit\n'), ((13613, 13647), 'os.path.join', 'os.path.join', (['logdir', '"""radssh.log"""'], {}), "(logdir, 'radssh.log')\n", (13625, 13647), False, 'import os\n'), ((13781, 13800), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (13798, 13800), False, 'import logging\n'), ((19978, 19997), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (19995, 19997), False, 'import sys\n'), ((21942, 21974), 'os.environ.get', 'os.environ.get', (['"""HISTSIZE"""', '(1000)'], {}), "('HISTSIZE', 1000)\n", (21956, 21974), False, 'import os\n'), ((3404, 3429), 'os.path.basename', 'os.path.basename', (['args[1]'], {}), '(args[1])\n', (3420, 3429), False, 'import os\n'), ((3490, 3515), 'os.path.basename', 'os.path.basename', (['args[0]'], {}), '(args[0])\n', (3506, 3515), False, 'import os\n'), ((8317, 8337), 'os.listdir', 'os.listdir', (['path_dir'], {}), '(path_dir)\n', (8327, 8337), False, 'import os\n'), ((9618, 9641), 'os.path.join', 'os.path.join', (['parent', 'x'], {}), '(parent, x)\n', (9630, 9641), False, 'import os\n'), ((10609, 10632), 'os.path.join', 'os.path.join', (['parent', 'x'], {}), '(parent, x)\n', (10621, 10632), False, 'import os\n'), ((10656, 10680), 'os.path.isdir', 'os.path.isdir', (['full_path'], {}), '(full_path)\n', (10669, 10680), False, 'import os\n'), ((12649, 12666), 'os.stat', 'os.stat', (['filename'], {}), '(filename)\n', (12656, 12666), False, 'import os\n'), ((3311, 3336), 'os.path.basename', 'os.path.basename', (['args[0]'], {}), '(args[0])\n', (3327, 3336), False, 'import os\n'), ((12698, 12709), 'time.time', 'time.time', ([], {}), '()\n', (12707, 12709), False, 'import time\n'), ((15377, 15409), 'os.path.join', 'os.path.join', (['plugin_dir', 'module'], {}), '(plugin_dir, module)\n', (15389, 15409), False, 'import os\n'), ((3132, 3172), 'os.path.join', 'os.path.join', (['logdir', '"""session.commands"""'], {}), "(logdir, 'session.commands')\n", (3144, 3172), False, 'import os\n'), ((11232, 11253), 'readline.get_endidx', 'readline.get_endidx', ([], {}), '()\n', (11251, 11253), False, 'import readline\n'), ((8413, 8438), 'os.path.join', 'os.path.join', (['path_dir', 'f'], {}), '(path_dir, f)\n', (8425, 8438), False, 'import os\n'), ((8523, 8548), 'os.path.join', 'os.path.join', (['path_dir', 'f'], {}), '(path_dir, f)\n', (8535, 8548), False, 'import os\n')] |
import os
import sys
import logging
file_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(1, f"{file_dir}/../")
from db import session, Protein, SequenceAlign, StructureValidation
from utils import download_cif
from sqlalchemy import or_
import requests
logging.basicConfig(format="%(levelname)s: %(message)s", level="INFO")
def handle_cif_line(line, expectedtype):
value = None
parts = line.split()
if len(parts) > 1:
try:
value = expectedtype(parts[1])
except:
pass
return value
def save_experimental_conditions(idcode, pid):
"""
_exptl_crystal_grow.pH 4.5 70%
_exptl_crystal_grow.temp 277.15 73.0%
_em_buffer.pH 7.5
_pdbx_nmr_exptl_sample_conditions.temperature 308
_pdbx_nmr_exptl_sample_conditions.pH 3.8
"""
exists_experimental = (
session.query(Protein.pid)
.filter(Protein.pid == pid)
.filter(or_(Protein.exp_ph != None, Protein.exp_temp != None))
.first()
)
if exists_experimental:
logging.info(f"{idcode} experimental conditions already exist.")
return
cif_file = download_cif(idcode, pid)
temperature = None
ph = None
for line in cif_file.splitlines():
if line.startswith("_exptl_crystal_grow.pH"):
tmp_ph = handle_cif_line(line, float)
if tmp_ph:
ph = tmp_ph
elif line.startswith("_exptl_crystal_grow.temp"):
tmp_temp = handle_cif_line(line, float)
if tmp_temp:
temperature = tmp_temp
elif line.startswith("_em_buffer.pH"):
tmp_ph = handle_cif_line(line, float)
if tmp_ph:
ph = tmp_ph
elif line.startswith("_pdbx_nmr_exptl_sample_conditions.temperature"):
tmp_temp = handle_cif_line(line, float)
if tmp_temp:
temperature = tmp_temp
elif line.startswith("_em_buffer.pH"):
tmp_ph = handle_cif_line(line, float)
if tmp_ph:
ph = tmp_ph
if ph or temperature:
protein = session.query(Protein).filter_by(pid=pid).first()
if ph:
protein.exp_ph = ph
if temperature:
protein.exp_temp = temperature
session.commit()
def save_sequence_info(idcode, pid):
"""
https://data.rcsb.org/rest/v1/core/entry/2AT1
"polymer_entity_ids":["1","2"]
https://data.rcsb.org/rest/v1/core/polymer_entity/2AT1/1
"asym_ids":["A","C"]
https://data.rcsb.org/rest/v1/core/uniprot/2AT1/1
"rcsb_id":"P0A786"
"rcsb_uniprot_accession":["P0A786","P00479","Q2M662","Q47555","Q47557"]
"feature_positions":[{"beg_seq_id":2,"end_seq_id":311}]}
"""
exists_sequence = (
session.query(SequenceAlign.pid).filter(SequenceAlign.pid == pid).first()
)
if exists_sequence:
logging.info(f"{idcode} sequence alignment information already exist.")
return
r = requests.get(f"https://data.rcsb.org/rest/v1/core/entry/{idcode}")
if not r.ok:
return
entities = r.json()["rcsb_entry_container_identifiers"]["polymer_entity_ids"]
for entity in entities:
r = requests.get(
f"https://data.rcsb.org/rest/v1/core/polymer_entity/{idcode}/{entity}"
)
if not r.ok:
continue
content = r.json()
chains = None
if (
"rcsb_polymer_entity_container_identifiers" in content
and "auth_asym_ids" in content["rcsb_polymer_entity_container_identifiers"]
):
chains = content["rcsb_polymer_entity_container_identifiers"][
"auth_asym_ids"
]
r = requests.get(
f"https://data.rcsb.org/rest/v1/core/uniprot/{idcode}/{entity}"
)
if not r.ok:
continue
content = r.json()[0]
rcsb_id = None
uniprot_accession_codes = None
seq_align_beg = None
seq_align_end = None
if "rcsb_id" in content:
rcsb_id = content["rcsb_id"]
if "rcsb_uniprot_accession" in content:
uniprot_accession_codes = content["rcsb_uniprot_accession"]
if (
"rcsb_uniprot_feature" in content
and "feature_positions" in content["rcsb_uniprot_feature"][0]
):
align = content["rcsb_uniprot_feature"][0]["feature_positions"][0]
if "beg_seq_id" in align:
seq_align_beg = align["beg_seq_id"]
if "end_seq_id" in align:
seq_align_end = align["end_seq_id"]
new_seqalign = SequenceAlign(
pid=pid,
entity=entity,
rcsb_id=rcsb_id,
uniprot_accession_codes=uniprot_accession_codes,
chains=chains,
seq_align_beg=seq_align_beg,
seq_align_end=seq_align_end,
)
session.add(new_seqalign)
session.commit()
def save_structure_quality(idcode, pid):
"""
https://data.rcsb.org/rest/v1/core/entry/4LZT
ls_rfactor_rfree
clashscore
percent_ramachandran_outliers
percent_rotamer_outliers
percent_rsrzoutliers
"""
exists_validation = (
session.query(StructureValidation.pid)
.filter(StructureValidation.pid == pid)
.all()
)
if exists_validation:
logging.info(f"{idcode} structure validation already exist.")
return
r = requests.get(f"https://data.rcsb.org/rest/v1/core/entry/{idcode}")
if not r.ok:
logging.error(f"Unable to get structure validation info about {idcode}")
return
content = r.json()
rfree = None
clashscore = None
percent_ramachandran_outliers = None
percent_rotamer_outliers = None
percent_rsrzoutliers = None
if "refine" in content and "ls_rfactor_rfree" in content["refine"][0]:
rfree = content["refine"][0]["ls_rfactor_rfree"]
if "pdbx_vrpt_summary" in content:
val_summ = content["pdbx_vrpt_summary"]
if "clashscore" in val_summ:
clashscore = val_summ["clashscore"]
if "percent_ramachandran_outliers" in val_summ:
percent_ramachandran_outliers = val_summ["percent_ramachandran_outliers"]
if "percent_rotamer_outliers" in val_summ:
percent_rotamer_outliers = val_summ["percent_rotamer_outliers"]
if "percent_rsrzoutliers" in val_summ:
percent_rsrzoutliers = val_summ["percent_rsrzoutliers"]
new_val = StructureValidation(
pid=pid,
rfree=rfree,
clashscore=clashscore,
rama=percent_ramachandran_outliers,
rota=percent_rotamer_outliers,
rsrz=percent_rsrzoutliers,
)
session.add(new_val)
session.commit()
if __name__ == "__main__":
idcode = sys.argv[1].lower()
print("############", idcode, "############")
pid = session.query(Protein.pid).filter_by(idcode=idcode).first()[0]
save_experimental_conditions(idcode, pid)
save_sequence_info(idcode, pid)
save_structure_quality(idcode, pid) | [
"logging.basicConfig",
"sys.path.insert",
"db.session.add",
"db.StructureValidation",
"requests.get",
"db.session.query",
"utils.download_cif",
"db.SequenceAlign",
"os.path.abspath",
"db.session.commit",
"sqlalchemy.or_",
"logging.info",
"logging.error"
] | [((91, 128), 'sys.path.insert', 'sys.path.insert', (['(1)', 'f"""{file_dir}/../"""'], {}), "(1, f'{file_dir}/../')\n", (106, 128), False, 'import sys\n'), ((272, 342), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""', 'level': '"""INFO"""'}), "(format='%(levelname)s: %(message)s', level='INFO')\n", (291, 342), False, 'import logging\n'), ((64, 89), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (79, 89), False, 'import os\n'), ((1229, 1254), 'utils.download_cif', 'download_cif', (['idcode', 'pid'], {}), '(idcode, pid)\n', (1241, 1254), False, 'from utils import download_cif\n'), ((3068, 3134), 'requests.get', 'requests.get', (['f"""https://data.rcsb.org/rest/v1/core/entry/{idcode}"""'], {}), "(f'https://data.rcsb.org/rest/v1/core/entry/{idcode}')\n", (3080, 3134), False, 'import requests\n'), ((5028, 5044), 'db.session.commit', 'session.commit', ([], {}), '()\n', (5042, 5044), False, 'from db import session, Protein, SequenceAlign, StructureValidation\n'), ((5540, 5606), 'requests.get', 'requests.get', (['f"""https://data.rcsb.org/rest/v1/core/entry/{idcode}"""'], {}), "(f'https://data.rcsb.org/rest/v1/core/entry/{idcode}')\n", (5552, 5606), False, 'import requests\n'), ((6597, 6765), 'db.StructureValidation', 'StructureValidation', ([], {'pid': 'pid', 'rfree': 'rfree', 'clashscore': 'clashscore', 'rama': 'percent_ramachandran_outliers', 'rota': 'percent_rotamer_outliers', 'rsrz': 'percent_rsrzoutliers'}), '(pid=pid, rfree=rfree, clashscore=clashscore, rama=\n percent_ramachandran_outliers, rota=percent_rotamer_outliers, rsrz=\n percent_rsrzoutliers)\n', (6616, 6765), False, 'from db import session, Protein, SequenceAlign, StructureValidation\n'), ((6815, 6835), 'db.session.add', 'session.add', (['new_val'], {}), '(new_val)\n', (6826, 6835), False, 'from db import session, Protein, SequenceAlign, StructureValidation\n'), ((6840, 6856), 'db.session.commit', 'session.commit', ([], {}), '()\n', (6854, 6856), False, 'from db import session, Protein, SequenceAlign, StructureValidation\n'), ((1133, 1197), 'logging.info', 'logging.info', (['f"""{idcode} experimental conditions already exist."""'], {}), "(f'{idcode} experimental conditions already exist.')\n", (1145, 1197), False, 'import logging\n'), ((2369, 2385), 'db.session.commit', 'session.commit', ([], {}), '()\n', (2383, 2385), False, 'from db import session, Protein, SequenceAlign, StructureValidation\n'), ((2972, 3043), 'logging.info', 'logging.info', (['f"""{idcode} sequence alignment information already exist."""'], {}), "(f'{idcode} sequence alignment information already exist.')\n", (2984, 3043), False, 'import logging\n'), ((3291, 3380), 'requests.get', 'requests.get', (['f"""https://data.rcsb.org/rest/v1/core/polymer_entity/{idcode}/{entity}"""'], {}), "(\n f'https://data.rcsb.org/rest/v1/core/polymer_entity/{idcode}/{entity}')\n", (3303, 3380), False, 'import requests\n'), ((3802, 3879), 'requests.get', 'requests.get', (['f"""https://data.rcsb.org/rest/v1/core/uniprot/{idcode}/{entity}"""'], {}), "(f'https://data.rcsb.org/rest/v1/core/uniprot/{idcode}/{entity}')\n", (3814, 3879), False, 'import requests\n'), ((4718, 4902), 'db.SequenceAlign', 'SequenceAlign', ([], {'pid': 'pid', 'entity': 'entity', 'rcsb_id': 'rcsb_id', 'uniprot_accession_codes': 'uniprot_accession_codes', 'chains': 'chains', 'seq_align_beg': 'seq_align_beg', 'seq_align_end': 'seq_align_end'}), '(pid=pid, entity=entity, rcsb_id=rcsb_id,\n uniprot_accession_codes=uniprot_accession_codes, chains=chains,\n seq_align_beg=seq_align_beg, seq_align_end=seq_align_end)\n', (4731, 4902), False, 'from db import session, Protein, SequenceAlign, StructureValidation\n'), ((4998, 5023), 'db.session.add', 'session.add', (['new_seqalign'], {}), '(new_seqalign)\n', (5009, 5023), False, 'from db import session, Protein, SequenceAlign, StructureValidation\n'), ((5454, 5515), 'logging.info', 'logging.info', (['f"""{idcode} structure validation already exist."""'], {}), "(f'{idcode} structure validation already exist.')\n", (5466, 5515), False, 'import logging\n'), ((5632, 5704), 'logging.error', 'logging.error', (['f"""Unable to get structure validation info about {idcode}"""'], {}), "(f'Unable to get structure validation info about {idcode}')\n", (5645, 5704), False, 'import logging\n'), ((1019, 1072), 'sqlalchemy.or_', 'or_', (['(Protein.exp_ph != None)', '(Protein.exp_temp != None)'], {}), '(Protein.exp_ph != None, Protein.exp_temp != None)\n', (1022, 1072), False, 'from sqlalchemy import or_\n'), ((2860, 2892), 'db.session.query', 'session.query', (['SequenceAlign.pid'], {}), '(SequenceAlign.pid)\n', (2873, 2892), False, 'from db import session, Protein, SequenceAlign, StructureValidation\n'), ((5312, 5350), 'db.session.query', 'session.query', (['StructureValidation.pid'], {}), '(StructureValidation.pid)\n', (5325, 5350), False, 'from db import session, Protein, SequenceAlign, StructureValidation\n'), ((2197, 2219), 'db.session.query', 'session.query', (['Protein'], {}), '(Protein)\n', (2210, 2219), False, 'from db import session, Protein, SequenceAlign, StructureValidation\n'), ((6981, 7007), 'db.session.query', 'session.query', (['Protein.pid'], {}), '(Protein.pid)\n', (6994, 7007), False, 'from db import session, Protein, SequenceAlign, StructureValidation\n'), ((940, 966), 'db.session.query', 'session.query', (['Protein.pid'], {}), '(Protein.pid)\n', (953, 966), False, 'from db import session, Protein, SequenceAlign, StructureValidation\n')] |
import os
from typing import Type
class Config(object):
# Flask.
DEBUG = True
FLASK_ENV = os.getenv("FLASK_ENV", "DEBUG")
# Telegram.
TELEGRAM_TOKEN = ""
class ProductionConfig(Config):
DEBUG = False
# Telegram.
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN", "")
class DebugConfig(Config):
DEBUG = True
# Telegram.
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN_TEST", "")
class TestConfig(Config):
DEBUG = True
# Telegram.
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN_TEST", "")
def get_config_from_env() -> Type[Config]:
if Config.FLASK_ENV == "DEBUG":
return DebugConfig
elif Config.FLASK_ENV == "PROD":
return ProductionConfig
elif Config.FLASK_ENV == "TEST":
return TestConfig
else:
raise NotImplementedError("Unknown environment %s" % Config.FLASK_ENV)
| [
"os.getenv"
] | [((104, 135), 'os.getenv', 'os.getenv', (['"""FLASK_ENV"""', '"""DEBUG"""'], {}), "('FLASK_ENV', 'DEBUG')\n", (113, 135), False, 'import os\n'), ((265, 296), 'os.getenv', 'os.getenv', (['"""TELEGRAM_TOKEN"""', '""""""'], {}), "('TELEGRAM_TOKEN', '')\n", (274, 296), False, 'import os\n'), ((380, 416), 'os.getenv', 'os.getenv', (['"""TELEGRAM_TOKEN_TEST"""', '""""""'], {}), "('TELEGRAM_TOKEN_TEST', '')\n", (389, 416), False, 'import os\n'), ((499, 535), 'os.getenv', 'os.getenv', (['"""TELEGRAM_TOKEN_TEST"""', '""""""'], {}), "('TELEGRAM_TOKEN_TEST', '')\n", (508, 535), False, 'import os\n')] |
import click
from gradient.cli import common
from gradient.cli.clusters import clusters
from gradient.cli.common import api_key_option
from gradient.commands.machine_types import ListMachineTypesCommand
@clusters.group("machineTypes", help="Manage machine types")
def machine_types_group():
pass
@machine_types_group.command("list", help="List available machine types")
@click.option(
"--clusterId",
"cluster_id",
help="Filter machine types by cluster ID",
cls=common.GradientOption,
)
@api_key_option
@common.options_file
def list_machine_types(cluster_id=None, options_file=None, api_key=None):
command = ListMachineTypesCommand(api_key=api_key)
command.execute(cluster_id=cluster_id)
| [
"click.option",
"gradient.commands.machine_types.ListMachineTypesCommand",
"gradient.cli.clusters.clusters.group"
] | [((207, 266), 'gradient.cli.clusters.clusters.group', 'clusters.group', (['"""machineTypes"""'], {'help': '"""Manage machine types"""'}), "('machineTypes', help='Manage machine types')\n", (221, 266), False, 'from gradient.cli.clusters import clusters\n'), ((380, 496), 'click.option', 'click.option', (['"""--clusterId"""', '"""cluster_id"""'], {'help': '"""Filter machine types by cluster ID"""', 'cls': 'common.GradientOption'}), "('--clusterId', 'cluster_id', help=\n 'Filter machine types by cluster ID', cls=common.GradientOption)\n", (392, 496), False, 'import click\n'), ((636, 676), 'gradient.commands.machine_types.ListMachineTypesCommand', 'ListMachineTypesCommand', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (659, 676), False, 'from gradient.commands.machine_types import ListMachineTypesCommand\n')] |
from logging import log
from tensorflow.python.ops.variables import model_variables
from signver.extractor import MetricExtractor
def test_extractor_load():
model_path = "models/extractor/metric"
extractor = MetricExtractor()
extractor.load(model_path)
assert extractor.model is not None
| [
"signver.extractor.MetricExtractor"
] | [((219, 236), 'signver.extractor.MetricExtractor', 'MetricExtractor', ([], {}), '()\n', (234, 236), False, 'from signver.extractor import MetricExtractor\n')] |
import threading
import numpy as np
import time
import rospy
from sensor_msgs import point_cloud2
from std_msgs.msg import Header
from sensor_msgs.msg import PointCloud2, PointField
from stella_nav_core.geometry_utils import GeometryUtils
from stella_nav_core.config import CostConfig, MotionConfig
class State(object):
def __init__(self, x, y, theta, vx, avz, mask_rotation=None, cost=None, costs=None,
accum_cost=0.0, trajectory=None, accum_trajectory=np.array([], dtype=np.float64).reshape(0, 3), level=1, backtrace=[]):
self.x = x
self.y = y
self.theta = theta
self.vx = vx
self.avz = avz
self.mask_rotation = mask_rotation
self.cost = cost
self.accum_cost = accum_cost
self.costs = costs
self.trajectory = trajectory
self.accum_trajectory = accum_trajectory
self.level = level
self.backtrace = backtrace
class DWAPlanner(object):
LETHAL_COST = 1000.0
def __init__(
self, costmaps, costmap, linear_motion_config, angular_motion_config,
dt=0.1, heading_lookahead=0.1, predict_time=1.0, search_level=1,
default_road_width=0.5, heading_lethal_angle=np.pi/4, debug_cloud=True,
angular_speed_cost_config=CostConfig(0.01, 1.0), speed_cost_config=CostConfig(0.01, 1.0),
heading_cost_config=CostConfig(0.01, 1.0), goal_cost_config=CostConfig(1.0, 5.0),
obstacle_cost_config=CostConfig(100.0, 100.0)
):
self._linear_motion_config = MotionConfig(**linear_motion_config)
self._angular_motion_config = MotionConfig(**angular_motion_config)
self._dt = dt
self._predict_time = predict_time
self._search_level = search_level
self._twist = None
self._heading_lookahead = heading_lookahead
self._debug_cloud = debug_cloud
self._angular_speed_cost_config = CostConfig(**angular_speed_cost_config)
self._speed_cost_config = CostConfig(**speed_cost_config)
self._heading_cost_config = CostConfig(**heading_cost_config)
self._goal_cost_config = CostConfig(**goal_cost_config)
self._obstacle_cost_config = CostConfig(**obstacle_cost_config)
self._default_road_width = default_road_width
self._heading_lethal_angle = heading_lethal_angle
self._costmaps = costmaps
self._costmap = costmaps[costmap]
self._cost_pub = rospy.Publisher("~dwa_planner/cost_cloud", PointCloud2, queue_size=1)
self._lethal_cost_pub = rospy.Publisher("~dwa_planner/lethal_cost_cloud", PointCloud2, queue_size=1)
self._rotation_cost_pub = rospy.Publisher("~dwa_planner/rotation_cost_cloud", PointCloud2, queue_size=1)
self._fields = [
PointField(name="x", offset=0, datatype=PointField.FLOAT32, count=1),
PointField(name="y", offset=4, datatype=PointField.FLOAT32, count=1),
PointField(name="z", offset=8, datatype=PointField.FLOAT32, count=1),
PointField(name="speed", offset=12, datatype=PointField.FLOAT32, count=1),
PointField(name="obstacle", offset=16, datatype=PointField.FLOAT32, count=1),
PointField(name="goal", offset=20, datatype=PointField.FLOAT32, count=1),
PointField(name="angular_speed", offset=24, datatype=PointField.FLOAT32, count=1),
PointField(name="heading", offset=28, datatype=PointField.FLOAT32, count=1),
PointField(name="total", offset=32, datatype=PointField.FLOAT32, count=1),
]
self.lock = threading.RLock()
def update_twist(self, twist):
self.lock.acquire()
self._twist = twist
self.lock.release()
def _trajectory(self, x, y, theta, vx, avz):
t = np.linspace(0, self._predict_time, self._predict_time / self._dt)[np.newaxis, :, np.newaxis]
v = np.repeat(
np.vstack((vx * np.cos(theta), vx * np.sin(theta), np.zeros(vx.shape))).T[:, np.newaxis, :],
t.shape[1], axis=1)
pos = np.array((x, y, theta))[np.newaxis, np.newaxis, :]
traj = np.zeros(v.shape)
traj[avz != 0.0] = np.vstack(
((vx / avz) * (np.sin(avz * t + theta) - np.sin(theta)) + x,
(vx / avz) * (np.cos(theta) - np.cos(avz * t + theta)) + y,
avz * t + theta)).T
return traj
def _heading_cost(self, scoring_point, goal):
target_yaw = GeometryUtils.get_yaw(goal.pose.orientation)
angle = np.abs(GeometryUtils.regulate_rad(target_yaw - scoring_point[:, 0, 2]))
cost = self._heading_cost_config.get_cost(angle / np.pi)
cost[angle > self._heading_lethal_angle] += DWAPlanner.LETHAL_COST
return cost
def _angular_speed_cost(self, avz):
return self._angular_speed_cost_config.get_cost(np.abs(avz) / self._linear_motion_config.max_speed)
def _speed_cost(self, vx):
max_speed = max(self._linear_motion_config.max_speed, -self._linear_motion_config.min_speed)
return self._speed_cost_config.get_cost(max_speed - np.abs(vx)) / max_speed
def _speed_cost2(self, vx, scoring_point, goal):
target_yaw = GeometryUtils.get_yaw(goal.pose.orientation)
theta = scoring_point[:, 0, 2] - target_yaw
max_speed = max(self._linear_motion_config.max_speed, -self._linear_motion_config.min_speed)
return self._speed_cost_config.get_cost(
(max_speed - np.abs(vx) * np.cos(theta)) / max_speed
)
def _obstacle_cost(self, traj, scoring_point, costmap):
yaw = scoring_point[:, :, 2]
bias = np.stack((self._heading_lookahead * np.cos(yaw), self._heading_lookahead * np.sin(yaw)), axis=-1)
lethal_cost = np.zeros((scoring_point.shape[0], 1))
lethal_yaw = traj[:, :, 2]
lethal_look_point = traj[:, :, :2] + np.stack((self._heading_lookahead * np.cos(lethal_yaw), self._heading_lookahead * np.sin(lethal_yaw)), axis=-1)
current_pos = traj[:, 0:1, :2]
current_yaw = traj[:, 0:1, 2]
current_bias = np.stack((self._heading_lookahead * np.cos(current_yaw), self._heading_lookahead * np.sin(current_yaw)), axis=-1)
current_look_point = current_pos + current_bias
lethal_cost[np.any(
(costmap.get_value_from_world(lethal_look_point) > 0.99) * (np.linalg.norm(current_look_point - lethal_look_point, axis=2) > 1e-3),
axis=1)] = DWAPlanner.LETHAL_COST
look_point = scoring_point[:, :, :2] + bias
cost = self._obstacle_cost_config.get_cost(costmap.get_value_from_world(look_point))
return (cost + lethal_cost).reshape(cost.shape[0])
def _explicit_goal_cost(self, scoring_point, goal):
yaw = scoring_point[:, 0, 2]
return self._goal_cost_config.get_cost(np.hypot(
goal.pose.position.x - (scoring_point[:, 0, 0] + self._heading_lookahead * np.cos(yaw)),
goal.pose.position.y - (scoring_point[:, 0, 1] + self._heading_lookahead * np.sin(yaw))))
def _goal_cost(self, scoring_point, goal):
robot_yaw = scoring_point[:, 0, 2]
robot_pos = np.array(
(scoring_point[:, 0, 0] + self._heading_lookahead * np.cos(robot_yaw),
scoring_point[:, 0, 1] + self._heading_lookahead * np.sin(robot_yaw))).T
goal_pos = np.array((goal.pose.position.x, goal.pose.position.y))
u = robot_pos - goal_pos
goal_yaw = GeometryUtils.get_yaw(goal.pose.orientation)
v = (np.cos(goal_yaw), np.sin(goal_yaw))
square_distance = np.square(np.cross(u, v)) / np.square(goal.data.get("road_width", self._default_road_width))
cost = self._goal_cost_config.get_cost(square_distance)
cost[square_distance > 1.0] += DWAPlanner.LETHAL_COST
return cost
def _cost(self, trajectory, costmap, goal, vx, avz):
scoring_point = trajectory[:, -1:, :]
# speed_cost = self._speed_cost(vx)
speed_cost = self._speed_cost2(vx, scoring_point, goal)
obstacle_cost = self._obstacle_cost(trajectory, scoring_point, costmap)
if goal.data.get("explicit", None):
goal_cost = self._explicit_goal_cost(scoring_point, goal)
else:
goal_cost = self._goal_cost(scoring_point, goal)
angular_speed_cost = self._angular_speed_cost(avz)
heading_cost = self._heading_cost(scoring_point, goal)
costs = (speed_cost, obstacle_cost, goal_cost, angular_speed_cost, heading_cost)
return sum(costs), np.vstack(costs)
def _dynamic_window(self, linear_vx, angular_vz):
dw = [
max(self._linear_motion_config.min_speed, min(self._linear_motion_config.max_speed, linear_vx - self._linear_motion_config.max_accel * self._dt)),
min(self._linear_motion_config.max_speed, max(self._linear_motion_config.min_speed, linear_vx + self._linear_motion_config.max_accel * self._dt)),
max(self._angular_motion_config.min_speed, min(self._angular_motion_config.max_speed, angular_vz - self._angular_motion_config.max_accel * self._dt)),
min(self._angular_motion_config.max_speed, max(self._angular_motion_config.min_speed, angular_vz + self._angular_motion_config.max_accel * self._dt))
]
return dw
def _sample_v(self, dw):
_vx = np.linspace(dw[0], dw[1], self._linear_motion_config.samples)
_avz = np.linspace(dw[2], dw[3], self._angular_motion_config.samples)
_avz[_avz == 0.0] = 1e-6
vx, avz = np.meshgrid(_vx, _avz)
vx = vx.flatten()
avz = avz.flatten()
mask_rotation = vx == dw[0]
return vx, avz, mask_rotation
def _publish_cloud(self, trajectory, cost, costs, mask_rotation):
header = Header(frame_id="map")
points = np.vstack((trajectory[:, -1, :2].T, np.zeros(cost.shape), costs, cost)).T
mask = mask_rotation
x, y = trajectory[0, 0, :2]
theta_rot = trajectory[mask, -1, 2:3].T
r = 0.1
points_rot = np.vstack((x + r * np.cos(theta_rot), y + r * np.sin(theta_rot), np.zeros(cost[mask].shape), [c[mask] for c in costs], cost[mask])).T
points_rot_filtered = points_rot[points_rot[:, 3 + len(costs)] < DWAPlanner.LETHAL_COST]
points_filtered = points[points[:, 3 + len(costs)] < DWAPlanner.LETHAL_COST]
points_filtered_out = points[points[:, 3 + len(costs)] > DWAPlanner.LETHAL_COST - 1]
cost_msg = point_cloud2.create_cloud(header, self._fields, points_filtered)
lethal_cost_msg = point_cloud2.create_cloud(header, self._fields, points_filtered_out)
rotation_cost_msg = point_cloud2.create_cloud(header, self._fields, points_rot_filtered)
try:
self._cost_pub.publish(cost_msg)
self._lethal_cost_pub.publish(lethal_cost_msg)
self._rotation_cost_pub.publish(rotation_cost_msg)
except rospy.ROSException as e:
rospy.logdebug("DWAPlanner: {}".format(e))
def plan(self, pose, goal):
self.lock.acquire()
twist = self._twist
self.lock.release()
if twist is None:
return np.array((0.0, 0.0)), None
self._costmap.lock.acquire()
costmap = self._costmap.clone()
self._costmap.lock.release()
x = pose.pose.position.x
y = pose.pose.position.y
theta = GeometryUtils.get_yaw(pose.pose.orientation)
linear_vx = twist.linear.x
angular_vz = twist.angular.z
states = [State(x, y, theta, linear_vx, angular_vz)]
results = []
while states:
state = states.pop()
sample_vx, sample_avz, mask_rotation = self._sample_v(dw=self._dynamic_window(state.vx, state.avz))
trajectory = self._trajectory(state.x, state.y, state.theta, sample_vx, sample_avz)
cost, costs = self._cost(trajectory, costmap, goal, sample_vx, sample_avz)
for i in range(len(sample_vx)):
_vx = sample_vx[i]
_avz = sample_avz[i]
_mask_rotation = mask_rotation
_cost = cost
_costs = costs
_accum_cost = cost[i] + state.accum_cost
_x = trajectory[i, -1, 0]
_y = trajectory[i, -1, 1]
_theta = trajectory[i, -1, 2]
_trajectory = trajectory
_accum_trajectory = np.vstack((trajectory[i], state.accum_trajectory))
_backtrace = state.backtrace + [state]
new_state = State(_x, _y, _theta, _vx, _avz, _mask_rotation, _cost, _costs, _accum_cost, _trajectory, _accum_trajectory, state.level + 1, _backtrace)
if state.level < self._search_level:
states.append(new_state)
else:
results.append(new_state)
min_cost = None
min_idx = None
min_score = None
for state in results:
if min_cost is None or state.accum_cost < min_cost:
min_cost = state.accum_cost
min_score = (state.cost, state.vx, state.avz, state.trajectory)
min_backtrace = state.backtrace + [state]
if self._debug_cloud:
for state in min_backtrace[1:]:
self._publish_cloud(state.trajectory, state.cost, state.costs, state.mask_rotation)
if min_score is None:
min_score = (10000.0, 0.0, 0.0, [])
return min_score[1:3], min_score[3]
| [
"numpy.array",
"stella_nav_core.geometry_utils.GeometryUtils.get_yaw",
"numpy.linalg.norm",
"numpy.sin",
"stella_nav_core.config.MotionConfig",
"numpy.cross",
"threading.RLock",
"stella_nav_core.config.CostConfig",
"numpy.linspace",
"sensor_msgs.point_cloud2.create_cloud",
"numpy.vstack",
"num... | [((1289, 1310), 'stella_nav_core.config.CostConfig', 'CostConfig', (['(0.01)', '(1.0)'], {}), '(0.01, 1.0)\n', (1299, 1310), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1330, 1351), 'stella_nav_core.config.CostConfig', 'CostConfig', (['(0.01)', '(1.0)'], {}), '(0.01, 1.0)\n', (1340, 1351), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1385, 1406), 'stella_nav_core.config.CostConfig', 'CostConfig', (['(0.01)', '(1.0)'], {}), '(0.01, 1.0)\n', (1395, 1406), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1425, 1445), 'stella_nav_core.config.CostConfig', 'CostConfig', (['(1.0)', '(5.0)'], {}), '(1.0, 5.0)\n', (1435, 1445), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1480, 1504), 'stella_nav_core.config.CostConfig', 'CostConfig', (['(100.0)', '(100.0)'], {}), '(100.0, 100.0)\n', (1490, 1504), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1549, 1585), 'stella_nav_core.config.MotionConfig', 'MotionConfig', ([], {}), '(**linear_motion_config)\n', (1561, 1585), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1624, 1661), 'stella_nav_core.config.MotionConfig', 'MotionConfig', ([], {}), '(**angular_motion_config)\n', (1636, 1661), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1929, 1968), 'stella_nav_core.config.CostConfig', 'CostConfig', ([], {}), '(**angular_speed_cost_config)\n', (1939, 1968), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((2003, 2034), 'stella_nav_core.config.CostConfig', 'CostConfig', ([], {}), '(**speed_cost_config)\n', (2013, 2034), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((2071, 2104), 'stella_nav_core.config.CostConfig', 'CostConfig', ([], {}), '(**heading_cost_config)\n', (2081, 2104), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((2138, 2168), 'stella_nav_core.config.CostConfig', 'CostConfig', ([], {}), '(**goal_cost_config)\n', (2148, 2168), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((2206, 2240), 'stella_nav_core.config.CostConfig', 'CostConfig', ([], {}), '(**obstacle_cost_config)\n', (2216, 2240), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((2454, 2523), 'rospy.Publisher', 'rospy.Publisher', (['"""~dwa_planner/cost_cloud"""', 'PointCloud2'], {'queue_size': '(1)'}), "('~dwa_planner/cost_cloud', PointCloud2, queue_size=1)\n", (2469, 2523), False, 'import rospy\n'), ((2556, 2632), 'rospy.Publisher', 'rospy.Publisher', (['"""~dwa_planner/lethal_cost_cloud"""', 'PointCloud2'], {'queue_size': '(1)'}), "('~dwa_planner/lethal_cost_cloud', PointCloud2, queue_size=1)\n", (2571, 2632), False, 'import rospy\n'), ((2667, 2745), 'rospy.Publisher', 'rospy.Publisher', (['"""~dwa_planner/rotation_cost_cloud"""', 'PointCloud2'], {'queue_size': '(1)'}), "('~dwa_planner/rotation_cost_cloud', PointCloud2, queue_size=1)\n", (2682, 2745), False, 'import rospy\n'), ((3581, 3598), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (3596, 3598), False, 'import threading\n'), ((4114, 4131), 'numpy.zeros', 'np.zeros', (['v.shape'], {}), '(v.shape)\n', (4122, 4131), True, 'import numpy as np\n'), ((4441, 4485), 'stella_nav_core.geometry_utils.GeometryUtils.get_yaw', 'GeometryUtils.get_yaw', (['goal.pose.orientation'], {}), '(goal.pose.orientation)\n', (4462, 4485), False, 'from stella_nav_core.geometry_utils import GeometryUtils\n'), ((5175, 5219), 'stella_nav_core.geometry_utils.GeometryUtils.get_yaw', 'GeometryUtils.get_yaw', (['goal.pose.orientation'], {}), '(goal.pose.orientation)\n', (5196, 5219), False, 'from stella_nav_core.geometry_utils import GeometryUtils\n'), ((5730, 5767), 'numpy.zeros', 'np.zeros', (['(scoring_point.shape[0], 1)'], {}), '((scoring_point.shape[0], 1))\n', (5738, 5767), True, 'import numpy as np\n'), ((7317, 7371), 'numpy.array', 'np.array', (['(goal.pose.position.x, goal.pose.position.y)'], {}), '((goal.pose.position.x, goal.pose.position.y))\n', (7325, 7371), True, 'import numpy as np\n'), ((7424, 7468), 'stella_nav_core.geometry_utils.GeometryUtils.get_yaw', 'GeometryUtils.get_yaw', (['goal.pose.orientation'], {}), '(goal.pose.orientation)\n', (7445, 7468), False, 'from stella_nav_core.geometry_utils import GeometryUtils\n'), ((9304, 9365), 'numpy.linspace', 'np.linspace', (['dw[0]', 'dw[1]', 'self._linear_motion_config.samples'], {}), '(dw[0], dw[1], self._linear_motion_config.samples)\n', (9315, 9365), True, 'import numpy as np\n'), ((9381, 9443), 'numpy.linspace', 'np.linspace', (['dw[2]', 'dw[3]', 'self._angular_motion_config.samples'], {}), '(dw[2], dw[3], self._angular_motion_config.samples)\n', (9392, 9443), True, 'import numpy as np\n'), ((9495, 9517), 'numpy.meshgrid', 'np.meshgrid', (['_vx', '_avz'], {}), '(_vx, _avz)\n', (9506, 9517), True, 'import numpy as np\n'), ((9734, 9756), 'std_msgs.msg.Header', 'Header', ([], {'frame_id': '"""map"""'}), "(frame_id='map')\n", (9740, 9756), False, 'from std_msgs.msg import Header\n'), ((10427, 10491), 'sensor_msgs.point_cloud2.create_cloud', 'point_cloud2.create_cloud', (['header', 'self._fields', 'points_filtered'], {}), '(header, self._fields, points_filtered)\n', (10452, 10491), False, 'from sensor_msgs import point_cloud2\n'), ((10518, 10586), 'sensor_msgs.point_cloud2.create_cloud', 'point_cloud2.create_cloud', (['header', 'self._fields', 'points_filtered_out'], {}), '(header, self._fields, points_filtered_out)\n', (10543, 10586), False, 'from sensor_msgs import point_cloud2\n'), ((10615, 10683), 'sensor_msgs.point_cloud2.create_cloud', 'point_cloud2.create_cloud', (['header', 'self._fields', 'points_rot_filtered'], {}), '(header, self._fields, points_rot_filtered)\n', (10640, 10683), False, 'from sensor_msgs import point_cloud2\n'), ((11345, 11389), 'stella_nav_core.geometry_utils.GeometryUtils.get_yaw', 'GeometryUtils.get_yaw', (['pose.pose.orientation'], {}), '(pose.pose.orientation)\n', (11366, 11389), False, 'from stella_nav_core.geometry_utils import GeometryUtils\n'), ((2783, 2851), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""x"""', 'offset': '(0)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='x', offset=0, datatype=PointField.FLOAT32, count=1)\n", (2793, 2851), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((2865, 2933), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""y"""', 'offset': '(4)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='y', offset=4, datatype=PointField.FLOAT32, count=1)\n", (2875, 2933), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((2947, 3015), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""z"""', 'offset': '(8)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='z', offset=8, datatype=PointField.FLOAT32, count=1)\n", (2957, 3015), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3029, 3102), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""speed"""', 'offset': '(12)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='speed', offset=12, datatype=PointField.FLOAT32, count=1)\n", (3039, 3102), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3116, 3192), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""obstacle"""', 'offset': '(16)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='obstacle', offset=16, datatype=PointField.FLOAT32, count=1)\n", (3126, 3192), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3206, 3278), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""goal"""', 'offset': '(20)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='goal', offset=20, datatype=PointField.FLOAT32, count=1)\n", (3216, 3278), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3292, 3377), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""angular_speed"""', 'offset': '(24)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='angular_speed', offset=24, datatype=PointField.FLOAT32,\n count=1)\n", (3302, 3377), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3387, 3462), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""heading"""', 'offset': '(28)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='heading', offset=28, datatype=PointField.FLOAT32, count=1)\n", (3397, 3462), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3476, 3549), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""total"""', 'offset': '(32)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='total', offset=32, datatype=PointField.FLOAT32, count=1)\n", (3486, 3549), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3781, 3846), 'numpy.linspace', 'np.linspace', (['(0)', 'self._predict_time', '(self._predict_time / self._dt)'], {}), '(0, self._predict_time, self._predict_time / self._dt)\n', (3792, 3846), True, 'import numpy as np\n'), ((4048, 4071), 'numpy.array', 'np.array', (['(x, y, theta)'], {}), '((x, y, theta))\n', (4056, 4071), True, 'import numpy as np\n'), ((4509, 4572), 'stella_nav_core.geometry_utils.GeometryUtils.regulate_rad', 'GeometryUtils.regulate_rad', (['(target_yaw - scoring_point[:, 0, 2])'], {}), '(target_yaw - scoring_point[:, 0, 2])\n', (4535, 4572), False, 'from stella_nav_core.geometry_utils import GeometryUtils\n'), ((7482, 7498), 'numpy.cos', 'np.cos', (['goal_yaw'], {}), '(goal_yaw)\n', (7488, 7498), True, 'import numpy as np\n'), ((7500, 7516), 'numpy.sin', 'np.sin', (['goal_yaw'], {}), '(goal_yaw)\n', (7506, 7516), True, 'import numpy as np\n'), ((8502, 8518), 'numpy.vstack', 'np.vstack', (['costs'], {}), '(costs)\n', (8511, 8518), True, 'import numpy as np\n'), ((477, 507), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (485, 507), True, 'import numpy as np\n'), ((4831, 4842), 'numpy.abs', 'np.abs', (['avz'], {}), '(avz)\n', (4837, 4842), True, 'import numpy as np\n'), ((7554, 7568), 'numpy.cross', 'np.cross', (['u', 'v'], {}), '(u, v)\n', (7562, 7568), True, 'import numpy as np\n'), ((11121, 11141), 'numpy.array', 'np.array', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (11129, 11141), True, 'import numpy as np\n'), ((12382, 12432), 'numpy.vstack', 'np.vstack', (['(trajectory[i], state.accum_trajectory)'], {}), '((trajectory[i], state.accum_trajectory))\n', (12391, 12432), True, 'import numpy as np\n'), ((5076, 5086), 'numpy.abs', 'np.abs', (['vx'], {}), '(vx)\n', (5082, 5086), True, 'import numpy as np\n'), ((5646, 5657), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (5652, 5657), True, 'import numpy as np\n'), ((5685, 5696), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (5691, 5696), True, 'import numpy as np\n'), ((6097, 6116), 'numpy.cos', 'np.cos', (['current_yaw'], {}), '(current_yaw)\n', (6103, 6116), True, 'import numpy as np\n'), ((6144, 6163), 'numpy.sin', 'np.sin', (['current_yaw'], {}), '(current_yaw)\n', (6150, 6163), True, 'import numpy as np\n'), ((9810, 9830), 'numpy.zeros', 'np.zeros', (['cost.shape'], {}), '(cost.shape)\n', (9818, 9830), True, 'import numpy as np\n'), ((10063, 10089), 'numpy.zeros', 'np.zeros', (['cost[mask].shape'], {}), '(cost[mask].shape)\n', (10071, 10089), True, 'import numpy as np\n'), ((5447, 5457), 'numpy.abs', 'np.abs', (['vx'], {}), '(vx)\n', (5453, 5457), True, 'import numpy as np\n'), ((5460, 5473), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5466, 5473), True, 'import numpy as np\n'), ((5884, 5902), 'numpy.cos', 'np.cos', (['lethal_yaw'], {}), '(lethal_yaw)\n', (5890, 5902), True, 'import numpy as np\n'), ((5930, 5948), 'numpy.sin', 'np.sin', (['lethal_yaw'], {}), '(lethal_yaw)\n', (5936, 5948), True, 'import numpy as np\n'), ((6332, 6394), 'numpy.linalg.norm', 'np.linalg.norm', (['(current_look_point - lethal_look_point)'], {'axis': '(2)'}), '(current_look_point - lethal_look_point, axis=2)\n', (6346, 6394), True, 'import numpy as np\n'), ((3960, 3978), 'numpy.zeros', 'np.zeros', (['vx.shape'], {}), '(vx.shape)\n', (3968, 3978), True, 'import numpy as np\n'), ((6892, 6903), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (6898, 6903), True, 'import numpy as np\n'), ((6993, 7004), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (6999, 7004), True, 'import numpy as np\n'), ((7193, 7210), 'numpy.cos', 'np.cos', (['robot_yaw'], {}), '(robot_yaw)\n', (7199, 7210), True, 'import numpy as np\n'), ((7276, 7293), 'numpy.sin', 'np.sin', (['robot_yaw'], {}), '(robot_yaw)\n', (7282, 7293), True, 'import numpy as np\n'), ((10017, 10034), 'numpy.cos', 'np.cos', (['theta_rot'], {}), '(theta_rot)\n', (10023, 10034), True, 'import numpy as np\n'), ((10044, 10061), 'numpy.sin', 'np.sin', (['theta_rot'], {}), '(theta_rot)\n', (10050, 10061), True, 'import numpy as np\n'), ((3925, 3938), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3931, 3938), True, 'import numpy as np\n'), ((3945, 3958), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3951, 3958), True, 'import numpy as np\n'), ((4197, 4220), 'numpy.sin', 'np.sin', (['(avz * t + theta)'], {}), '(avz * t + theta)\n', (4203, 4220), True, 'import numpy as np\n'), ((4223, 4236), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4229, 4236), True, 'import numpy as np\n'), ((4270, 4283), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4276, 4283), True, 'import numpy as np\n'), ((4286, 4309), 'numpy.cos', 'np.cos', (['(avz * t + theta)'], {}), '(avz * t + theta)\n', (4292, 4309), True, 'import numpy as np\n')] |
"""Easily convert RGB video data (e.g. .avi) to the TensorFlow tfrecords file format with the provided 3 color channels.
Allows to subsequently train a neural network in TensorFlow with the generated tfrecords.
Due to common hardware/GPU RAM limitations, this implementation allows to limit the number of frames per
video actually stored in the tfrecords. The code automatically chooses the frame step size such that there is
an equal separation distribution of the video images. Implementation supports Optical Flow
(currently OpenCV's calcOpticalFlowFarneback) as an additional 4th channel.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, math
from tensorflow.python.platform import gfile
from tensorflow.python.platform import flags
from tensorflow.python.platform import app
import cv2 as cv2
import numpy as np
import tensorflow as tf
FLAGS = None
FILE_FILTER = '*.avi'
NUM_FRAMES_PER_VIDEO = 15
NUM_CHANNELS_VIDEO = 4
WIDTH_VIDEO = 128
HEIGHT_VIDEO = 128
SOURCE = '/insert/source/here'
DESTINATION = '/insert/destination/here'
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_videos', 1000, 'Number of videos stored in one single tfrecords file')
flags.DEFINE_string('image_color_depth', np.uint8, 'Color depth for the images stored in the tfrecords files. '
'Has to correspond to the source video color depth. '
'Specified as np dtype (e.g. ''np.uint8).')
flags.DEFINE_string('source', SOURCE, 'Directory with video files')
flags.DEFINE_string('output_path', DESTINATION, 'Directory for storing tf records')
flags.DEFINE_boolean('optical_flow', True, 'Indictes whether optical flow shall be computed and added as fourth '
'channel. Defaults to False')
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def get_chunks(l, n):
"""Yield successive n-sized chunks from l.
Used to create n sublists from a list l"""
for i in range(0, len(l), n):
yield l[i:i + n]
def getVideoCapture(path):
cap = None
if path:
cap = cv2.VideoCapture(path)
return cap
def getNextFrame(cap):
ret, frame = cap.read()
if ret == False:
return None
return np.asarray(frame)
def compute_dense_optical_flow(prev_image, current_image):
old_shape = current_image.shape
prev_image_gray = cv2.cvtColor(prev_image, cv2.COLOR_BGR2GRAY)
current_image_gray = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)
assert current_image.shape == old_shape
hsv = np.zeros_like(prev_image)
hsv[..., 1] = 255
flow = cv2.calcOpticalFlowFarneback(prev_image_gray, current_image_gray, 0.8, 15, 5, 10, 5, 1.5, 0)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang*180/np.pi/2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
def save_video_to_tfrecords(source_path, destination_path, videos_per_file=FLAGS.num_videos, video_filenames=None,
dense_optical_flow=False):
"""calls sub-functions convert_video_to_numpy and save_numpy_to_tfrecords in order to directly export tfrecords files
:param source_path: directory where video videos are stored
:param destination_path: directory where tfrecords should be stored
:param videos_per_file: specifies the number of videos within one tfrecords file
:param dense_optical_flow: boolean flag that controls if optical flow should be used and added to tfrecords
"""
global NUM_CHANNELS_VIDEO
assert (NUM_CHANNELS_VIDEO == 3 and (not dense_optical_flow)) or (NUM_CHANNELS_VIDEO == 4 and dense_optical_flow), "correct NUM_CHANNELS_VIDEO"
if video_filenames is not None:
filenames = video_filenames
else:
filenames = gfile.Glob(os.path.join(source_path, FILE_FILTER))
if not filenames:
raise RuntimeError('No data files found.')
print('Total videos found: ' + str(len(filenames)))
filenames_split = list(get_chunks(filenames, videos_per_file))
for i, batch in enumerate(filenames_split):
data = convert_video_to_numpy(batch, dense_optical_flow=dense_optical_flow)
total_batch_number = int(math.ceil(len(filenames)/videos_per_file))
print('Batch ' + str(i+1) + '/' + str(total_batch_number))
save_numpy_to_tfrecords(data, destination_path, 'train_blobs_batch_', videos_per_file, i+1,
total_batch_number)
def save_numpy_to_tfrecords(data, destination_path, name, fragmentSize, current_batch_number, total_batch_number):
"""Converts an entire dataset into x tfrecords where x=videos/fragmentSize.
:param data: ndarray(uint32) of shape (v,i,h,w,c) with v=number of videos, i=number of images, c=number of image
channels, h=image height, w=image width
:param name: filename; data samples type (train|valid|test)
:param fragmentSize: specifies how many videos are stored in one tfrecords file
:param current_batch_number: indicates the current batch index (function call within loop)
:param total_batch_number: indicates the total number of batches
"""
num_videos = data.shape[0]
num_images = data.shape[1]
num_channels = data.shape[4]
height = data.shape[2]
width = data.shape[3]
writer = None
feature = {}
for videoCount in range((num_videos)):
if videoCount % fragmentSize == 0:
if writer is not None:
writer.close()
filename = os.path.join(destination_path, name + str(current_batch_number) + '_of_' + str(total_batch_number) + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
for imageCount in range(num_images):
path = 'blob' + '/' + str(imageCount)
image = data[videoCount, imageCount, :, :, :]
image = image.astype(FLAGS.image_color_depth)
image_raw = image.tostring()
feature[path]= _bytes_feature(image_raw)
feature['height'] = _int64_feature(height)
feature['width'] = _int64_feature(width)
feature['depth'] = _int64_feature(num_channels)
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
if writer is not None:
writer.close()
def convert_video_to_numpy(filenames, dense_optical_flow=False):
"""Generates an ndarray from multiple video files given by filenames.
Implementation chooses frame step size automatically for a equal separation distribution of the video images.
:param filenames
:param type: processing type for video data
:return if no optical flow is used: ndarray(uint32) of shape (v,i,h,w,c) with v=number of videos, i=number of images,
(h,w)=height and width of image, c=channel, if optical flow is used: ndarray(uint32) of (v,i,h,w,
c+1)"""
global NUM_CHANNELS_VIDEO
if not filenames:
raise RuntimeError('No data files found.')
number_of_videos = len(filenames)
if dense_optical_flow:
# need an additional channel for the optical flow with one exception:
global NUM_CHANNELS_VIDEO
NUM_CHANNELS_VIDEO = 4
num_real_image_channel = 3
else:
# if no optical flow, make everything normal:
num_real_image_channel = NUM_CHANNELS_VIDEO
data = []
def video_file_to_ndarray(i, filename):
image = np.zeros((HEIGHT_VIDEO, WIDTH_VIDEO, num_real_image_channel), dtype=FLAGS.image_color_depth)
video = np.zeros((NUM_FRAMES_PER_VIDEO, HEIGHT_VIDEO, WIDTH_VIDEO, NUM_CHANNELS_VIDEO), dtype=np.uint32)
imagePrev = None
assert os.path.isfile(filename), "Couldn't find video file"
cap = getVideoCapture(filename)
assert cap is not None, "Couldn't load video capture:" + filename + ". Moving to next video."
# compute meta data of video
frameCount = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
# returns nan, if fps needed a measurement must be implemented
# frameRate = cap.get(cv2.cv.CV_CAP_PROP_FPS)
steps = math.floor(frameCount / NUM_FRAMES_PER_VIDEO)
j = 0
prev_frame_none = False
restart = True
assert not (frameCount < 1 or steps < 1), str(filename) + " does not have enough frames. Moving to next video."
while restart:
for f in range(int(frameCount)):
# get next frame after 'steps' iterations:
# floor used after modulo operation because rounding module before leads to
# unhandy partition of data (big gab in the end)
if math.floor(f % steps) == 0:
frame = getNextFrame(cap)
# special case handling: opencv's frame count != real frame count, reiterate over same video
if frame is None and j < NUM_FRAMES_PER_VIDEO:
if frame and prev_frame_none: break
prev_frame_none = True
# repeat with smaller step size
steps -= 1
if steps == 0: break
print("reducing step size due to error")
j = 0
cap.release()
cap = getVideoCapture(filenames[i])
# wait for image retrieval to be ready
cv2.waitKey(3000)
video.fill(0)
continue
else:
if j >= NUM_FRAMES_PER_VIDEO:
restart = False
break
# iterate over channels
if frame.ndim == 2:
# cv returns 2 dim array if gray
resizedImage = cv2.resize(frame[:, :], (HEIGHT_VIDEO, WIDTH_VIDEO))
else:
for k in range(num_real_image_channel):
resizedImage = cv2.resize(frame[:, :, k], (HEIGHT_VIDEO, WIDTH_VIDEO))
image[:, :, k] = resizedImage
if dense_optical_flow:
# optical flow requires at least two images
if imagePrev is not None:
frameFlow = np.zeros((HEIGHT_VIDEO, WIDTH_VIDEO))
frameFlow = compute_dense_optical_flow(imagePrev, image)
frameFlow = cv2.cvtColor(frameFlow, cv2.COLOR_BGR2GRAY)
else:
frameFlow = np.zeros((HEIGHT_VIDEO, WIDTH_VIDEO))
imagePrev = image.copy()
if dense_optical_flow:
image_with_flow = image.copy()
image_with_flow = np.concatenate((image_with_flow, np.expand_dims(frameFlow, axis=2)), axis=2)
video[j, :, :, :] = image_with_flow
else:
video[j, :, :, :] = image
j += 1
# print('total frames: ' + str(j) + " frame in video: " + str(f))
else:
getNextFrame(cap)
print(str(i + 1) + " of " + str(number_of_videos) + " videos processed", filenames[i])
v = video.copy()
cap.release()
return v
for i, file in enumerate(filenames):
try:
v = video_file_to_ndarray(i, file)
data.append(v)
except Exception as e:
print(e)
return np.array(data)
def main(argv):
save_video_to_tfrecords(FLAGS.source, FLAGS.output_path, FLAGS.num_videos, dense_optical_flow=FLAGS.optical_flow)
if __name__ == '__main__':
app.run()
| [
"cv2.normalize",
"math.floor",
"tensorflow.train.Int64List",
"numpy.array",
"numpy.asarray",
"tensorflow.python_io.TFRecordWriter",
"cv2.calcOpticalFlowFarneback",
"cv2.waitKey",
"tensorflow.python.platform.app.run",
"tensorflow.python.platform.flags.DEFINE_integer",
"tensorflow.train.BytesList"... | [((1141, 1241), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_videos"""', '(1000)', '"""Number of videos stored in one single tfrecords file"""'], {}), "('num_videos', 1000,\n 'Number of videos stored in one single tfrecords file')\n", (1161, 1241), False, 'from tensorflow.python.platform import flags\n'), ((1238, 1448), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""image_color_depth"""', 'np.uint8', '"""Color depth for the images stored in the tfrecords files. Has to correspond to the source video color depth. Specified as np dtype (e.g. np.uint8)."""'], {}), "('image_color_depth', np.uint8,\n 'Color depth for the images stored in the tfrecords files. Has to correspond to the source video color depth. Specified as np dtype (e.g. np.uint8).'\n )\n", (1257, 1448), False, 'from tensorflow.python.platform import flags\n'), ((1557, 1624), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""source"""', 'SOURCE', '"""Directory with video files"""'], {}), "('source', SOURCE, 'Directory with video files')\n", (1576, 1624), False, 'from tensorflow.python.platform import flags\n'), ((1625, 1712), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_path"""', 'DESTINATION', '"""Directory for storing tf records"""'], {}), "('output_path', DESTINATION,\n 'Directory for storing tf records')\n", (1644, 1712), False, 'from tensorflow.python.platform import flags\n'), ((1709, 1858), 'tensorflow.python.platform.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""optical_flow"""', '(True)', '"""Indictes whether optical flow shall be computed and added as fourth channel. Defaults to False"""'], {}), "('optical_flow', True,\n 'Indictes whether optical flow shall be computed and added as fourth channel. Defaults to False'\n )\n", (1729, 1858), False, 'from tensorflow.python.platform import flags\n'), ((2466, 2483), 'numpy.asarray', 'np.asarray', (['frame'], {}), '(frame)\n', (2476, 2483), True, 'import numpy as np\n'), ((2599, 2643), 'cv2.cvtColor', 'cv2.cvtColor', (['prev_image', 'cv2.COLOR_BGR2GRAY'], {}), '(prev_image, cv2.COLOR_BGR2GRAY)\n', (2611, 2643), True, 'import cv2 as cv2\n'), ((2667, 2714), 'cv2.cvtColor', 'cv2.cvtColor', (['current_image', 'cv2.COLOR_BGR2GRAY'], {}), '(current_image, cv2.COLOR_BGR2GRAY)\n', (2679, 2714), True, 'import cv2 as cv2\n'), ((2765, 2790), 'numpy.zeros_like', 'np.zeros_like', (['prev_image'], {}), '(prev_image)\n', (2778, 2790), True, 'import numpy as np\n'), ((2821, 2918), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['prev_image_gray', 'current_image_gray', '(0.8)', '(15)', '(5)', '(10)', '(5)', '(1.5)', '(0)'], {}), '(prev_image_gray, current_image_gray, 0.8, 15, \n 5, 10, 5, 1.5, 0)\n', (2849, 2918), True, 'import cv2 as cv2\n'), ((2928, 2971), 'cv2.cartToPolar', 'cv2.cartToPolar', (['flow[..., 0]', 'flow[..., 1]'], {}), '(flow[..., 0], flow[..., 1])\n', (2943, 2971), True, 'import cv2 as cv2\n'), ((3020, 3069), 'cv2.normalize', 'cv2.normalize', (['mag', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(mag, None, 0, 255, cv2.NORM_MINMAX)\n', (3033, 3069), True, 'import cv2 as cv2\n'), ((3079, 3115), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2BGR'], {}), '(hsv, cv2.COLOR_HSV2BGR)\n', (3091, 3115), True, 'import cv2 as cv2\n'), ((11080, 11094), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (11088, 11094), True, 'import numpy as np\n'), ((11262, 11271), 'tensorflow.python.platform.app.run', 'app.run', ([], {}), '()\n', (11269, 11271), False, 'from tensorflow.python.platform import app\n'), ((2332, 2354), 'cv2.VideoCapture', 'cv2.VideoCapture', (['path'], {}), '(path)\n', (2348, 2354), True, 'import cv2 as cv2\n'), ((7531, 7628), 'numpy.zeros', 'np.zeros', (['(HEIGHT_VIDEO, WIDTH_VIDEO, num_real_image_channel)'], {'dtype': 'FLAGS.image_color_depth'}), '((HEIGHT_VIDEO, WIDTH_VIDEO, num_real_image_channel), dtype=FLAGS.\n image_color_depth)\n', (7539, 7628), True, 'import numpy as np\n'), ((7636, 7736), 'numpy.zeros', 'np.zeros', (['(NUM_FRAMES_PER_VIDEO, HEIGHT_VIDEO, WIDTH_VIDEO, NUM_CHANNELS_VIDEO)'], {'dtype': 'np.uint32'}), '((NUM_FRAMES_PER_VIDEO, HEIGHT_VIDEO, WIDTH_VIDEO,\n NUM_CHANNELS_VIDEO), dtype=np.uint32)\n', (7644, 7736), True, 'import numpy as np\n'), ((7765, 7789), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (7779, 7789), False, 'import os, math\n'), ((8173, 8218), 'math.floor', 'math.floor', (['(frameCount / NUM_FRAMES_PER_VIDEO)'], {}), '(frameCount / NUM_FRAMES_PER_VIDEO)\n', (8183, 8218), False, 'import os, math\n'), ((1961, 1994), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (1979, 1994), True, 'import tensorflow as tf\n'), ((2061, 2094), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (2079, 2094), True, 'import tensorflow as tf\n'), ((4016, 4054), 'os.path.join', 'os.path.join', (['source_path', 'FILE_FILTER'], {}), '(source_path, FILE_FILTER)\n', (4028, 4054), False, 'import os, math\n'), ((5823, 5860), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['filename'], {}), '(filename)\n', (5850, 5860), True, 'import tensorflow as tf\n'), ((6362, 6396), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (6379, 6396), True, 'import tensorflow as tf\n'), ((8655, 8676), 'math.floor', 'math.floor', (['(f % steps)'], {}), '(f % steps)\n', (8665, 8676), False, 'import os, math\n'), ((9270, 9287), 'cv2.waitKey', 'cv2.waitKey', (['(3000)'], {}), '(3000)\n', (9281, 9287), True, 'import cv2 as cv2\n'), ((9587, 9639), 'cv2.resize', 'cv2.resize', (['frame[:, :]', '(HEIGHT_VIDEO, WIDTH_VIDEO)'], {}), '(frame[:, :], (HEIGHT_VIDEO, WIDTH_VIDEO))\n', (9597, 9639), True, 'import cv2 as cv2\n'), ((9743, 9798), 'cv2.resize', 'cv2.resize', (['frame[:, :, k]', '(HEIGHT_VIDEO, WIDTH_VIDEO)'], {}), '(frame[:, :, k], (HEIGHT_VIDEO, WIDTH_VIDEO))\n', (9753, 9798), True, 'import cv2 as cv2\n'), ((10015, 10052), 'numpy.zeros', 'np.zeros', (['(HEIGHT_VIDEO, WIDTH_VIDEO)'], {}), '((HEIGHT_VIDEO, WIDTH_VIDEO))\n', (10023, 10052), True, 'import numpy as np\n'), ((10158, 10201), 'cv2.cvtColor', 'cv2.cvtColor', (['frameFlow', 'cv2.COLOR_BGR2GRAY'], {}), '(frameFlow, cv2.COLOR_BGR2GRAY)\n', (10170, 10201), True, 'import cv2 as cv2\n'), ((10254, 10291), 'numpy.zeros', 'np.zeros', (['(HEIGHT_VIDEO, WIDTH_VIDEO)'], {}), '((HEIGHT_VIDEO, WIDTH_VIDEO))\n', (10262, 10291), True, 'import numpy as np\n'), ((10480, 10513), 'numpy.expand_dims', 'np.expand_dims', (['frameFlow'], {'axis': '(2)'}), '(frameFlow, axis=2)\n', (10494, 10513), True, 'import numpy as np\n')] |
import numpy as np
import SimpleITK as sitk
def reference_image_build(spacing, size, direction, template_size, dim):
#template size: image(array) dimension to resize to: a list of three elements
reference_spacing = np.array(size)/np.array(template_size)*np.array(spacing)
reference_spacing[0] = 1.2
reference_spacing[1] = 1.2
reference_image = sitk.Image(template_size, 0)
reference_image.SetOrigin(np.zeros(3))
reference_image.SetSpacing(reference_spacing)
reference_image.SetDirection(direction)
return reference_image
def centering(img, ref_img, order=1):
dimension = img.GetDimension()
transform = sitk.AffineTransform(dimension)
transform.SetTranslation(np.array(img.GetOrigin()) - ref_img.GetOrigin())
# Modify the transformation to align the centers of the original and reference image instead of their origins.
centering_transform = sitk.TranslationTransform(dimension)
img_center = np.array(img.TransformContinuousIndexToPhysicalPoint(np.array(img.GetSize())/2.0))
reference_center = np.array(ref_img.TransformContinuousIndexToPhysicalPoint(np.array(ref_img.GetSize())/2.0))
centering_transform.SetOffset(np.array(transform.GetInverse().TransformPoint(img_center) - reference_center))
centered_transform = sitk.Transform(transform)
centered_transform.AddTransform(centering_transform)
return transform_func(img, ref_img, centered_transform, order)
def isometric_transform(image, ref_img, orig_direction, order=1, target=None):
dim = ref_img.GetDimension()
affine = sitk.AffineTransform(dim)
if target is None:
target = np.eye(dim)
ori = np.reshape(orig_direction, np.eye(dim).shape)
target = np.reshape(target, np.eye(dim).shape)
affine.SetCenter(ref_img.TransformContinuousIndexToPhysicalPoint(np.array(ref_img.GetSize())/2.0))
return transform_func(image, ref_img, affine, order)
def transform_func(image, reference_image, transform, order=1):
# Output image Origin, Spacing, Size, Direction are taken from the reference
# image in this call to Resample
if order ==1:
interpolator = sitk.sitkLinear
elif order ==2:
interpolator = sitk.sitkBSpline
elif order == 0:
interpolator = sitk.sitkNearestNeighbor
default_value = 0
try:
resampled = sitk.Resample(image, reference_image, transform,
interpolator, default_value)
except Exception as e: print(e)
return resampled
def resample_spacing(sitkIm, resolution=0.5, dim=3, template_size=(256, 256), order=1):
if type(sitkIm) is str:
image = sitk.ReadImage(sitkIm)
else:
image = sitkIm
orig_direction = image.GetDirection()
orig_size = np.array(image.GetSize(), dtype=np.int)
orig_spacing = np.array(image.GetSpacing())
new_size = orig_size*(orig_spacing/np.array(resolution))
new_size = np.ceil(new_size).astype(np.int) # Image dimensions are in integers
new_size = [int(s) for s in new_size]
template_size = (template_size[0], template_size[1], int(orig_size[-1]))
ref_img = reference_image_build(resolution, new_size, image.GetDirection(), template_size, dim)
centered = centering(image, ref_img, order)
transformed = isometric_transform(centered, ref_img, orig_direction, order)
return transformed, ref_img
def resize_to_size(image, size=(256, 256), order=1):
orig_size = np.array(image.GetSize(), dtype=np.int)
orig_spacing =np.array(image.GetSpacing())
new_size = [int(size[0]), int(size[1]), int(orig_size[-1])]
new_spacing = orig_spacing*orig_size/np.array(new_size)
if order ==1:
interpolator = sitk.sitkLinear
elif order ==2:
interpolator = sitk.sitkBSpline
elif order == 0:
interpolator = sitk.sitkNearestNeighbor
default_value = 0
fltr = sitk.ResampleImageFilter()
fltr.SetSize(new_size)
fltr.SetOutputSpacing(new_spacing)
fltr.SetOutputOrigin(image.GetOrigin())
fltr.SetOutputDirection(image.GetDirection())
fltr.SetInterpolator(interpolator)
image = fltr.Execute(image)
return image
def resample_scale(sitkIm, ref_img,gt_img=None, scale_factor=1., order=1):
sitkIm.SetDirection(np.eye(3).ravel())
ref_img.SetDirection(np.eye(3).ravel())
gt_img.SetDirection(np.eye(3).ravel())
dim = sitkIm.GetDimension()
affine = sitk.AffineTransform(dim)
scale= np.array(ref_img.GetDirection())
scale = np.reshape(scale, (dim,dim))
scale[:,0] *= 1./scale_factor
scale[:,1] *= 1./scale_factor
if gt_img is not None:
stats = sitk.LabelShapeStatisticsImageFilter()
stats.Execute(sitk.Cast(gt_img,sitk.sitkInt32))
center = stats.GetCentroid(1)
else:
center = sitkIm.TransformContinuousIndexToPhysicalPoint(np.array(sitkIm.GetSize())/2.0)
affine.SetMatrix(scale.ravel())
affine.SetCenter(center)
transformed = transform_func(sitkIm, ref_img, affine, order)
return transformed
def swap_labels(labels):
unique_label = np.unique(labels)
new_label = range(len(unique_label))
for i in range(len(unique_label)):
label = unique_label[i]
newl = new_label[i]
labels[labels==label] = newl
return labels
def swap_labels_back(labels,pred):
unique_label = np.unique(labels)
new_label = range(len(unique_label))
for i in range(len(unique_label)):
pred[pred==i] = unique_label[i]
return pred
def rescale_intensity(slice_im):
if type(slice_im) != np.ndarray:
raise RuntimeError("Input image is not numpy array")
#upper = np.percentile(slice_im, 90)
upper = np.percentile(slice_im, 99)
lower = np.percentile(slice_im, 20)
slice_im[slice_im>upper] = upper
slice_im[slice_im<lower] = lower
slice_im -= lower
rng = upper - lower
slice_im = slice_im/rng*2.
slice_im -= 1.
#slice_im = (slice_im - np.mean(slice_im))/np.std(slice_im)
return slice_im
def swap_low_freq(im1, im2, beta):
"""
Change the low frequency of im2 with that of im1
Beta: ratio between the swaped region and the image dimension
"""
#im1 = denoise(im1, 10, 0.125)
#im2 = denoise(im2, 10, 0.125)
im1 = np.squeeze(im1)
im2 = np.squeeze(im2)
im1 = im1- np.min(im1)
im2 = im2-np.min(im2)
im1_fft = np.fft.fftshift(np.fft.fft2(im1))
im2_fft = np.fft.fftshift(np.fft.fft2(im2))
change = beta * np.array(im2_fft.shape)
up0 = int(im2.shape[0]/2-change[0]/2)
down0 = int(im2.shape[0]/2+change[0]/2)
up1 = int(im2.shape[1]/2-change[1]/2)
down1 = int(im2.shape[1]/2+change[1]/2)
#im2_fft[up0:down0, up1:down1] = 0.
im2_fft[up0:down0, up1:down1] = im1_fft[up0:down0, up1:down1]
im2_new = np.abs(np.real(np.fft.ifft2(im2_fft)))
return im1, im2, im2_new
class SpatialTransform(object):
'''
Base class to image transform
'''
def __init__(self, image):
self.image = image
self.dim = image.GetDimension()
def apply_transform(self):
output = []
out_im = transform_func(self.image, self.image, self.transform, order=1)
output.append(out_im)
return output
def add_transform(self, transform):
total = sitk.Transform(self.transform)
total.AddTransform(transform)
self.transform = total
class AffineTransform(SpatialTransform):
'''
Apply random affine transform to input 3D image volume
'''
def __init__(self, image, shear_range, scale_range, rot_range, trans_range, flip_prob):
super(AffineTransform, self).__init__(image)
self.shear_range = shear_range
self.scale_range = scale_range
self.rot_range = rot_range
self.flip_prob = flip_prob
self.trans_range = trans_range
self.transform = sitk.AffineTransform(self.dim)
def scale(self):
self.transform = sitk.AffineTransform(self.transform)
scale = np.eye(self.dim)
scale = np.diag( 1./np.random.uniform(self.scale_range[0], self.scale_range[1], self.dim))
matrix = np.array(self.transform.GetMatrix()).reshape((self.dim,self.dim))
matrix = np.matmul(matrix, scale)
self.transform.SetMatrix(matrix.ravel())
self.transform.SetCenter(self.image.TransformContinuousIndexToPhysicalPoint(np.array(self.image.GetSize())/2.0))
def rotate(self):
angles = np.random.uniform(self.rot_range[0], self.rot_range[1], self.dim)
rads = np.array(angles)/180.*np.pi
x_rot = np.eye(self.dim)
x_rot = [[1., 0., 0.], [0., np.cos(rads[0]), -np.sin(rads[0])], [0., np.sin(rads[0]), np.cos(rads[0])]]
y_rot = [[np.cos(rads[1]), 0., np.sin(rads[1])], [0.,1.,0.], [-np.sin(rads[1]), 0., np.cos(rads[1])]]
z_rot = [[np.cos(rads[2]), -np.sin(rads[2]), 0.], [np.sin(rads[2]), np.cos(rads[2]), 0.], [0., 0., 1.]]
rot_matrix = np.matmul(np.matmul(np.array(x_rot), np.array(y_rot)), np.array(z_rot))
matrix = np.array(self.transform.GetMatrix()).reshape((self.dim, self.dim))
matrix = np.matmul(matrix, rot_matrix)
self.transform = sitk.AffineTransform(self.transform)
self.transform.SetMatrix(matrix.ravel())
self.transform.SetCenter(self.image.TransformContinuousIndexToPhysicalPoint(np.array(self.image.GetSize())/2.0))
def translate(self):
self.transform = sitk.AffineTransform(self.transform)
params = np.random.uniform(self.trans_range[0],self.trans_range[1], self.dim)
print("Translation: " , params)
self.transform.SetTranslation(params)
#self.transform.SetCenter(self.image.TransformContinuousIndexToPhysicalPoint(np.array(self.image.GetSize())/2.0))
def shear(self):
self.transform = sitk.AffineTransform(self.transform)
axis = np.argsort(np.random.rand(self.dim))
self.transform.Shear(int(axis[0]), int(axis[1]), np.random.uniform(self.shear_range[0],
self.shear_range[1]))
self.transform.SetCenter(self.image.TransformContinuousIndexToPhysicalPoint(np.array(self.image.GetSize())/2.0))
def flip(self):
flip = np.random.rand(self.dim)>self.flip_prob
flip_matrix = np.eye(self.dim)
flip_matrix[np.diag(flip)] = -1.
self.transform = sitk.AffineTransform(self.transform)
matrix = np.array(self.transform.GetMatrix()).reshape((self.dim,self.dim))
matrix = np.matmul(matrix, flip_matrix)
self.transform.SetMatrix(matrix.ravel())
self.transform.SetCenter(self.image.TransformContinuousIndexToPhysicalPoint(np.array(self.image.GetSize())/2.0))
def affine(self):
# commented out others since we only need translation for now
#self.shear()
#self.rotate()
self.translate()
#self.flip()
#self.scale()
def apply_transform(self):
output = []
out_im = transform_func(self.image, self.image, self.transform, order=1)
output.append(out_im)
return output
def affine_usage(sitk_image):
'''
example function to apply affine transform to images
'''
params_affine = {
'scale_range': [0.8, 1.2],
'rot_range': [-20., 20.],
'trans_range': [-5., 5.], # range of translation
'shear_range': [-0.13, 0.13],
'flip_prob': 0.3
}
affine = AffineTransform(sitk_image, **params_affine)
affine.affine()
output = affine.apply_transform()
return output
| [
"numpy.random.rand",
"SimpleITK.AffineTransform",
"numpy.array",
"numpy.sin",
"numpy.reshape",
"numpy.fft.fft2",
"numpy.matmul",
"numpy.min",
"SimpleITK.Resample",
"numpy.eye",
"SimpleITK.TranslationTransform",
"numpy.ceil",
"SimpleITK.Image",
"numpy.squeeze",
"numpy.cos",
"SimpleITK.C... | [((357, 385), 'SimpleITK.Image', 'sitk.Image', (['template_size', '(0)'], {}), '(template_size, 0)\n', (367, 385), True, 'import SimpleITK as sitk\n'), ((628, 659), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['dimension'], {}), '(dimension)\n', (648, 659), True, 'import SimpleITK as sitk\n'), ((873, 909), 'SimpleITK.TranslationTransform', 'sitk.TranslationTransform', (['dimension'], {}), '(dimension)\n', (898, 909), True, 'import SimpleITK as sitk\n'), ((1255, 1280), 'SimpleITK.Transform', 'sitk.Transform', (['transform'], {}), '(transform)\n', (1269, 1280), True, 'import SimpleITK as sitk\n'), ((1524, 1549), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['dim'], {}), '(dim)\n', (1544, 1549), True, 'import SimpleITK as sitk\n'), ((3762, 3788), 'SimpleITK.ResampleImageFilter', 'sitk.ResampleImageFilter', ([], {}), '()\n', (3786, 3788), True, 'import SimpleITK as sitk\n'), ((4278, 4303), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['dim'], {}), '(dim)\n', (4298, 4303), True, 'import SimpleITK as sitk\n'), ((4356, 4385), 'numpy.reshape', 'np.reshape', (['scale', '(dim, dim)'], {}), '(scale, (dim, dim))\n', (4366, 4385), True, 'import numpy as np\n'), ((4910, 4927), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (4919, 4927), True, 'import numpy as np\n'), ((5180, 5197), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (5189, 5197), True, 'import numpy as np\n'), ((5522, 5549), 'numpy.percentile', 'np.percentile', (['slice_im', '(99)'], {}), '(slice_im, 99)\n', (5535, 5549), True, 'import numpy as np\n'), ((5562, 5589), 'numpy.percentile', 'np.percentile', (['slice_im', '(20)'], {}), '(slice_im, 20)\n', (5575, 5589), True, 'import numpy as np\n'), ((6097, 6112), 'numpy.squeeze', 'np.squeeze', (['im1'], {}), '(im1)\n', (6107, 6112), True, 'import numpy as np\n'), ((6123, 6138), 'numpy.squeeze', 'np.squeeze', (['im2'], {}), '(im2)\n', (6133, 6138), True, 'import numpy as np\n'), ((261, 278), 'numpy.array', 'np.array', (['spacing'], {}), '(spacing)\n', (269, 278), True, 'import numpy as np\n'), ((414, 425), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (422, 425), True, 'import numpy as np\n'), ((1584, 1595), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (1590, 1595), True, 'import numpy as np\n'), ((2270, 2347), 'SimpleITK.Resample', 'sitk.Resample', (['image', 'reference_image', 'transform', 'interpolator', 'default_value'], {}), '(image, reference_image, transform, interpolator, default_value)\n', (2283, 2347), True, 'import SimpleITK as sitk\n'), ((2564, 2586), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['sitkIm'], {}), '(sitkIm)\n', (2578, 2586), True, 'import SimpleITK as sitk\n'), ((3530, 3548), 'numpy.array', 'np.array', (['new_size'], {}), '(new_size)\n', (3538, 3548), True, 'import numpy as np\n'), ((4489, 4527), 'SimpleITK.LabelShapeStatisticsImageFilter', 'sitk.LabelShapeStatisticsImageFilter', ([], {}), '()\n', (4525, 4527), True, 'import SimpleITK as sitk\n'), ((6154, 6165), 'numpy.min', 'np.min', (['im1'], {}), '(im1)\n', (6160, 6165), True, 'import numpy as np\n'), ((6180, 6191), 'numpy.min', 'np.min', (['im2'], {}), '(im2)\n', (6186, 6191), True, 'import numpy as np\n'), ((6222, 6238), 'numpy.fft.fft2', 'np.fft.fft2', (['im1'], {}), '(im1)\n', (6233, 6238), True, 'import numpy as np\n'), ((6270, 6286), 'numpy.fft.fft2', 'np.fft.fft2', (['im2'], {}), '(im2)\n', (6281, 6286), True, 'import numpy as np\n'), ((6309, 6332), 'numpy.array', 'np.array', (['im2_fft.shape'], {}), '(im2_fft.shape)\n', (6317, 6332), True, 'import numpy as np\n'), ((7115, 7145), 'SimpleITK.Transform', 'sitk.Transform', (['self.transform'], {}), '(self.transform)\n', (7129, 7145), True, 'import SimpleITK as sitk\n'), ((7690, 7720), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['self.dim'], {}), '(self.dim)\n', (7710, 7720), True, 'import SimpleITK as sitk\n'), ((7768, 7804), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['self.transform'], {}), '(self.transform)\n', (7788, 7804), True, 'import SimpleITK as sitk\n'), ((7821, 7837), 'numpy.eye', 'np.eye', (['self.dim'], {}), '(self.dim)\n', (7827, 7837), True, 'import numpy as np\n'), ((8037, 8061), 'numpy.matmul', 'np.matmul', (['matrix', 'scale'], {}), '(matrix, scale)\n', (8046, 8061), True, 'import numpy as np\n'), ((8272, 8337), 'numpy.random.uniform', 'np.random.uniform', (['self.rot_range[0]', 'self.rot_range[1]', 'self.dim'], {}), '(self.rot_range[0], self.rot_range[1], self.dim)\n', (8289, 8337), True, 'import numpy as np\n'), ((8397, 8413), 'numpy.eye', 'np.eye', (['self.dim'], {}), '(self.dim)\n', (8403, 8413), True, 'import numpy as np\n'), ((8942, 8971), 'numpy.matmul', 'np.matmul', (['matrix', 'rot_matrix'], {}), '(matrix, rot_matrix)\n', (8951, 8971), True, 'import numpy as np\n'), ((8997, 9033), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['self.transform'], {}), '(self.transform)\n', (9017, 9033), True, 'import SimpleITK as sitk\n'), ((9259, 9295), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['self.transform'], {}), '(self.transform)\n', (9279, 9295), True, 'import SimpleITK as sitk\n'), ((9313, 9382), 'numpy.random.uniform', 'np.random.uniform', (['self.trans_range[0]', 'self.trans_range[1]', 'self.dim'], {}), '(self.trans_range[0], self.trans_range[1], self.dim)\n', (9330, 9382), True, 'import numpy as np\n'), ((9636, 9672), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['self.transform'], {}), '(self.transform)\n', (9656, 9672), True, 'import SimpleITK as sitk\n'), ((10082, 10098), 'numpy.eye', 'np.eye', (['self.dim'], {}), '(self.dim)\n', (10088, 10098), True, 'import numpy as np\n'), ((10166, 10202), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['self.transform'], {}), '(self.transform)\n', (10186, 10202), True, 'import SimpleITK as sitk\n'), ((10303, 10333), 'numpy.matmul', 'np.matmul', (['matrix', 'flip_matrix'], {}), '(matrix, flip_matrix)\n', (10312, 10333), True, 'import numpy as np\n'), ((222, 236), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (230, 236), True, 'import numpy as np\n'), ((237, 260), 'numpy.array', 'np.array', (['template_size'], {}), '(template_size)\n', (245, 260), True, 'import numpy as np\n'), ((1634, 1645), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (1640, 1645), True, 'import numpy as np\n'), ((1683, 1694), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (1689, 1694), True, 'import numpy as np\n'), ((2791, 2811), 'numpy.array', 'np.array', (['resolution'], {}), '(resolution)\n', (2799, 2811), True, 'import numpy as np\n'), ((2826, 2843), 'numpy.ceil', 'np.ceil', (['new_size'], {}), '(new_size)\n', (2833, 2843), True, 'import numpy as np\n'), ((4548, 4581), 'SimpleITK.Cast', 'sitk.Cast', (['gt_img', 'sitk.sitkInt32'], {}), '(gt_img, sitk.sitkInt32)\n', (4557, 4581), True, 'import SimpleITK as sitk\n'), ((6641, 6662), 'numpy.fft.ifft2', 'np.fft.ifft2', (['im2_fft'], {}), '(im2_fft)\n', (6653, 6662), True, 'import numpy as np\n'), ((8824, 8839), 'numpy.array', 'np.array', (['z_rot'], {}), '(z_rot)\n', (8832, 8839), True, 'import numpy as np\n'), ((9701, 9725), 'numpy.random.rand', 'np.random.rand', (['self.dim'], {}), '(self.dim)\n', (9715, 9725), True, 'import numpy as np\n'), ((9784, 9843), 'numpy.random.uniform', 'np.random.uniform', (['self.shear_range[0]', 'self.shear_range[1]'], {}), '(self.shear_range[0], self.shear_range[1])\n', (9801, 9843), True, 'import numpy as np\n'), ((10020, 10044), 'numpy.random.rand', 'np.random.rand', (['self.dim'], {}), '(self.dim)\n', (10034, 10044), True, 'import numpy as np\n'), ((10119, 10132), 'numpy.diag', 'np.diag', (['flip'], {}), '(flip)\n', (10126, 10132), True, 'import numpy as np\n'), ((4135, 4144), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4141, 4144), True, 'import numpy as np\n'), ((4177, 4186), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4183, 4186), True, 'import numpy as np\n'), ((4218, 4227), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4224, 4227), True, 'import numpy as np\n'), ((7866, 7935), 'numpy.random.uniform', 'np.random.uniform', (['self.scale_range[0]', 'self.scale_range[1]', 'self.dim'], {}), '(self.scale_range[0], self.scale_range[1], self.dim)\n', (7883, 7935), True, 'import numpy as np\n'), ((8353, 8369), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (8361, 8369), True, 'import numpy as np\n'), ((8450, 8465), 'numpy.cos', 'np.cos', (['rads[0]'], {}), '(rads[0])\n', (8456, 8465), True, 'import numpy as np\n'), ((8491, 8506), 'numpy.sin', 'np.sin', (['rads[0]'], {}), '(rads[0])\n', (8497, 8506), True, 'import numpy as np\n'), ((8508, 8523), 'numpy.cos', 'np.cos', (['rads[0]'], {}), '(rads[0])\n', (8514, 8523), True, 'import numpy as np\n'), ((8544, 8559), 'numpy.cos', 'np.cos', (['rads[1]'], {}), '(rads[1])\n', (8550, 8559), True, 'import numpy as np\n'), ((8565, 8580), 'numpy.sin', 'np.sin', (['rads[1]'], {}), '(rads[1])\n', (8571, 8580), True, 'import numpy as np\n'), ((8618, 8633), 'numpy.cos', 'np.cos', (['rads[1]'], {}), '(rads[1])\n', (8624, 8633), True, 'import numpy as np\n'), ((8654, 8669), 'numpy.cos', 'np.cos', (['rads[2]'], {}), '(rads[2])\n', (8660, 8669), True, 'import numpy as np\n'), ((8695, 8710), 'numpy.sin', 'np.sin', (['rads[2]'], {}), '(rads[2])\n', (8701, 8710), True, 'import numpy as np\n'), ((8712, 8727), 'numpy.cos', 'np.cos', (['rads[2]'], {}), '(rads[2])\n', (8718, 8727), True, 'import numpy as np\n'), ((8789, 8804), 'numpy.array', 'np.array', (['x_rot'], {}), '(x_rot)\n', (8797, 8804), True, 'import numpy as np\n'), ((8806, 8821), 'numpy.array', 'np.array', (['y_rot'], {}), '(y_rot)\n', (8814, 8821), True, 'import numpy as np\n'), ((8468, 8483), 'numpy.sin', 'np.sin', (['rads[0]'], {}), '(rads[0])\n', (8474, 8483), True, 'import numpy as np\n'), ((8597, 8612), 'numpy.sin', 'np.sin', (['rads[1]'], {}), '(rads[1])\n', (8603, 8612), True, 'import numpy as np\n'), ((8672, 8687), 'numpy.sin', 'np.sin', (['rads[2]'], {}), '(rads[2])\n', (8678, 8687), True, 'import numpy as np\n')] |
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def detail_url(recipe_id):
# Return recipe id url
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main Course'):
# Sample tag to be used in recipe
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
# Sample Ingredient to be used in recipe
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
# Create and return a sample recipe
defaults = {
'title': 'Sample Recipe',
'time_minutes': 10,
'price': 5.00,
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeAPITest(TestCase):
# Test unauthenticated recipe API access
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
# Test that auth is required
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeAPITest(TestCase):
# Test Authenticated recipe API access
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
# Test retrieving a list of recipes
sample_recipe(user=self.user)
sample_recipe(user=self.user, title="Hash")
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all()
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
# Test retrieving recipes for user
user2 = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>',
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
# Test viewing a recipe detail
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
payload = {
'title': 'Chocolate Cheese Cake',
'time_minutes': 30,
'price': 10.00,
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tag(self):
# Test creating a recipe with tags
tag1 = sample_tag(user=self.user, name="Vegan")
tag2 = sample_tag(user=self.user, name="Dessert")
payload = {
'title': 'Avocado Lime Cheese Cake',
'tags': [tag1.id, tag2.id],
'time_minutes': 60,
'price': 20
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredient(self):
# Test creating a recipe with ingredients
ingredient1 = sample_ingredient(user=self.user, name="Ginger")
ingredient2 = sample_ingredient(user=self.user, name="Prawns")
payload = {
'title': 'Thai prawn red curry',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 25,
'price': 25
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
| [
"core.models.Recipe.objects.create",
"django.contrib.auth.get_user_model",
"recipe.serializers.RecipeSerializer",
"core.models.Tag.objects.create",
"core.models.Recipe.objects.all",
"rest_framework.test.APIClient",
"core.models.Recipe.objects.get",
"core.models.Recipe.objects.filter",
"django.urls.r... | [((325, 354), 'django.urls.reverse', 'reverse', (['"""recipe:recipe-list"""'], {}), "('recipe:recipe-list')\n", (332, 354), False, 'from django.urls import reverse\n'), ((422, 471), 'django.urls.reverse', 'reverse', (['"""recipe:recipe-detail"""'], {'args': '[recipe_id]'}), "('recipe:recipe-detail', args=[recipe_id])\n", (429, 471), False, 'from django.urls import reverse\n'), ((565, 605), 'core.models.Tag.objects.create', 'Tag.objects.create', ([], {'user': 'user', 'name': 'name'}), '(user=user, name=name)\n', (583, 605), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((710, 757), 'core.models.Ingredient.objects.create', 'Ingredient.objects.create', ([], {'user': 'user', 'name': 'name'}), '(user=user, name=name)\n', (735, 757), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((983, 1027), 'core.models.Recipe.objects.create', 'Recipe.objects.create', ([], {'user': 'user'}), '(user=user, **defaults)\n', (1004, 1027), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((1156, 1167), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (1165, 1167), False, 'from rest_framework.test import APIClient\n'), ((1483, 1494), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (1492, 1494), False, 'from rest_framework.test import APIClient\n'), ((1895, 1915), 'core.models.Recipe.objects.all', 'Recipe.objects.all', ([], {}), '()\n', (1913, 1915), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((1937, 1973), 'recipe.serializers.RecipeSerializer', 'RecipeSerializer', (['recipes'], {'many': '(True)'}), '(recipes, many=True)\n', (1953, 1973), False, 'from recipe.serializers import RecipeSerializer, RecipeDetailSerializer\n'), ((2426, 2463), 'core.models.Recipe.objects.filter', 'Recipe.objects.filter', ([], {'user': 'self.user'}), '(user=self.user)\n', (2447, 2463), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((2485, 2521), 'recipe.serializers.RecipeSerializer', 'RecipeSerializer', (['recipes'], {'many': '(True)'}), '(recipes, many=True)\n', (2501, 2521), False, 'from recipe.serializers import RecipeSerializer, RecipeDetailSerializer\n'), ((3019, 3049), 'recipe.serializers.RecipeDetailSerializer', 'RecipeDetailSerializer', (['recipe'], {}), '(recipe)\n', (3041, 3049), False, 'from recipe.serializers import RecipeSerializer, RecipeDetailSerializer\n'), ((3419, 3456), 'core.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': "res.data['id']"}), "(id=res.data['id'])\n", (3437, 3456), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((4073, 4110), 'core.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': "res.data['id']"}), "(id=res.data['id'])\n", (4091, 4110), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((4828, 4865), 'core.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': "res.data['id']"}), "(id=res.data['id'])\n", (4846, 4865), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((1515, 1531), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1529, 1531), False, 'from django.contrib.auth import get_user_model\n'), ((2193, 2209), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (2207, 2209), False, 'from django.contrib.auth import get_user_model\n')] |
import gtfs_kit
from representation.gtfs_metadata import GtfsMetadata
from representation.gtfs_representation import GtfsRepresentation
from representation.dataset_infos import DatasetInfos
from requests.exceptions import MissingSchema
from pandas.errors import ParserError
GTFS_TYPE = "GTFS"
GBFS_TYPE = "GBFS"
def build_representation(dataset_type, dataset_infos):
"""Dataset representation builder function.
The factory builds and return a dataset representation according to the dataset type.
:param dataset_type: The type of the dataset, either GTFS or GBFS.
:param dataset_infos: The processing infos of the dataset.
"""
if not isinstance(dataset_infos, DatasetInfos):
raise TypeError("Dataset infos must be a valid DatasetInfos.")
representation = None
if dataset_type == GTFS_TYPE:
representation = build_gtfs_representation(dataset_infos)
elif dataset_type == GBFS_TYPE:
representation = build_gbfs_representation(dataset_infos)
return representation
def build_gtfs_representation(dataset_infos):
try:
dataset = gtfs_kit.read_feed(dataset_infos.zip_path, dist_units="km")
except TypeError as te:
raise TypeError(
f"Exception '{te}' occurred while reading the GTFS dataset with the GTFS kit library."
f"The dataset must be a valid GTFS zip file or URL.\n"
)
except MissingSchema as ms:
raise MissingSchema(
f"Exception '{ms}' occurred while opening the GTFS dataset with the GTFS kit library."
f"The dataset must be a valid GTFS zip file or URL.\n"
)
except ParserError as pe:
print(
f"Exception {pe} found while reading the dataset. "
f"Continuing adding dataset infos to database without metadata"
)
return None
metadata = GtfsMetadata(dataset_infos)
representation = GtfsRepresentation(
dataset_infos.source_entity_code, dataset, metadata
)
return representation
def build_gbfs_representation(dataset_infos):
raise NotImplementedError
| [
"representation.gtfs_representation.GtfsRepresentation",
"gtfs_kit.read_feed",
"representation.gtfs_metadata.GtfsMetadata",
"requests.exceptions.MissingSchema"
] | [((1859, 1886), 'representation.gtfs_metadata.GtfsMetadata', 'GtfsMetadata', (['dataset_infos'], {}), '(dataset_infos)\n', (1871, 1886), False, 'from representation.gtfs_metadata import GtfsMetadata\n'), ((1908, 1979), 'representation.gtfs_representation.GtfsRepresentation', 'GtfsRepresentation', (['dataset_infos.source_entity_code', 'dataset', 'metadata'], {}), '(dataset_infos.source_entity_code, dataset, metadata)\n', (1926, 1979), False, 'from representation.gtfs_representation import GtfsRepresentation\n'), ((1103, 1162), 'gtfs_kit.read_feed', 'gtfs_kit.read_feed', (['dataset_infos.zip_path'], {'dist_units': '"""km"""'}), "(dataset_infos.zip_path, dist_units='km')\n", (1121, 1162), False, 'import gtfs_kit\n'), ((1438, 1603), 'requests.exceptions.MissingSchema', 'MissingSchema', (['f"""Exception \'{ms}\' occurred while opening the GTFS dataset with the GTFS kit library.The dataset must be a valid GTFS zip file or URL.\n"""'], {}), '(\n f"""Exception \'{ms}\' occurred while opening the GTFS dataset with the GTFS kit library.The dataset must be a valid GTFS zip file or URL.\n"""\n )\n', (1451, 1603), False, 'from requests.exceptions import MissingSchema\n')] |
#!/usr/bin/env python3
"""
This script runs Clarity Chess in UCI (Universal Chess Interface) mode.
"""
import sys
from .Board import Board
from .Move import Move
from .MoveType import MoveType
from .recursion import negamax
from .Sq import Sq
def detect_move_type(board, init_sq, dest_sq):
"""
Parameters
----------
board: Board
the board that the move will be made on
init_sq: Sq or int
the destination square of the move to find the MoveType for
dest_sq: Sq or int
the destination square of the move to find the MoveType for
Returns
-------
Move
the Move with the correct MoveType with given init_sq and dest_sq
"""
# TODO implement
# TODO untested
return Move(init_sq, dest_sq, MoveType.QUIET)
def uci():
"""
Runs Clarity in UCI mode
"""
while True:
line = sys.stdin.readline().strip()
command = line.split(' ')[0]
options = line.split(' ')[1:]
if command == 'uci':
print('id name Clarity')
print('id author <NAME> (<NAME>')
print('uciok')
if command == 'isready':
print('readyok')
if command == 'quit':
return
if command == 'position':
if options[0] == 'startpos':
board = Board()
moves = options[1:]
else:
fen = ' '.join(options[0:6])
board = Board(fen)
moves = options[6:]
for move in moves:
# change move format from str to Move
init_sq = Sq.filerank_to_sq(move[0:2])
dest_sq = Sq.filerank_to_sq(move[2:4])
board.make_move(detect_move_type(board, init_sq, dest_sq))
if command == 'go':
best_move = negamax(board, 2)
print('bestmove ' + best_move.short_str())
# only runs when this module is called directly
if __name__ == '__main__':
uci()
| [
"sys.stdin.readline"
] | [((875, 895), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (893, 895), False, 'import sys\n')] |
__all__ = [
'PyForwardRef',
'PyLiteral',
'PyProtocol',
'PyDeque',
'PyTypedDict',
'PyTypedDicts',
'FrozenKeys',
'DefFactory',
'NoneType',
'ExplicitNullType',
'ExplicitNull',
'JSONList',
'JSONObject',
'ListOfJSONObject',
'JSONValue',
'Encoder',
'Decoder',
'NUMBERS',
'T',
'E',
'U',
'M',
'NT',
'DT',
'DD',
'N',
'S',
'LT',
'LSQ',
'FREF',
]
from collections import deque
from datetime import date, time, datetime
from enum import Enum
from typing import (
Type, TypeVar, Sequence, Mapping,
List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union
)
from uuid import UUID
from .constants import PY36, PY38_OR_ABOVE
from .decorators import discard_kwargs
# Type check for numeric types - needed because `bool` is technically
# a Number.
NUMBERS = int, float
# Generic type
T = TypeVar('T')
# Enum subclass type
E = TypeVar('E', bound=Enum)
# UUID subclass type
U = TypeVar('U', bound=UUID)
# Mapping type
M = TypeVar('M', bound=Mapping)
# NamedTuple type
NT = TypeVar('NT', bound=NamedTuple)
# Date, time, or datetime type
DT = TypeVar('DT', date, time, datetime)
# DefaultDict type
DD = TypeVar('DD', bound=DefaultDict)
# Numeric type
N = TypeVar('N', int, float, complex)
# Sequence type
S = TypeVar('S', bound=Sequence)
# List or Tuple type
LT = TypeVar('LT', list, tuple)
# List, Set, or Deque (Double ended queue) type
LSQ = TypeVar('LSQ', list, set, frozenset, deque)
# A fixed set of key names
FrozenKeys = FrozenSet[str]
# Default factory type, assuming a no-args constructor
DefFactory = Callable[[], T]
# The class of the `None` singleton, cached for re-usability
NoneType = type(None)
# For Python 3.8+, we need to use both `TypedDict` implementations (from both
# the `typing` and `typing_extensions` modules). Because it's not clear which
# version users might choose to use. And they might choose to use either, due
# to the issues mentioned below (comment taken from `typing_extensions`):
#
# The standard library TypedDict in Python 3.8 does not store runtime information
# about which (if any) keys are optional. See https://bugs.python.org/issue38834
# The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
# keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
PyTypedDicts: List[Type['TypedDict']] = []
# Valid collection types in JSON.
JSONList = List[Any]
JSONObject = Dict[str, Any]
ListOfJSONObject = List[JSONObject]
# Valid value types in JSON.
JSONValue = Union[None, str, bool, int, float, JSONList, JSONObject]
if PY38_OR_ABOVE: # pragma: no cover
from typing import ForwardRef as PyForwardRef
from typing import Literal as PyLiteral
from typing import Protocol as PyProtocol
from typing import TypedDict as PyTypedDict
from typing import Deque as PyDeque
PyTypedDicts.append(PyTypedDict)
# Python 3.8+ users might import from either `typing` or
# `typing_extensions`, so check for both types.
try:
# noinspection PyUnresolvedReferences
from typing_extensions import TypedDict as PyTypedDict
PyTypedDicts.append(PyTypedDict)
except ImportError:
pass
else: # pragma: no cover
from typing_extensions import Literal as PyLiteral
from typing_extensions import Protocol as PyProtocol
from typing_extensions import TypedDict as PyTypedDict
# Seems like `Deque` was only introduced to `typing` in 3.6.1, so Python
# 3.6.0 won't have it; to be safe, we'll instead import from the
# `typing_extensions` module here.
from typing_extensions import Deque as PyDeque
PyTypedDicts.append(PyTypedDict)
if PY36:
import typing
from typing import _ForwardRef as PyForwardRef
from functools import wraps
# Need to wrap the constructor to discard arguments like `is_argument`
PyForwardRef.__init__ = discard_kwargs(PyForwardRef.__init__)
# This is needed to avoid an`AttributeError` when using PyForwardRef
# as an argument to `TypeVar`, as we do below.
#
# See https://stackoverflow.com/a/69436981/10237506.
_old_type_check = typing._type_check
@wraps(_old_type_check)
def _new_type_check(arg, message):
if arg is PyForwardRef:
return arg
return _old_type_check(arg, message)
typing._type_check = _new_type_check
# ensure the global namespace is the same for users
# regardless of the version of Python they're using
del _new_type_check, typing, wraps
else:
from typing import ForwardRef as PyForwardRef
# Forward references can be either strings or explicit `ForwardRef` objects.
# noinspection SpellCheckingInspection
FREF = TypeVar('FREF', str, PyForwardRef)
# Create our own "nullish" type for explicit type assertions
class ExplicitNullType:
__slots__ = ()
def __bool__(self):
return False
def __repr__(self):
return self.__class__.__qualname__
ExplicitNull = ExplicitNullType()
class Encoder(PyProtocol):
"""
Represents an encoder for Python object -> JSON, e.g. analogous to
`json.dumps`
"""
def __call__(self, obj: Union[JSONObject, JSONList],
**kwargs) -> AnyStr:
...
class Decoder(PyProtocol):
"""
Represents a decoder for JSON -> Python object, e.g. analogous to
`json.loads`
"""
def __call__(self, s: AnyStr,
**kwargs) -> Union[JSONObject, ListOfJSONObject]:
...
| [
"functools.wraps",
"typing.TypeVar"
] | [((923, 935), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (930, 935), False, 'from typing import Type, TypeVar, Sequence, Mapping, List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union\n'), ((962, 986), 'typing.TypeVar', 'TypeVar', (['"""E"""'], {'bound': 'Enum'}), "('E', bound=Enum)\n", (969, 986), False, 'from typing import Type, TypeVar, Sequence, Mapping, List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union\n'), ((1013, 1037), 'typing.TypeVar', 'TypeVar', (['"""U"""'], {'bound': 'UUID'}), "('U', bound=UUID)\n", (1020, 1037), False, 'from typing import Type, TypeVar, Sequence, Mapping, List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union\n'), ((1058, 1085), 'typing.TypeVar', 'TypeVar', (['"""M"""'], {'bound': 'Mapping'}), "('M', bound=Mapping)\n", (1065, 1085), False, 'from typing import Type, TypeVar, Sequence, Mapping, List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union\n'), ((1110, 1141), 'typing.TypeVar', 'TypeVar', (['"""NT"""'], {'bound': 'NamedTuple'}), "('NT', bound=NamedTuple)\n", (1117, 1141), False, 'from typing import Type, TypeVar, Sequence, Mapping, List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union\n'), ((1179, 1214), 'typing.TypeVar', 'TypeVar', (['"""DT"""', 'date', 'time', 'datetime'], {}), "('DT', date, time, datetime)\n", (1186, 1214), False, 'from typing import Type, TypeVar, Sequence, Mapping, List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union\n'), ((1240, 1272), 'typing.TypeVar', 'TypeVar', (['"""DD"""'], {'bound': 'DefaultDict'}), "('DD', bound=DefaultDict)\n", (1247, 1272), False, 'from typing import Type, TypeVar, Sequence, Mapping, List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union\n'), ((1293, 1326), 'typing.TypeVar', 'TypeVar', (['"""N"""', 'int', 'float', 'complex'], {}), "('N', int, float, complex)\n", (1300, 1326), False, 'from typing import Type, TypeVar, Sequence, Mapping, List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union\n'), ((1348, 1376), 'typing.TypeVar', 'TypeVar', (['"""S"""'], {'bound': 'Sequence'}), "('S', bound=Sequence)\n", (1355, 1376), False, 'from typing import Type, TypeVar, Sequence, Mapping, List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union\n'), ((1404, 1430), 'typing.TypeVar', 'TypeVar', (['"""LT"""', 'list', 'tuple'], {}), "('LT', list, tuple)\n", (1411, 1430), False, 'from typing import Type, TypeVar, Sequence, Mapping, List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union\n'), ((1486, 1529), 'typing.TypeVar', 'TypeVar', (['"""LSQ"""', 'list', 'set', 'frozenset', 'deque'], {}), "('LSQ', list, set, frozenset, deque)\n", (1493, 1529), False, 'from typing import Type, TypeVar, Sequence, Mapping, List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union\n'), ((4856, 4890), 'typing.TypeVar', 'TypeVar', (['"""FREF"""', 'str', 'PyForwardRef'], {}), "('FREF', str, PyForwardRef)\n", (4863, 4890), False, 'from typing import Type, TypeVar, Sequence, Mapping, List, DefaultDict, FrozenSet, NamedTuple, Callable, AnyStr, Dict, Any, Union\n'), ((4279, 4301), 'functools.wraps', 'wraps', (['_old_type_check'], {}), '(_old_type_check)\n', (4284, 4301), False, 'from functools import wraps\n')] |
import re
import math
import numpy as np
class UpstreamAUG:
def __init__(self, allow_ORF=True, verbose_output=False):
"""
Constructor
:param allow_ORF: bool, True by default, whether to check uORFs
:param verbose_output: bool, False by default, whether to return dictionaries in predict_on_sample() and predict_on_batch() methods or not
"""
self.allow_ORF = allow_ORF
self.verbose_output = verbose_output
pass
def predict_on_sample(self, seq):
"""
Predict_on_sample
:param seq: string, 5'UTR's sequence
:return: if verbose_output: dictionary:
first entry – 1 or 0 depending whether the uAUG is in-frame or not
second – 1 or 0 depending whether it corresponds to a uORF or not
else: NumPy array of 1 and 0 depending whether the uAUG is in-frame or not
:example: if the input 5'UTR has 5 AUG, then
{
"frame": [1, 1, 0, 0, 1],
"uORF": [1, 1, 1, 0, 0]
}
"""
if self.allow_ORF:
if self.verbose_output:
ATG_frame = []
ATG_ORF = []
for ATG in re.finditer('ATG', seq.upper()):
seq_remainder = seq[ATG.start() + 3:]
TAA_frame = [(TAA.start() % 3) for TAA in re.finditer('TAA', seq_remainder)]
if 0 in TAA_frame:
ORF = True
else:
TAG_frame = [(TAG.start() % 3) for TAG in re.finditer('TAG', seq_remainder)]
if 0 in TAG_frame:
ORF = True
else:
TGA_frame = [(TGA.start() % 3) for TGA in re.finditer('TGA', seq_remainder)]
ORF = 0 in TGA_frame
if ORF:
ATG_ORF.append(1)
else:
ATG_ORF.append(0)
if (len(seq) - ATG.start()) % 3:
ATG_frame.append(0)
else:
ATG_frame.append(1)
return {"frame": np.array(ATG_frame), "uORF": np.array(ATG_ORF)}
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq.upper())]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
return np.array(ATG_frame)
else:
pass
def predict_on_sample_with_pos(self, seq):
"""
In comparison to predict_on_sample(), additionally returns the positions of AUGs
:param seq: string utr's sequence
:return: if verbose_output: dictionary
first entry – 1 or 0 depending whether the uAUG is in-frame or not
second – 1 or 0 depending whether it corresponds to a uORF or not
third - pos of the ATG
else: NumPy array of 1 and 0 depending whether the uAUG is in-frame or not
:example: if the input 5'UTR has 5 AUG, then
{
"frame": [1, 1, 0, 0, 1],
"uORF": [1, 1, 1, 0, 0],
"pos": [38, 190, 438, 769, 981]
}
"""
if self.allow_ORF:
if self.verbose_output:
ATG_frame = []
ATG_ORF = []
ATG_pos = []
for ATG in re.finditer('ATG', seq.upper()):
seq_remainder = seq[ATG.start() + 3:]
TAA_frame = [(TAA.start() % 3) for TAA in re.finditer('TAA', seq_remainder)]
if 0 in TAA_frame:
ORF = True
else:
TAG_frame = [(TAG.start() % 3) for TAG in re.finditer('TAG', seq_remainder)]
if 0 in TAG_frame:
ORF = True
else:
TGA_frame = [(TGA.start() % 3) for TGA in re.finditer('TGA', seq_remainder)]
ORF = 0 in TGA_frame
if ORF:
ATG_ORF.append(1)
else:
ATG_ORF.append(0)
if (len(seq) - ATG.start()) % 3:
ATG_frame.append(0)
else:
ATG_frame.append(1)
ATG_pos.append(ATG.start())
return {"frame": np.array(ATG_frame), "uORF": np.array(ATG_ORF), "pos": np.array(ATG_pos)}
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq.upper())]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
return np.array(ATG_frame)
else:
pass
def predict_on_sample_with_pos_pandas(self, seq, result_dict, strand, start=None):
"""
In comparison to predict_on_sample(), additionally returns as positions of AUGs and outputs everything to the \
passed to it dictionary
:param seq: string utr's sequence
:param result_dict: dictionary with 4 mandatory keys "not_in-frame_no_uORF", "not_in-frame_uORF", "in-frame_no_uORF", "in-frame_uORF", where to append the found values
:param start: integer, position relatively to the whole genome (in contrast to position relative to the exon)
"""
if self.allow_ORF:
if strand == '+':
if self.verbose_output:
list_00 = [] # not_in-frame_no_uORF
list_01 = [] # not_in-frame_uORF
list_10 = [] # in-frame_no_uORF
list_11 = [] # in-frame_uORF
for ATG in re.finditer('ATG', seq):
seq_remainder = seq[ATG.start() + 3:]
TAA_frame = [(TAA.start() % 3) for TAA in re.finditer('TAA', seq_remainder)]
if 0 in TAA_frame:
ORF = True
else:
TAG_frame = [(TAG.start() % 3) for TAG in re.finditer('TAG', seq_remainder)]
if 0 in TAG_frame:
ORF = True
else:
TGA_frame = [(TGA.start() % 3) for TGA in re.finditer('TGA', seq_remainder)]
ORF = 0 in TGA_frame
if ORF:
if (len(seq) - ATG.start()) % 3:
list_01.append(ATG.start() + start)
else:
list_11.append(ATG.start() + start)
else:
if (len(seq) - ATG.start()) % 3:
list_00.append(ATG.start() + start)
else:
list_10.append(ATG.start() + start)
result_dict["not_in-frame_no_uORF"].append(np.array(list_00))
result_dict["not_in-frame_uORF"].append(np.array(list_01))
result_dict["in-frame_no_uORF"].append(np.array(list_10))
result_dict["in-frame_uORF"].append(np.array(list_11))
pass
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq)]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
pass
else:
if self.verbose_output:
list_00 = [] # not_in-frame_no_uORF
list_01 = [] # not_in-frame_uORF
list_10 = [] # in-frame_no_uORF
list_11 = [] # in-frame_uORF
for ATG in re.finditer('ATG', seq):
seq_remainder = seq[ATG.start() + 3:]
TAA_frame = [(TAA.start() % 3) for TAA in re.finditer('TAA', seq_remainder)]
if 0 in TAA_frame:
ORF = True
else:
TAG_frame = [(TAG.start() % 3) for TAG in re.finditer('TAG', seq_remainder)]
if 0 in TAG_frame:
ORF = True
else:
TGA_frame = [(TGA.start() % 3) for TGA in re.finditer('TGA', seq_remainder)]
ORF = 0 in TGA_frame
if ORF:
if (len(seq) - ATG.start()) % 3:
list_01.append(start + (len(seq) - ATG.start()) - 1)
else:
list_11.append(start + (len(seq) - ATG.start()) - 1)
else:
if (len(seq) - ATG.start()) % 3:
list_00.append(start + (len(seq) - ATG.start()) - 1)
else:
list_10.append(start + (len(seq) - ATG.start()) - 1)
result_dict["not_in-frame_no_uORF"].append(np.array(list_00))
result_dict["not_in-frame_uORF"].append(np.array(list_01))
result_dict["in-frame_no_uORF"].append(np.array(list_10))
result_dict["in-frame_uORF"].append(np.array(list_11))
pass
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq)]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
pass
else:
pass
def predict_on_sample_with_stop_pandas(self, seq, result_dict, strand, start=None):
"""
In comparison to predict_on_sample(), additionally returns as positions of AUGs and outputs everything to the \
passed to it dictionary
:param seq: string utr's sequence
:param result_dict: dictionary with 4 mandatory keys "not_in-frame_no_uORF", "not_in-frame_uORF", \
"in-frame_no_uORF", "in-frame_uORF", where to append the found values
:param start: integer, position relatively to the whole genome (in contrast to position relative to the exon)
"""
if self.allow_ORF:
if strand == '+':
if self.verbose_output:
list_00 = [] # not_in-frame_no_uORF
list_01 = [] # not_in-frame_uORF
list_10 = [] # in-frame_no_uORF
list_11 = [] # in-frame_uORF
for ATG in re.finditer('ATG', seq):
ORF = 0
seq_remainder = seq[ATG.start() + 3:]
for TAA in re.finditer('TAA', seq_remainder):
if not (TAA.start() % 3):
ORF = TAA.start()
break
if not ORF:
for TAG in re.finditer('TAG', seq_remainder):
if not (TAG.start() % 3):
ORF = TAG.start()
break
if not ORF:
for TGA in re.finditer('TGA', seq_remainder):
if not (TGA.start() % 3):
ORF = TGA.start()
break
if ORF:
if (len(seq) - ATG.start()) % 3:
list_01.append(ATG.start() + start)
list_01.append(ORF + start)
else:
list_11.append(ATG.start() + start)
list_11.append(ORF + start)
else:
if (len(seq) - ATG.start()) % 3:
list_00.append(ATG.start() + start)
else:
list_10.append(ATG.start() + start)
result_dict["not_in-frame_no_uORF"].append(np.array(list_00))
result_dict["not_in-frame_uORF"].append(np.array(list_01))
result_dict["in-frame_no_uORF"].append(np.array(list_10))
result_dict["in-frame_uORF"].append(np.array(list_11))
pass
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq)]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
pass
else:
if self.verbose_output:
list_00 = [] # not_in-frame_no_uORF
list_01 = [] # not_in-frame_uORF
list_10 = [] # in-frame_no_uORF
list_11 = [] # in-frame_uORF
for ATG in re.finditer('ATG', seq):
ORF = 0
seq_remainder = seq[ATG.start() + 3:]
for TAA in re.finditer('TAA', seq_remainder):
if not (TAA.start() % 3):
ORF = TAA.start()
break
if not ORF:
for TAG in re.finditer('TAG', seq_remainder):
if not (TAG.start() % 3):
ORF = TAG.start()
break
if not ORF:
for TGA in re.finditer('TGA', seq_remainder):
if not (TGA.start() % 3):
ORF = TGA.start()
break
if ORF:
if (len(seq) - ATG.start()) % 3:
list_01.append(start + (len(seq) - ATG.start()) - 1)
list_01.append(start + (len(seq) - ORF) - 1)
else:
list_11.append(start + (len(seq) - ATG.start()) - 1)
list_11.append(start + (len(seq) - ORF) - 1)
else:
if (len(seq) - ATG.start()) % 3:
list_00.append(start + (len(seq) - ATG.start()) - 1)
else:
list_10.append(start + (len(seq) - ATG.start()) - 1)
result_dict["not_in-frame_no_uORF"].append(np.array(list_00))
result_dict["not_in-frame_uORF"].append(np.array(list_01))
result_dict["in-frame_no_uORF"].append(np.array(list_10))
result_dict["in-frame_uORF"].append(np.array(list_11))
pass
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq)]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
pass
else:
pass
def predict_on_batch(self, seq_list):
"""
Predict on batch
:param seq_list: list of string utr's sequences
:return: if verbose_output: NumPy array of dictionaries
first entry – 1 or 0 depending whether the uAUG is in-frame or not
second – 1 or 0 depending whether it corresponds to a uORF or not
else: NumPy array of 1 and 0 whether the uAUG is in-frame or not
"""
if self.allow_ORF:
result_list = []
for seq in seq_list:
result_list.append(self.predict_on_sample(seq))
return result_list
else:
pass
| [
"numpy.array",
"math.ceil",
"re.finditer"
] | [((2560, 2579), 'numpy.array', 'np.array', (['ATG_frame'], {}), '(ATG_frame)\n', (2568, 2579), True, 'import numpy as np\n'), ((4952, 4971), 'numpy.array', 'np.array', (['ATG_frame'], {}), '(ATG_frame)\n', (4960, 4971), True, 'import numpy as np\n'), ((2235, 2254), 'numpy.array', 'np.array', (['ATG_frame'], {}), '(ATG_frame)\n', (2243, 2254), True, 'import numpy as np\n'), ((2264, 2281), 'numpy.array', 'np.array', (['ATG_ORF'], {}), '(ATG_ORF)\n', (2272, 2281), True, 'import numpy as np\n'), ((4601, 4620), 'numpy.array', 'np.array', (['ATG_frame'], {}), '(ATG_frame)\n', (4609, 4620), True, 'import numpy as np\n'), ((4630, 4647), 'numpy.array', 'np.array', (['ATG_ORF'], {}), '(ATG_ORF)\n', (4638, 4647), True, 'import numpy as np\n'), ((4656, 4673), 'numpy.array', 'np.array', (['ATG_pos'], {}), '(ATG_pos)\n', (4664, 4673), True, 'import numpy as np\n'), ((5950, 5973), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (5961, 5973), False, 'import re\n'), ((8098, 8121), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (8109, 8121), False, 'import re\n'), ((11018, 11041), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (11029, 11041), False, 'import re\n'), ((13475, 13498), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (13486, 13498), False, 'import re\n'), ((2491, 2509), 'math.ceil', 'math.ceil', (['(res / 2)'], {}), '(res / 2)\n', (2500, 2509), False, 'import math\n'), ((4883, 4901), 'math.ceil', 'math.ceil', (['(res / 2)'], {}), '(res / 2)\n', (4892, 4901), False, 'import math\n'), ((7229, 7246), 'numpy.array', 'np.array', (['list_00'], {}), '(list_00)\n', (7237, 7246), True, 'import numpy as np\n'), ((7308, 7325), 'numpy.array', 'np.array', (['list_01'], {}), '(list_01)\n', (7316, 7325), True, 'import numpy as np\n'), ((7386, 7403), 'numpy.array', 'np.array', (['list_10'], {}), '(list_10)\n', (7394, 7403), True, 'import numpy as np\n'), ((7461, 7478), 'numpy.array', 'np.array', (['list_11'], {}), '(list_11)\n', (7469, 7478), True, 'import numpy as np\n'), ((9445, 9462), 'numpy.array', 'np.array', (['list_00'], {}), '(list_00)\n', (9453, 9462), True, 'import numpy as np\n'), ((9524, 9541), 'numpy.array', 'np.array', (['list_01'], {}), '(list_01)\n', (9532, 9541), True, 'import numpy as np\n'), ((9602, 9619), 'numpy.array', 'np.array', (['list_10'], {}), '(list_10)\n', (9610, 9619), True, 'import numpy as np\n'), ((9677, 9694), 'numpy.array', 'np.array', (['list_11'], {}), '(list_11)\n', (9685, 9694), True, 'import numpy as np\n'), ((11173, 11206), 're.finditer', 're.finditer', (['"""TAA"""', 'seq_remainder'], {}), "('TAA', seq_remainder)\n", (11184, 11206), False, 'import re\n'), ((12606, 12623), 'numpy.array', 'np.array', (['list_00'], {}), '(list_00)\n', (12614, 12623), True, 'import numpy as np\n'), ((12685, 12702), 'numpy.array', 'np.array', (['list_01'], {}), '(list_01)\n', (12693, 12702), True, 'import numpy as np\n'), ((12763, 12780), 'numpy.array', 'np.array', (['list_10'], {}), '(list_10)\n', (12771, 12780), True, 'import numpy as np\n'), ((12838, 12855), 'numpy.array', 'np.array', (['list_11'], {}), '(list_11)\n', (12846, 12855), True, 'import numpy as np\n'), ((13630, 13663), 're.finditer', 're.finditer', (['"""TAA"""', 'seq_remainder'], {}), "('TAA', seq_remainder)\n", (13641, 13663), False, 'import re\n'), ((15165, 15182), 'numpy.array', 'np.array', (['list_00'], {}), '(list_00)\n', (15173, 15182), True, 'import numpy as np\n'), ((15244, 15261), 'numpy.array', 'np.array', (['list_01'], {}), '(list_01)\n', (15252, 15261), True, 'import numpy as np\n'), ((15322, 15339), 'numpy.array', 'np.array', (['list_10'], {}), '(list_10)\n', (15330, 15339), True, 'import numpy as np\n'), ((15397, 15414), 'numpy.array', 'np.array', (['list_11'], {}), '(list_11)\n', (15405, 15414), True, 'import numpy as np\n'), ((1394, 1427), 're.finditer', 're.finditer', (['"""TAA"""', 'seq_remainder'], {}), "('TAA', seq_remainder)\n", (1405, 1427), False, 'import re\n'), ((3712, 3745), 're.finditer', 're.finditer', (['"""TAA"""', 'seq_remainder'], {}), "('TAA', seq_remainder)\n", (3723, 3745), False, 'import re\n'), ((7584, 7607), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (7595, 7607), False, 'import re\n'), ((7722, 7740), 'math.ceil', 'math.ceil', (['(res / 2)'], {}), '(res / 2)\n', (7731, 7740), False, 'import math\n'), ((9800, 9823), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (9811, 9823), False, 'import re\n'), ((9938, 9956), 'math.ceil', 'math.ceil', (['(res / 2)'], {}), '(res / 2)\n', (9947, 9956), False, 'import math\n'), ((11425, 11458), 're.finditer', 're.finditer', (['"""TAG"""', 'seq_remainder'], {}), "('TAG', seq_remainder)\n", (11436, 11458), False, 'import re\n'), ((12961, 12984), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (12972, 12984), False, 'import re\n'), ((13099, 13117), 'math.ceil', 'math.ceil', (['(res / 2)'], {}), '(res / 2)\n', (13108, 13117), False, 'import math\n'), ((13882, 13915), 're.finditer', 're.finditer', (['"""TAG"""', 'seq_remainder'], {}), "('TAG', seq_remainder)\n", (13893, 13915), False, 'import re\n'), ((15520, 15543), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (15531, 15543), False, 'import re\n'), ((15658, 15676), 'math.ceil', 'math.ceil', (['(res / 2)'], {}), '(res / 2)\n', (15667, 15676), False, 'import math\n'), ((1595, 1628), 're.finditer', 're.finditer', (['"""TAG"""', 'seq_remainder'], {}), "('TAG', seq_remainder)\n", (1606, 1628), False, 'import re\n'), ((3913, 3946), 're.finditer', 're.finditer', (['"""TAG"""', 'seq_remainder'], {}), "('TAG', seq_remainder)\n", (3924, 3946), False, 'import re\n'), ((6103, 6136), 're.finditer', 're.finditer', (['"""TAA"""', 'seq_remainder'], {}), "('TAA', seq_remainder)\n", (6114, 6136), False, 'import re\n'), ((8251, 8284), 're.finditer', 're.finditer', (['"""TAA"""', 'seq_remainder'], {}), "('TAA', seq_remainder)\n", (8262, 8284), False, 'import re\n'), ((11697, 11730), 're.finditer', 're.finditer', (['"""TGA"""', 'seq_remainder'], {}), "('TGA', seq_remainder)\n", (11708, 11730), False, 'import re\n'), ((14154, 14187), 're.finditer', 're.finditer', (['"""TGA"""', 'seq_remainder'], {}), "('TGA', seq_remainder)\n", (14165, 14187), False, 'import re\n'), ((1812, 1845), 're.finditer', 're.finditer', (['"""TGA"""', 'seq_remainder'], {}), "('TGA', seq_remainder)\n", (1823, 1845), False, 'import re\n'), ((4130, 4163), 're.finditer', 're.finditer', (['"""TGA"""', 'seq_remainder'], {}), "('TGA', seq_remainder)\n", (4141, 4163), False, 'import re\n'), ((6320, 6353), 're.finditer', 're.finditer', (['"""TAG"""', 'seq_remainder'], {}), "('TAG', seq_remainder)\n", (6331, 6353), False, 'import re\n'), ((8468, 8501), 're.finditer', 're.finditer', (['"""TAG"""', 'seq_remainder'], {}), "('TAG', seq_remainder)\n", (8479, 8501), False, 'import re\n'), ((6553, 6586), 're.finditer', 're.finditer', (['"""TGA"""', 'seq_remainder'], {}), "('TGA', seq_remainder)\n", (6564, 6586), False, 'import re\n'), ((8701, 8734), 're.finditer', 're.finditer', (['"""TGA"""', 'seq_remainder'], {}), "('TGA', seq_remainder)\n", (8712, 8734), False, 'import re\n')] |