| code | apis | extract_api |
|---|---|---|
import os
from typing import List, Optional
import pickle
import numpy as np
from utilities import augment_long_text, tokenize, tokenize_long_text, to_chars, align
from config import Config as cf
PAD = 0 # TODO: choose appropriate index for these special chars
UNK = 1
DEFAULT = {'PAD': PAD, 'UNK': UNK}
DEFAULT_C = {'': PAD, 'UNK': UNK}
def word_lookup(w: str, table: dict, default=None):
"""
Translate a word into a value by looking up from a dict.
Case-sensitive lookup takes priority; case-insensitive lookup is the fallback.
If the word does not exist in the dict, the default value is returned when given; otherwise a KeyError is raised.
Args:
w: word to translate
table: a dict to translate the word by looking up
default: If not None, this is the value to return in case the word does not exist in the table.
Returns:
Translated value for the word by looking up the word into table.
"""
if w in table: # Matching the word case-sensitively is the first priority
return table[w]
elif w.lower() in table: # Then, case-insensitive
return table[w.lower()]
else:
if default is not None:
return default
else:
raise KeyError('Key `{}` not found'.format(w))
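# Illustrative lookups (hypothetical table values, not from the data):
# table = {'the': 0, 'paris': 7}
# word_lookup('paris', table) -> 7 (exact, case-sensitive match)
# word_lookup('Paris', table) -> 7 (case-insensitive fallback via 'paris')
# word_lookup('venus', table, default=1) -> 1; without a default a KeyError is raised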
def char_lookup(c: str, table: dict, default=None):
"""
Translate a char into a value by looking up from a dict.
Args:
c: char to translate
table: a dict to translate the char by looking up
default: If not None, this is the value to return in case the char does not exist in the table.
Returns:
Translated value for the char by looking up the char into table.
"""
if c in table: # Exact match; unlike word_lookup there is no case-insensitive fallback
return table[c]
else:
if default is not None:
return default
else:
raise KeyError('Key `{}` not found'.format(c))
class Vocabulary(object):
def __init__(self, wv: dict, char_vocab: set):
offset = len(DEFAULT)
w2id = {w: idx+offset for idx, w in enumerate(wv.keys())}
w2id.update(DEFAULT)
id2w = {i:w for w, i in w2id.items()}
c2id = {c: idx+offset for idx, c in enumerate(list(char_vocab))}
c2id.update(DEFAULT_C)
id2c = {i:c for c, i in c2id.items()}
self.wv = wv
self.emb_size = len(wv['the']) # 'the' is a common word assumed to be present in any word-vector dict
self.w2id = w2id # mapping word to index
self.id2w = id2w # mapping index to word
self.c2id = c2id # mapping char to index
self.id2c = id2c # mapping index to char
def vectorize(self, tokens: List[str], length: int):
"""
Convert list of text tokens into list of indices
"""
vect = [word_lookup(t, self.w2id, default=UNK) for t in tokens]
vect = vect[:length]
if len(vect) < length:
vect.extend([PAD]*(length-len(vect)))
return vect
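# Pad/truncate sketch (hypothetical ids): with length=4, tokens mapping to [5, 9]
# become [5, 9, PAD, PAD]; with length=1 they are truncated to [5].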
def vectorize_c(self, chars_list: List[List[str]], length: int, w_length: int):
"""
Convert list of list of chars into list of index-based representation
"""
vects = []
PAD_VECT = [PAD]*w_length
for chars in chars_list:
vects.append([char_lookup(c, self.c2id, default=UNK) for c in chars])
vects = vects[:length]
while len(vects) < length:
vects.append(PAD_VECT)
return vects
def get_embed_weights(self):
"""
Build weights for a word embedding layer.
Note that a pre-trained word embedding is used, so emb_size is derived
from the stored vectors instead of being passed as a parameter.
Returns:
[len(id2w), emb_size] matrix covering the vocab plus the PAD and UNK entries
"""
emb_size = len(self.wv[list(self.wv.keys())[0]])
weights = np.zeros((len(self.id2w), emb_size))
for i, tok in self.id2w.items():
if tok in self.wv:
weights[i] = self.wv[tok]
else:
weights[i] = np.random.uniform(0.0, 1.0, [emb_size])
return weights
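# Rows are aligned with id2w: row i holds the pre-trained vector for id2w[i] when
# available, and a random U(0, 1) vector otherwise (typically including the PAD
# and UNK rows, which have no pre-trained counterpart).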
def get_char_embed_weights(self, emb_size=64):
"""
Initialize weights for char embedding layer.
Args:
emb_size: Dim of the vectors
Returns:
[len(id2c), emb_size] matrix
"""
weights = np.random.uniform(0.0, 1.0, size=(len(self.id2c), emb_size))
return weights
@property
def vocab_size(self):
return len(self.w2id)
def __getitem__(self, idx):
"""
Get vector for a word.
"""
if not isinstance(idx, str):
raise ValueError('Index must be a string')
return word_lookup(idx, self.wv, default=None)
def __contains__(self, idx):
if not isinstance(idx, str):
raise ValueError('Index must be a string')
return idx in self.wv or idx.lower() in self.wv
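# Toy end-to-end sketch (hypothetical two-dimensional vectors, not real embeddings):
# vocab = Vocabulary(wv={'the': [0.1, 0.2]}, char_vocab={'t', 'h', 'e'})
# 'the' in vocab -> True; vocab['the'] -> [0.1, 0.2]
# vocab.vectorize(['the', 'qux'], length=3) -> [2, 1, 0] (known id, UNK, PAD)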
class Span(object):
def __init__(self, start_idx: int, end_idx: int):
self.start = start_idx # index of the start token in context
self.end = end_idx # index of the end token in context
@classmethod
def allocate(cls, anchors: List[int], start_char: int, end_char: int):
start_idx = 0
while anchors[start_idx] < start_char:
start_idx += 1
if anchors[start_idx] > start_char:
start_idx -= 1
end_idx = start_idx
while end_idx < len(anchors) and anchors[end_idx] <= end_char:
end_idx += 1
end_idx -= 1
return Span(start_idx, end_idx)
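# Worked example: with anchors = [0, 4, 10] (token start offsets in the context),
# an answer covering characters 5..9 lies inside token 1, so
# Span.allocate([0, 4, 10], 5, 9) returns Span(1, 1), printed as "(1, 1)".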
def __str__(self):
return "({}, {})".format(self.start, self.end)
class Answer(object):
def __init__(self, answer_text: str, answer_toks: List[str], span: Span, answer_start: int):
self.answer_text = answer_text # original answer text in JSON
self.answer_toks = answer_toks # tokens of the original answer text
self.answer_chars = to_chars(answer_toks, cf.WORD_LEN, cf.PAD_CHAR) # list of chars of the answer text
self.span = span # The span (token-based index) of the answer in context
self.answer_start = answer_start # start character in original answer text
def vectorize(self, vocab: Vocabulary):
self.answer: List[int] = vocab.vectorize(self.answer_toks, cf.ANSWER_LEN)
self.answer_c: List[List[int]] = vocab.vectorize_c(self.answer_chars, cf.ANSWER_LEN, cf.WORD_LEN)
@classmethod
def parse_json(cls, answers_js: List[dict], context: str, context_toks: List[str], anchors: List[int]):
answers = []
for ans in answers_js:
ans_text = ans['text']
ans_start = ans['answer_start']
ans_toks = tokenize(ans_text)
# Identify the span from context, ans_text & start index
span = Span.allocate(anchors, ans_start, ans_start+len(ans_text)-1)
answers.append(Answer(ans_text, ans_toks, span, ans_start))
return answers
class Question(object):
def __init__(self, question_text: str, ques_id: str, question: List[str], answers: List[Answer], plausible_answers: List[Answer]):
self.question_text = question_text # original question text in JSON
self.question_toks = question # tokens of the original question text
self.question_chars = to_chars(question, cf.WORD_LEN, cf.PAD_CHAR) # list of chars of the question text
self.answers = answers # list of Answer object of the question
self.ques_id = ques_id # id of the question in JSON
self.plausible_answers = plausible_answers
self.paragraph = None # handle to the parent paragraph
def set_paragraph(self, paragraph):
self.paragraph = paragraph
def vectorize(self, vocab: Vocabulary):
self.question: List[int] = vocab.vectorize(self.question_toks, cf.QUERY_LEN)
self.question_c: List[List[int]] = vocab.vectorize_c(self.question_chars, cf.QUERY_LEN, cf.WORD_LEN)
for answer in self.answers:
answer.vectorize(vocab)
class Paragraph(object):
def __init__(self, raw_context: str, context_text: str, context_toks: List[str], questions: List[Question], para_idx: int, anchors: List[int]):
self.raw_context = raw_context # original context text in JSON
self.context_text = context_text # augmented from original context text with SPACES to guide the tokenization
self.context_toks = context_toks # tokens of the context text
self.context_chars = to_chars(context_toks, cf.WORD_LEN, cf.PAD_CHAR) # chars of the context
self.questions = questions # list of Question objects
self.local_word_vocab = self._build_local_word_vocab()
self.local_char_vocab = self._build_local_char_vocab()
self.para_idx = para_idx # Just for management & debug. Not used in experiment.
self.anchors = anchors
def _build_local_word_vocab(self):
local_vocab = set()
local_vocab = local_vocab.union(set(self.context_toks))
for question in self.questions:
local_vocab = local_vocab.union(set(question.question_toks))
for answer in question.answers + question.plausible_answers:
local_vocab = local_vocab.union(set(answer.answer_toks))
return local_vocab
def _build_local_char_vocab(self):
def char_set(tokens):
chars = set()
for tok in tokens:
chars = chars.union(set(tok))
return chars
char_vocab = set()
char_vocab = char_vocab.union(char_set(self.context_chars))
for question in self.questions:
char_vocab = char_vocab.union(char_set(question.question_chars))
for answer in question.answers + question.plausible_answers:
char_vocab = char_vocab.union(char_set(answer.answer_chars))
return char_vocab
@classmethod
def parse_json(cls, para_js: dict, para_idx: int):
# Accumulate all answers' tokens first
all_para_answers = []
for q in para_js['qas']:
if 'answers' in q:
all_para_answers.extend([ans for ans in q['answers']])
if 'plausible_answers' in q:
all_para_answers.extend([ans for ans in q['plausible_answers']])
# Improve the context for better tokenization
raw_context = para_js['context']
# context = augment_long_text(para_js['context'], all_para_answers)
context = raw_context
context_toks = tokenize_long_text(context)
context_toks = [t.strip(' ') for t in context_toks]
anchors = align(raw_context, context_toks)
questions = []
for q in para_js['qas']:
question_text = q['question']
q_toks = tokenize(question_text)
ques_id = q['id']
answers = Answer.parse_json(q['answers'], raw_context, context_toks, anchors) if 'answers' in q else []
plausible_answers = Answer.parse_json(q['plausible_answers'], raw_context, context_toks, anchors) if 'plausible_answers' in q else []
questions.append(Question(question_text, ques_id, q_toks, answers, plausible_answers))
para = Paragraph(raw_context, context, context_toks, questions, para_idx, anchors)
for ques in questions:
ques.set_paragraph(para)
return para
def vectorize(self, vocab):
"""
Vectorize paragraph context, question text & answer text based on the given vocab.
"""
self.context: List[int] = vocab.vectorize(self.context_toks, cf.CONTEXT_LEN)
self.context_c: List[List[int]] = vocab.vectorize_c(self.context_chars, cf.CONTEXT_LEN, cf.WORD_LEN)
for question in self.questions:
question.vectorize(vocab)
def exact_match(gt_s, gt_e, pr_s, pr_e):
"""
Evaluate exact match of a predicted span over a ground truth span.
Args:
gt_s: index of the ground truth start position
gt_e: index of the ground truth end position
pr_s: index of the predicted start position
pr_e: index of the predicted end position
"""
return gt_s == pr_s and gt_e == pr_e
def f1(gt_s, gt_e, pr_s, pr_e):
"""
Evaluate F1 score of a predicted span over a ground truth span.
Args:
gt_s: index of the ground truth start position
gt_e: index of the ground truth end position
pr_s: index of the predicted start position
pr_e: index of the predicted end position
"""
gt = {idx for idx in range(gt_s, gt_e+1)}
pr = {idx for idx in range(pr_s, pr_e+1)}
intersection = gt.intersection(pr)
prec = 1. * len(intersection) / len(pr)
rec = 1. * len(intersection) / len(gt)
f1_score = (2. * prec * rec) / (prec+rec) if prec+rec != 0. else 0.
return f1_score
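# Worked example: f1(2, 5, 3, 6) compares token sets {2,3,4,5} and {3,4,5,6};
# the overlap holds 3 tokens, so precision = recall = 3/4 and F1 = 0.75.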
def get_score(metric, gt_starts, gt_ends, pred_start, pred_end):
"""
Args:
metric: a metric function to calculate the score (exact_match or f1_score)
gt_starts: (list) an array of start indices of the available answers
gt_ends: (list) an array of end indices of the available answers
pred_start: (int) predicted start index returned by a model
pred_end: (int) predicted end index returned by a model
Returns:
The best score of the metric evaluated on multiple answer spans.
"""
scores = []
for gt_s, gt_e in zip(gt_starts, gt_ends):
scores.append(metric(gt_s, gt_e, pred_start, pred_end))
return 1.0 * np.max(scores)
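# Example: get_score(exact_match, [3, 7], [5, 9], 7, 9) checks the prediction
# against both ground-truth spans and keeps the best result -> 1.0.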
class SquadData(object):
"""
To save the whole object to pickle file:
```python
data.save('data/squad_processed.pkl')
```
To load the whole object from pickle file, and extract train & validation data
```python
data = SquadData.load('data/squad_processed.pkl')
ques_ids_train, X_train, y_train = data.train_data()
ques_ids_valid, X_valid, y_valid = data.validation_data()
```
To save structured data to binary files for fast loading:
```python
data.save(np_path='data/numpy')
```
To load numpy data from binary files:
```python
word_vectors, char_vectors, ques_ids_train, X_train, y_train, ques_ids_valid, X_valid, y_valid = SquadData.load(np_path='data/numpy')
```
"""
def __init__(self, train_paragraphs: List[Paragraph], dev_paragraphs: List[Paragraph], vocab: Vocabulary, squad_words: set, squad_chars: set):
"""
Initializer.
Args:
train_paragraphs: list of Paragraph objects from train data
dev_paragraphs: list of Paragraph objects from dev data
vocab: Vocabulary object which store vectors of words appearing in Squad data
squad_words: set of all tokens appearing in Squad data (context, question text, answer text).
Note that some tokens may not appear in vocab. They are treated as unknown words.
Note that this is a set of words, so it must not be used to map words to indices. Use Vocabulary.w2id instead.
squad_chars: set of all characters appearing in Squad data (context, question text, answer text).
"""
self.train_paragraphs = train_paragraphs
self.dev_paragraphs = dev_paragraphs
self.vocab = vocab
self.squad_words = squad_words
self.squad_chars = squad_chars
def summary(self):
print('Num of train paragraphs: {}'.format(len(self.train_paragraphs)))
print('Num of dev paragraphs: {}'.format(len(self.dev_paragraphs)))
print('Num words in vocab: {}'.format(self.vocab.vocab_size))
print('Num unique words: {}'.format(len(self.squad_words)))
print('Num unique chars: {}'.format(len(self.squad_chars)))
unknown_words = [w for w in self.squad_words if w not in self.vocab]
print('Num of unknown words: {}'.format(len(unknown_words)))
def _generate_data(self, paragraphs, dataset: str ='train'):
ques_ids = []
contextw_inp, queryw_inp, contextc_inp, queryc_inp = [], [], [], []
p1, p2, start, end = [], [], [], []
long_count = 0
for para in paragraphs:
for ques in para.questions:
if dataset == 'train':
for ans in ques.answers:
if ans.span.start >= cf.CONTEXT_LEN or ans.span.end >= cf.CONTEXT_LEN:
# print('ques.ques_id:', ques.ques_id, ',', 'ans.span.start, end:', ans.span.start, ',', ans.span.end)
long_count += 1
continue
ques_ids.append(ques.ques_id)
contextw_inp.append(para.context)
queryw_inp.append(ques.question)
contextc_inp.append(para.context_c)
queryc_inp.append(ques.question_c)
vect = np.zeros(cf.CONTEXT_LEN, dtype=np.float16)
vect[ans.span.start] = 1.
p1.append(vect)
vect = np.zeros(cf.CONTEXT_LEN, dtype=np.float16)
vect[ans.span.end] = 1.
p2.append(vect)
start.append(ans.span.start)
end.append(ans.span.end)
else: # dev dataset
ques_ids.append(ques.ques_id)
contextw_inp.append(para.context)
queryw_inp.append(ques.question)
contextc_inp.append(para.context_c)
queryc_inp.append(ques.question_c)
start_list = []
end_list = []
for ans in ques.answers:
if ans.span.start >= cf.CONTEXT_LEN or ans.span.end >= cf.CONTEXT_LEN:
long_count += 1
continue
start_list.append(ans.span.start)
end_list.append(ans.span.end)
# p1, p2 are ignored in dev set
start.append(start_list)
end.append(end_list)
print('There are {} long answers'.format(long_count))
ques_ids = np.array(ques_ids)
contextw_inp, queryw_inp, contextc_inp, queryc_inp = np.array(contextw_inp), np.array(queryw_inp), np.array(contextc_inp), np.array(queryc_inp)
p1, p2, start, end = np.array(p1), np.array(p2), np.array(start), np.array(end)
return (ques_ids, [contextw_inp, queryw_inp, contextc_inp, queryc_inp], [p1, p2, start, end])
def train_data(self):
return self._generate_data(self.train_paragraphs)
def validation_data(self):
return self._generate_data(self.dev_paragraphs, dataset='dev')
def search_paragraph(self, para_idx: int, dataset: str ='train'):
"""
Search for paragraph by index. This function is used for debug only.
"""
paragraphs = self.train_paragraphs if dataset == 'train' else self.dev_paragraphs
for para in paragraphs:
if para.para_idx == para_idx:
return para
return None
def search_question(self, ques_id: str, dataset: str ='train'):
"""
Search for question by ques_id. This function is used for debug only.
"""
paragraphs = self.train_paragraphs if dataset == 'train' else self.dev_paragraphs
for para in paragraphs:
for ques in para.questions:
if ques.ques_id == ques_id:
return ques
return None
@classmethod
def evaluate(cls, gt_start_list, gt_end_list, pred_starts, pred_ends):
"""
Evaluate ExactMatch score & F1 score of predictions on a validation set.
Args:
gt_start_list: list of start indices of multiple ground-truth answer spans
gt_end_list: list of end indices of multiple ground-truth answer spans
pred_starts: list of predicted start indices
pred_ends: list of predicted end indices
Returns:
A hash with 2 keys: 'exact_match' & 'f1'
"""
em_score = 0
f1_score = 0
total = 0
for gt_starts, gt_ends, pred_start, pred_end in zip(gt_start_list, gt_end_list, pred_starts, pred_ends):
if len(gt_starts) > 0:
em_score += get_score(exact_match, gt_starts, gt_ends, pred_start, pred_end)
f1_score += get_score(f1, gt_starts, gt_ends, pred_start, pred_end)
# If gt_starts is empty, the ground-truth answer is over the limit length of the input text.
# We give penalty for that case, that means we give 0 to EM & F1 while we increase the total.
total += 1
em_score = 100. * em_score / total
f1_score = 100. * f1_score / total
return {
'exact_match': em_score,
'f1': f1_score
}
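# Example: SquadData.evaluate([[0], [2]], [[1], [4]], [0, 3], [1, 4]) scores the
# first prediction as an exact match (EM 1, F1 1) and the second as a partial
# overlap (EM 0, F1 0.8), returning {'exact_match': 50.0, 'f1': 90.0}.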
def save(self, filepath=None, np_path=None):
def save_data(prefix, ques_ids,
contextw, queryw, contextc, queryc,
p1, p2, start, end):
np.save(np_path + '/%s_ques_ids.npy' % prefix, ques_ids)
np.save(np_path + '/%s_contextw.npy' % prefix, contextw)
np.save(np_path + '/%s_queryw.npy' % prefix, queryw)
np.save(np_path + '/%s_contextc.npy' % prefix, contextc)
np.save(np_path + '/%s_queryc.npy' % prefix, queryc)
np.save(np_path + '/%s_p1.npy' % prefix, p1)
np.save(np_path + '/%s_p2.npy' % prefix, p2)
np.save(np_path + '/%s_start.npy' % prefix, start)
np.save(np_path + '/%s_end.npy' % prefix, end)
if filepath: # Save the SquadData object to pickle file (slow)
print('Saving squad data to {}...'.format(filepath))
with open(filepath, 'wb') as f:
pickle.dump(self, f)
else: # Save the binary data to *.npy files (fast)
print('Accumulating train & validation arrays from the structure...')
t_ques_ids, X_train, y_train = self.train_data()
v_ques_ids, X_valid, y_valid = self.validation_data()
t_contextw, t_queryw, t_contextc, t_queryc = X_train
t_p1, t_p2, t_start, t_end = y_train
v_contextw, v_queryw, v_contextc, v_queryc = X_valid
v_p1, v_p2, v_start, v_end = y_valid
if not os.path.exists(np_path):
os.makedirs(np_path)
print('Saving word vectors into numpy files...')
word_vectors = self.vocab.get_embed_weights()
char_vectors = self.vocab.get_char_embed_weights()
np.save(np_path + '/word_vectors.npy', word_vectors)
np.save(np_path + '/char_vectors.npy', char_vectors)
print('Saving train arrays into numpy files...')
save_data(
'train', t_ques_ids,
t_contextw, t_queryw, t_contextc, t_queryc,
t_p1, t_p2, t_start, t_end)
print('Saving validation arrays into numpy files...')
save_data(
'val', v_ques_ids,
v_contextw, v_queryw, v_contextc, v_queryc,
v_p1, v_p2, v_start, v_end)
@classmethod
def load(cls, filepath=None, np_path=None):
def load_data(prefix):
ques_ids = np.load(np_path + '/%s_ques_ids.npy' % prefix)
contextw = np.load(np_path + '/%s_contextw.npy' % prefix)
queryw = np.load(np_path + '/%s_queryw.npy' % prefix)
contextc = np.load(np_path + '/%s_contextc.npy' % prefix)
queryc = np.load(np_path + '/%s_queryc.npy' % prefix)
p1 = np.load(np_path + '/%s_p1.npy' % prefix)
p2 = np.load(np_path + '/%s_p2.npy' % prefix)
start = np.load(np_path + '/%s_start.npy' % prefix)
end = np.load(np_path + '/%s_end.npy' % prefix)
return ques_ids, contextw, queryw, contextc, queryc, p1, p2, start, end
if filepath: # Load SquadData object from pickle file (slow)
print('Loading squad data from pickle file {}...'.format(filepath))
with open(filepath, 'rb') as f:
return pickle.load(f)
else: # Load binary data from *.npy files (fast)
print('Loading word vectors from numpy files...')
word_vectors = np.load(np_path + '/word_vectors.npy')
char_vectors = np.load(np_path + '/char_vectors.npy')
print('Loading train arrays from numpy files...')
t_ques_ids, t_contextw, t_queryw, t_contextc, t_queryc, t_p1, t_p2, t_start, t_end = load_data('train')
print('Loading validation arrays from numpy files...')
v_ques_ids, v_contextw, v_queryw, v_contextc, v_queryc, v_p1, v_p2, v_start, v_end = load_data('val')
return [
word_vectors,
char_vectors,
t_ques_ids,
[t_contextw, t_queryw, t_contextc, t_queryc],
[t_p1, t_p2, t_start, t_end],
v_ques_ids,
[v_contextw, v_queryw, v_contextc, v_queryc],
[v_p1, v_p2, v_start, v_end]
]
| [
"os.path.exists",
"pickle.dump",
"os.makedirs",
"pickle.load",
"utilities.align",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.random.uniform",
"utilities.to_chars",
"numpy.save",
"utilities.tokenize",
"utilities.tokenize_long_text",
"numpy.load"
] | [((6131, 6178), 'utilities.to_chars', 'to_chars', (['answer_toks', 'cf.WORD_LEN', 'cf.PAD_CHAR'], {}), '(answer_toks, cf.WORD_LEN, cf.PAD_CHAR)\n', (6139, 6178), False, 'from utilities import augment_long_text, tokenize, tokenize_long_text, to_chars, align\n'), ((7546, 7590), 'utilities.to_chars', 'to_chars', (['question', 'cf.WORD_LEN', 'cf.PAD_CHAR'], {}), '(question, cf.WORD_LEN, cf.PAD_CHAR)\n', (7554, 7590), False, 'from utilities import augment_long_text, tokenize, tokenize_long_text, to_chars, align\n'), ((8794, 8842), 'utilities.to_chars', 'to_chars', (['context_toks', 'cf.WORD_LEN', 'cf.PAD_CHAR'], {}), '(context_toks, cf.WORD_LEN, cf.PAD_CHAR)\n', (8802, 8842), False, 'from utilities import augment_long_text, tokenize, tokenize_long_text, to_chars, align\n'), ((10832, 10859), 'utilities.tokenize_long_text', 'tokenize_long_text', (['context'], {}), '(context)\n', (10850, 10859), False, 'from utilities import augment_long_text, tokenize, tokenize_long_text, to_chars, align\n'), ((10938, 10970), 'utilities.align', 'align', (['raw_context', 'context_toks'], {}), '(raw_context, context_toks)\n', (10943, 10970), False, 'from utilities import augment_long_text, tokenize, tokenize_long_text, to_chars, align\n'), ((13832, 13846), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (13838, 13846), True, 'import numpy as np\n'), ((18580, 18598), 'numpy.array', 'np.array', (['ques_ids'], {}), '(ques_ids)\n', (18588, 18598), True, 'import numpy as np\n'), ((6915, 6933), 'utilities.tokenize', 'tokenize', (['ans_text'], {}), '(ans_text)\n', (6923, 6933), False, 'from utilities import augment_long_text, tokenize, tokenize_long_text, to_chars, align\n'), ((11090, 11113), 'utilities.tokenize', 'tokenize', (['question_text'], {}), '(question_text)\n', (11098, 11113), False, 'from utilities import augment_long_text, tokenize, tokenize_long_text, to_chars, align\n'), ((18660, 18682), 'numpy.array', 'np.array', (['contextw_inp'], {}), '(contextw_inp)\n', (18668, 18682), True, 'import numpy as np\n'), ((18684, 18704), 'numpy.array', 'np.array', (['queryw_inp'], {}), '(queryw_inp)\n', (18692, 18704), True, 'import numpy as np\n'), ((18706, 18728), 'numpy.array', 'np.array', (['contextc_inp'], {}), '(contextc_inp)\n', (18714, 18728), True, 'import numpy as np\n'), ((18730, 18750), 'numpy.array', 'np.array', (['queryc_inp'], {}), '(queryc_inp)\n', (18738, 18750), True, 'import numpy as np\n'), ((18780, 18792), 'numpy.array', 'np.array', (['p1'], {}), '(p1)\n', (18788, 18792), True, 'import numpy as np\n'), ((18794, 18806), 'numpy.array', 'np.array', (['p2'], {}), '(p2)\n', (18802, 18806), True, 'import numpy as np\n'), ((18808, 18823), 'numpy.array', 'np.array', (['start'], {}), '(start)\n', (18816, 18823), True, 'import numpy as np\n'), ((18825, 18838), 'numpy.array', 'np.array', (['end'], {}), '(end)\n', (18833, 18838), True, 'import numpy as np\n'), ((21543, 21599), 'numpy.save', 'np.save', (["(np_path + '/%s_ques_ids.npy' % prefix)", 'ques_ids'], {}), "(np_path + '/%s_ques_ids.npy' % prefix, ques_ids)\n", (21550, 21599), True, 'import numpy as np\n'), ((21612, 21668), 'numpy.save', 'np.save', (["(np_path + '/%s_contextw.npy' % prefix)", 'contextw'], {}), "(np_path + '/%s_contextw.npy' % prefix, contextw)\n", (21619, 21668), True, 'import numpy as np\n'), ((21681, 21733), 'numpy.save', 'np.save', (["(np_path + '/%s_queryw.npy' % prefix)", 'queryw'], {}), "(np_path + '/%s_queryw.npy' % prefix, queryw)\n", (21688, 21733), True, 'import numpy as np\n'), ((21746, 21802), 'numpy.save', 'np.save', 
(["(np_path + '/%s_contextc.npy' % prefix)", 'contextc'], {}), "(np_path + '/%s_contextc.npy' % prefix, contextc)\n", (21753, 21802), True, 'import numpy as np\n'), ((21815, 21867), 'numpy.save', 'np.save', (["(np_path + '/%s_queryc.npy' % prefix)", 'queryc'], {}), "(np_path + '/%s_queryc.npy' % prefix, queryc)\n", (21822, 21867), True, 'import numpy as np\n'), ((21880, 21924), 'numpy.save', 'np.save', (["(np_path + '/%s_p1.npy' % prefix)", 'p1'], {}), "(np_path + '/%s_p1.npy' % prefix, p1)\n", (21887, 21924), True, 'import numpy as np\n'), ((21937, 21981), 'numpy.save', 'np.save', (["(np_path + '/%s_p2.npy' % prefix)", 'p2'], {}), "(np_path + '/%s_p2.npy' % prefix, p2)\n", (21944, 21981), True, 'import numpy as np\n'), ((21994, 22044), 'numpy.save', 'np.save', (["(np_path + '/%s_start.npy' % prefix)", 'start'], {}), "(np_path + '/%s_start.npy' % prefix, start)\n", (22001, 22044), True, 'import numpy as np\n'), ((22057, 22103), 'numpy.save', 'np.save', (["(np_path + '/%s_end.npy' % prefix)", 'end'], {}), "(np_path + '/%s_end.npy' % prefix, end)\n", (22064, 22103), True, 'import numpy as np\n'), ((23110, 23162), 'numpy.save', 'np.save', (["(np_path + '/word_vectors.npy')", 'word_vectors'], {}), "(np_path + '/word_vectors.npy', word_vectors)\n", (23117, 23162), True, 'import numpy as np\n'), ((23175, 23227), 'numpy.save', 'np.save', (["(np_path + '/char_vectors.npy')", 'char_vectors'], {}), "(np_path + '/char_vectors.npy', char_vectors)\n", (23182, 23227), True, 'import numpy as np\n'), ((23804, 23850), 'numpy.load', 'np.load', (["(np_path + '/%s_ques_ids.npy' % prefix)"], {}), "(np_path + '/%s_ques_ids.npy' % prefix)\n", (23811, 23850), True, 'import numpy as np\n'), ((23874, 23920), 'numpy.load', 'np.load', (["(np_path + '/%s_contextw.npy' % prefix)"], {}), "(np_path + '/%s_contextw.npy' % prefix)\n", (23881, 23920), True, 'import numpy as np\n'), ((23942, 23986), 'numpy.load', 'np.load', (["(np_path + '/%s_queryw.npy' % prefix)"], {}), "(np_path + '/%s_queryw.npy' % prefix)\n", (23949, 23986), True, 'import numpy as np\n'), ((24010, 24056), 'numpy.load', 'np.load', (["(np_path + '/%s_contextc.npy' % prefix)"], {}), "(np_path + '/%s_contextc.npy' % prefix)\n", (24017, 24056), True, 'import numpy as np\n'), ((24078, 24122), 'numpy.load', 'np.load', (["(np_path + '/%s_queryc.npy' % prefix)"], {}), "(np_path + '/%s_queryc.npy' % prefix)\n", (24085, 24122), True, 'import numpy as np\n'), ((24140, 24180), 'numpy.load', 'np.load', (["(np_path + '/%s_p1.npy' % prefix)"], {}), "(np_path + '/%s_p1.npy' % prefix)\n", (24147, 24180), True, 'import numpy as np\n'), ((24198, 24238), 'numpy.load', 'np.load', (["(np_path + '/%s_p2.npy' % prefix)"], {}), "(np_path + '/%s_p2.npy' % prefix)\n", (24205, 24238), True, 'import numpy as np\n'), ((24259, 24302), 'numpy.load', 'np.load', (["(np_path + '/%s_start.npy' % prefix)"], {}), "(np_path + '/%s_start.npy' % prefix)\n", (24266, 24302), True, 'import numpy as np\n'), ((24321, 24362), 'numpy.load', 'np.load', (["(np_path + '/%s_end.npy' % prefix)"], {}), "(np_path + '/%s_end.npy' % prefix)\n", (24328, 24362), True, 'import numpy as np\n'), ((24838, 24876), 'numpy.load', 'np.load', (["(np_path + '/word_vectors.npy')"], {}), "(np_path + '/word_vectors.npy')\n", (24845, 24876), True, 'import numpy as np\n'), ((24904, 24942), 'numpy.load', 'np.load', (["(np_path + '/char_vectors.npy')"], {}), "(np_path + '/char_vectors.npy')\n", (24911, 24942), True, 'import numpy as np\n'), ((4154, 4193), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', 
'[emb_size]'], {}), '(0.0, 1.0, [emb_size])\n', (4171, 4193), True, 'import numpy as np\n'), ((22305, 22325), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (22316, 22325), False, 'import pickle\n'), ((22853, 22876), 'os.path.exists', 'os.path.exists', (['np_path'], {}), '(np_path)\n', (22867, 22876), False, 'import os\n'), ((22894, 22914), 'os.makedirs', 'os.makedirs', (['np_path'], {}), '(np_path)\n', (22905, 22914), False, 'import os\n'), ((24667, 24681), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (24678, 24681), False, 'import pickle\n'), ((17248, 17290), 'numpy.zeros', 'np.zeros', (['cf.CONTEXT_LEN'], {'dtype': 'np.float16'}), '(cf.CONTEXT_LEN, dtype=np.float16)\n', (17256, 17290), True, 'import numpy as np\n'), ((17413, 17455), 'numpy.zeros', 'np.zeros', (['cf.CONTEXT_LEN'], {'dtype': 'np.float16'}), '(cf.CONTEXT_LEN, dtype=np.float16)\n', (17421, 17455), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from elasticsearch import Elasticsearch
class ElasticAdapter(object):
""" Abstraction in case we will need to add another or change elastic
driver.
"""
def __init__(self, hosts, **es_params):
self.es = Elasticsearch(hosts, **es_params)
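# Hypothetical usage (kwargs are illustrative elasticsearch-py parameters):
# adapter = ElasticAdapter(['localhost:9200'], timeout=30)
# adapter.es.search(index='my-index', body={'query': {'match_all': {}}})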
| [
"elasticsearch.Elasticsearch"
] | [((258, 291), 'elasticsearch.Elasticsearch', 'Elasticsearch', (['hosts'], {}), '(hosts, **es_params)\n', (271, 291), False, 'from elasticsearch import Elasticsearch\n')] |
from mcfunction.versions.mc_1_14.loot import loot, ParsedLootCommand
from mcfunction.nodes import EntityNode, PositionNode
def test_loot_spawn():
parsed = loot.parse('loot spawn 0 0 0 kill @e')
parsed: ParsedLootCommand
assert parsed.target_type.value == 'spawn'
assert isinstance(parsed.target, PositionNode)
assert parsed.source_type.value == 'kill'
assert isinstance(parsed.source, EntityNode)
assert str(parsed) == 'loot spawn 0 0 0 kill @e'
def test_loot_replace():
parsed = loot.parse('loot replace entity @s hotbar.slot_number.0 9 '
'kill @e')
parsed: ParsedLootCommand
assert parsed.target_type.value == 'replace'
assert parsed.target_type2.value == 'entity'
assert isinstance(parsed.target, EntityNode)
assert parsed.slot.value == 'hotbar.slot_number.0'
assert parsed.count.value == 9
assert str(parsed) == 'loot replace entity @s hotbar.slot_number.0 9 ' \
'kill @e'
def test_loot_fish():
parsed = loot.parse('loot spawn 0 0 0 fish test:loot_table 0 0 0')
parsed: ParsedLootCommand
assert parsed.source_type.value == 'fish'
assert parsed.source.namespace == 'test'
assert parsed.source.name == 'loot_table'
assert isinstance(parsed.source_position, PositionNode)
assert str(parsed) == 'loot spawn 0 0 0 fish test:loot_table 0 0 0'
def test_loot_fish_tool():
parsed = loot.parse('loot spawn 0 0 0 fish test:loot_table 0 0 0 mainhand')
parsed: ParsedLootCommand
assert parsed.source_tool.value == 'mainhand'
assert str(parsed) == 'loot spawn 0 0 0 fish test:loot_table 0 0 0 ' \
'mainhand'
def test_loot_mine():
parsed = loot.parse('loot spawn 0 0 0 mine 0 0 0 mainhand')
parsed: ParsedLootCommand
assert parsed.source_tool.value == 'mainhand'
assert str(parsed) == 'loot spawn 0 0 0 mine 0 0 0 mainhand'
| [
"mcfunction.versions.mc_1_14.loot.loot.parse"
] | [((162, 200), 'mcfunction.versions.mc_1_14.loot.loot.parse', 'loot.parse', (['"""loot spawn 0 0 0 kill @e"""'], {}), "('loot spawn 0 0 0 kill @e')\n", (172, 200), False, 'from mcfunction.versions.mc_1_14.loot import loot, ParsedLootCommand\n'), ((519, 586), 'mcfunction.versions.mc_1_14.loot.loot.parse', 'loot.parse', (['"""loot replace entity @s hotbar.slot_number.0 9 kill @e"""'], {}), "('loot replace entity @s hotbar.slot_number.0 9 kill @e')\n", (529, 586), False, 'from mcfunction.versions.mc_1_14.loot import loot, ParsedLootCommand\n'), ((1033, 1090), 'mcfunction.versions.mc_1_14.loot.loot.parse', 'loot.parse', (['"""loot spawn 0 0 0 fish test:loot_table 0 0 0"""'], {}), "('loot spawn 0 0 0 fish test:loot_table 0 0 0')\n", (1043, 1090), False, 'from mcfunction.versions.mc_1_14.loot import loot, ParsedLootCommand\n'), ((1434, 1500), 'mcfunction.versions.mc_1_14.loot.loot.parse', 'loot.parse', (['"""loot spawn 0 0 0 fish test:loot_table 0 0 0 mainhand"""'], {}), "('loot spawn 0 0 0 fish test:loot_table 0 0 0 mainhand')\n", (1444, 1500), False, 'from mcfunction.versions.mc_1_14.loot import loot, ParsedLootCommand\n'), ((1732, 1782), 'mcfunction.versions.mc_1_14.loot.loot.parse', 'loot.parse', (['"""loot spawn 0 0 0 mine 0 0 0 mainhand"""'], {}), "('loot spawn 0 0 0 mine 0 0 0 mainhand')\n", (1742, 1782), False, 'from mcfunction.versions.mc_1_14.loot import loot, ParsedLootCommand\n')] |
import configparser as parser
import random
class config:
# load the configuration file
def __init__(self, config_filename):
self.load_config(config_filename)
def load_config(self, config_filename):
# create a config parser
config = parser.ConfigParser()
config.optionxform = str
# read the file
config.read(config_filename)
# read the values
dictionary = {}
for section in config.sections():
print('Found section: ' + section)
dictionary[section] = {}
for option in config.options(section):
dictionary[section][option] = config.get(section, option).splitlines()
self.phrases = dictionary['phrases']
if 'defaults' in dictionary and 'subjects' in dictionary:
self.has_subjects = True
self.defaults = dictionary['defaults']
self.subjects = dictionary['subjects']
for subject in self.subjects:
self.subjects[subject] = self.subjects[subject][0].split(',')
print('loaded defaults and subjects')
else:
self.has_subjects = False
def create_subjects(self, number = 0):
if number == 0:
number = int(self.defaults['num_subjects'][0])
if self.has_subjects:
first_subject = random.choice(list(self.subjects))
subjects = [first_subject]
for i in range(1,number):
subjects.append(self.get_adjacent_subject(subjects[i-1]))
self.current_subjects = subjects
else:
pass
def get_adjacent_subject(self, subject):
node = self.subjects[subject]
return random.choice(node)
def get_subject(self):
return random.choice(self.current_subjects)
def get_phrase(self, key):
try:
string_to_return = random.choice(self.phrases[key])
if string_to_return == 'none':
return ''
else:
return string_to_return
except (KeyError, IndexError):
print('Could not find phrases with key ' + key)
return ''
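# A hypothetical INI layout this loader expects (section names come from the
# lookups above; option names and values are illustrative):
# [phrases]
# greeting = Hello
#     Hi there
# [defaults]
# num_subjects = 2
# [subjects]
# math = physics,logic
# physics = math
# logic = math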
| [
"random.choice",
"configparser.ConfigParser"
] | [((271, 292), 'configparser.ConfigParser', 'parser.ConfigParser', ([], {}), '()\n', (290, 292), True, 'import configparser as parser\n'), ((1728, 1747), 'random.choice', 'random.choice', (['node'], {}), '(node)\n', (1741, 1747), False, 'import random\n'), ((1791, 1827), 'random.choice', 'random.choice', (['self.current_subjects'], {}), '(self.current_subjects)\n', (1804, 1827), False, 'import random\n'), ((1904, 1936), 'random.choice', 'random.choice', (['self.phrases[key]'], {}), '(self.phrases[key])\n', (1917, 1936), False, 'import random\n')] |
import numpy
from kapteyn import maputils
from matplotlib.pyplot import show, figure
import csv # Read some positions from file in Comma Separated Values format
# Some initializations
blankcol = "#334455" # Represent undefined values by this color
epsilon = 0.0000000001
figsize = (9,7) # Figure size in inches
plotbox = (0.1,0.05,0.8,0.8)
fig = figure(figsize=figsize)
frame = fig.add_axes(plotbox)
Basefits = maputils.FITSimage("allsky_raw.fits") # Here is your downloaded FITS file in rectangular coordinates
Basefits.hdr['CTYPE1'] = 'GLON-CAR' # For transformations we need to give it a projection type
Basefits.hdr['CTYPE2'] = 'GLAT-CAR' # CAR is rectangular
# Use some header values to define reprojection parameters
cdelt1 = Basefits.hdr['CDELT1']
cdelt2 = Basefits.hdr['CDELT2']
naxis1 = Basefits.hdr['NAXIS1']
naxis2 = Basefits.hdr['NAXIS2']
# Header works only with a patched wcslib 4.3
# Note that changing CRVAL1 to 180 degrees shifts the plot 180 deg.
header = {'NAXIS' : 2, 'NAXIS1': naxis1, 'NAXIS2': naxis2,
'CTYPE1' : 'GLON-AIT',
'CRVAL1' : 0, 'CRPIX1' : naxis1//2, 'CUNIT1' : 'deg', 'CDELT1' : cdelt1,
'CTYPE2' : 'GLAT-AIT',
'CRVAL2' : 30.0, 'CRPIX2' : naxis2//2, 'CUNIT2' : 'deg', 'CDELT2' : cdelt2,
'LONPOLE' :60.0,
'PV1_1' : 0.0, 'PV1_2' : 90.0, # IMPORTANT. This is a setting from Cal.section 7.1, p 1103
}
Reprojfits = Basefits.reproject_to(header)
annim_rep = Reprojfits.Annotatedimage(frame)
annim_rep.set_colormap("heat.lut") # Set color map before creating Image object
annim_rep.set_blankcolor(blankcol) # Background are NaN's (blanks). Set color here
annim_rep.Image(vmin=30000, vmax=150000) # Just a selection of two clip levels
annim_rep.plot()
# Draw the graticule, but do not cover near -90 to prevent ambiguity
X = numpy.arange(0,390.0,15.0);
Y = numpy.arange(-75,90,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs0(0, color='w', lw=2)
grat.setp_lineswcs1(0, color='w', lw=2)
# Draw border with standard graticule, just to make the borders look smooth
header['CRVAL1'] = 0.0
header['CRVAL2'] = 0.0
del header['PV1_1']
del header['PV1_2']
header['LONPOLE'] = 0.0
header['LATPOLE'] = 0.0
border = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=(180-epsilon, -180+epsilon), skipy=True)
border.setp_lineswcs0(color='w', lw=2) # Show borders in arbitrary color (e.g. background color)
border.setp_lineswcs1(color='w', lw=2)
# Plot the 'inside' graticules
lon_constval = 0.0
lat_constval = 0.0
lon_fmt = 'Dms'; lat_fmt = 'Dms' # Only Degrees must be plotted
addangle0 = addangle1=0.0
deltapx0 = deltapx1 = 1.0
labkwargs0 = {'color':'r', 'va':'center', 'ha':'center'}
labkwargs1 = {'color':'r', 'va':'center', 'ha':'center'}
lon_world = list(range(0,360,30))
lat_world = [-60, -30, 30, 60]
ilabs1 = grat.Insidelabels(wcsaxis=0,
world=lon_world, constval=lat_constval,
deltapx=1.0, deltapy=1.0,
addangle=addangle0, fmt=lon_fmt, **labkwargs0)
ilabs2 = grat.Insidelabels(wcsaxis=1,
world=lat_world, constval=lon_constval,
deltapx=1.0, deltapy=1.0,
addangle=addangle1, fmt=lat_fmt, **labkwargs1)
# Read marker positions (in 0h0m0s 0d0m0s format) from file
reader = csv.reader(open("positions.txt"), delimiter=' ', skipinitialspace=True)
for line in reader:
if line:
hms, dms = line
postxt = "{eq fk4-no-e} "+hms+" {} "+dms # Define the sky system of the source
print(postxt)
annim.Marker(pos=postxt, marker='*', color='yellow', ms=20)
# Plot a title
titlepos = 1.02
title = r"""All sky map in Hammer Aitoff projection (AIT) oblique with:
$(\alpha_p,\delta_p) = (0^\circ,30^\circ)$, $\phi_p = 75^\circ$ also:
$(\phi_0,\theta_0) = (0^\circ,90^\circ)$."""
t = frame.set_title(title, color='g', fontsize=13, linespacing=1.5)
t.set_y(titlepos)
annim.plot()
annim.interact_toolbarinfo()
annim_rep.interact_imagecolors()
show() | [
"matplotlib.pyplot.figure",
"kapteyn.maputils.FITSimage",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((387, 410), 'matplotlib.pyplot.figure', 'figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (393, 410), False, 'from matplotlib.pyplot import show, figure\n'), ((453, 490), 'kapteyn.maputils.FITSimage', 'maputils.FITSimage', (['"""allsky_raw.fits"""'], {}), "('allsky_raw.fits')\n", (471, 490), False, 'from kapteyn import maputils\n'), ((1933, 1961), 'numpy.arange', 'numpy.arange', (['(0)', '(390.0)', '(15.0)'], {}), '(0, 390.0, 15.0)\n', (1945, 1961), False, 'import numpy\n'), ((1966, 1993), 'numpy.arange', 'numpy.arange', (['(-75)', '(90)', '(15.0)'], {}), '(-75, 90, 15.0)\n', (1978, 1993), False, 'import numpy\n'), ((1996, 2037), 'kapteyn.maputils.FITSimage', 'maputils.FITSimage', ([], {'externalheader': 'header'}), '(externalheader=header)\n', (2014, 2037), False, 'from kapteyn import maputils\n'), ((4320, 4326), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (4324, 4326), False, 'from matplotlib.pyplot import show, figure\n')] |
import sys
import gtk
from datetime import datetime
import gobject
from threading import Thread
class uiSignalHelpers(object):
def __init__(self, *args, **kwargs):
super(uiSignalHelpers, self).__init__(*args, **kwargs)
#print 'signal helpers __init__'
def callback(self, *args, **kwargs):
super(uiSignalHelpers, self).callback(*args, **kwargs)
#print 'signal helpers callback'
def gtk_widget_show(self, w, e = None):
w.show()
return True
def gtk_widget_hide(self, w, e = None):
w.hide()
return True
def information_message(self, widget, message, cb = None):
self.attention = "INFO: %s" % message
messagedialog = gtk.MessageDialog(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO, gtk.BUTTONS_OK, message)
messagedialog.connect("delete-event", lambda w, e: w.hide() or True)
if cb:
messagedialog.connect("response", cb)
messagedialog.set_default_response(gtk.RESPONSE_OK)
messagedialog.show()
messagedialog.present()
return messagedialog
def error_message(self, widget, message):
self.attention = "ERROR: %s" % message
messagedialog = gtk.MessageDialog(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR, gtk.BUTTONS_CANCEL, message)
messagedialog.run()
messagedialog.destroy()
def warning_message(self, widget, message):
self.attention = "WARNING: %s" % message
messagedialog = gtk.MessageDialog(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK_CANCEL, message)
messagedialog.show()
messagedialog.present()
messagedialog.run()
messagedialog.destroy()
def question_message(self, widget, message, cb = None):
self.attention = "QUESTION: %s" % message
messagedialog = gtk.MessageDialog(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, message)
messagedialog.connect("delete-event", lambda w, e: w.hide() or True)
if cb:
messagedialog.connect("response", cb)
messagedialog.set_default_response(gtk.RESPONSE_YES)
messagedialog.show()
messagedialog.present()
return messagedialog
def interval_dialog(self, message):
if not self.interval_dialog_showing:
if not self.timetracker_window.is_active():
self.timetracker_window.show()
self.timetracker_window.present()
self.interval_dialog_showing = True
return self.question_message(self.timetracker_window, message, self.on_interval_dialog)
return None
def stop_interval_dialog(self, message):
if not self.stop_interval_dialog_showing:
if not self.timetracker_window.is_active():
self.timetracker_window.show()
self.timetracker_window.present()
self.stop_interval_dialog_showing = True
return self.information_message(self.timetracker_window, message, self.on_stopped)
return None
def set_custom_label(self, widget, text):
#set custom label on stock button
Label = widget.get_children()[0]
Label = Label.get_children()[0].get_children()[1]
Label = Label.set_label(text)
def window_state(self, widget, state):
self.timetracker_window_state = state.new_window_state
class uiSignals(uiSignalHelpers):
def __init__(self, *args, **kwargs):
super(uiSignals, self).__init__(*args, **kwargs)
#these are components defined inside the ui file
#print 'signals __init__'
self.preferences_window.connect('delete-event', lambda w, e: w.hide() or True)
self.timetracker_window.connect('delete-event', lambda w, e: w.hide() or True)
self.timetracker_window.connect('destroy', lambda w, e: w.hide() or True)
self.timetracker_window.connect("window-state-event", self.window_state)
self.about_dialog.connect("delete-event", lambda w, e: w.hide() or True)
self.about_dialog.connect("response", lambda w, e: w.hide() or True)
self.notes_textview.connect('key_press_event', self.on_textview_ctrl_enter)
def callback(self, *args, **kwargs): #stub
super(uiSignals, self).callback(*args, **kwargs) #executed after init, hopefully this will let me inject interrupts
#print 'signals callback'
self.icon.connect('activate', self.left_click)
self.icon.connect("popup-menu", self.right_click)
if sys.platform == "win32":
from gtkwin32 import GTKWin32Ext
self.timetracker_window.realize()
self.win32ext = GTKWin32Ext(self.timetracker_window)
self.win32ext.add_notify_icon()
def before_init(self): #stub for later
#print 'signals before init'
pass
def after_init(self): #init any other callback we can't setup in the actual init phase
#print 'signals after init'
self.project_combobox_handler = self.project_combobox.connect('changed', self.on_project_combobox_changed)
self.task_combobox_handler = self.task_combobox.connect('changed', self.on_task_combobox_changed)
def on_show_about_dialog(self, widget):
self.about_dialog.show()
def on_interval_dialog(self, dialog, a): #interval_dialog callback
if a == gtk.RESPONSE_NO:
self.refresh_and_show()
else:
#keep the timer running
self.running = True
self.current_selected_project_id = self.last_project_id
self.current_selected_task_id = self.last_task_id
self.current_notes = self.get_notes(self.last_notes)
self.current_hours = "%0.02f" % round(float(self.last_hours) + float(self.interval), 2)
self.current_text = self.last_text
self.current_entry_id = self.last_entry_id
entry = self.harvest.update(self.current_entry_id, {#append to existing timer
'notes': self.current_notes,
'hours': self.current_hours,
'project_id': self.current_project_id,
'task_id': self.current_task_id
})
self.refresh_and_show()
self.timetracker_window.hide() #hide timetracker and continue task
dialog.destroy()
self.attention = None
self.interval_dialog_showing = False
def on_textview_ctrl_enter(self, widget, event):
'''
submit clicked event on ctrl+enter in notes textview
'''
if event.state & gtk.gdk.CONTROL_MASK and \
gtk.gdk.keyval_name(event.keyval) == "Return":
self.submit_button.emit('clicked')
def on_stopped(self, dialog):
if not self.timetracker_window.is_active():
self.timetracker_window.show()
self.timetracker_window.present()
dialog.destroy()
self.attention = None
self.stop_interval_dialog_showing = False
def on_save_preferences_button_clicked(self, widget):
if self.running: #if running it will turn off, lets empty the comboboxes
#stop the timer
#self.toggle_current_timer(self.current_entry_id) #maybe add pref option to kill timer on pref change?
if self.interval_dialog_instance:
self.interval_dialog_instance.hide() #hide the dialog
self.stop_and_refactor_time()
self.get_prefs()
if self.connect_to_harvest():
self.preferences_window.hide()
self.timetracker_window.show()
self.timetracker_window.present()
def on_task_combobox_changed(self, widget):
new_idx = widget.get_active()
if new_idx != -1:
if new_idx != self.current_selected_task_idx: #-1 is sent from pygtk loop or something
self.current_selected_task_id = self.get_combobox_selection(widget)
self.current_selected_task_idx = new_idx
self.refresh_comboboxes()
def on_project_combobox_changed(self, widget):
self.current_selected_project_id = self.get_combobox_selection(widget)
new_idx = widget.get_active()
if new_idx != -1:
#reset task when new project is selected
self.current_selected_project_idx = new_idx
self.current_selected_task_id = None
self.current_selected_task_idx = 0
self.refresh_comboboxes()
def on_show_preferences(self, widget):
self.preferences_window.show()
self.preferences_window.present()
def on_away_from_desk(self, widget):
#toggle away state
if self.running:
self.away_from_desk = True if not self.away_from_desk else False
def on_check_for_updates(self, widget):
pass
def on_top(self, widget):
self.always_on_top = False if self.always_on_top else True
self.timetracker_window.set_keep_above(self.always_on_top)
def on_submit_button_clicked(self, widget):
self.away_from_desk = False
self.attention = None
self.append_add_entry()
self.set_textview_text(self.notes_textview, "")
self.notes_textview.grab_focus()
def on_stop_timer(self, widget):
self.stop_and_refactor_time()
def on_quit(self, widget):
if self.running and self.harvest:
self.harvest.toggle_timer(self.current_entry_id)
gtk.main_quit()
def refresh_and_show(self):
self.set_entries()
self.timetracker_window.show()
self.timetracker_window.present()
self.notes_textview.grab_focus()
def on_refresh(self, widget):
self.refresh_and_show()
def left_click(self, widget):
self.refresh_and_show()
def right_click(self, widget, button, time):
#create popup menu
menu = gtk.Menu()
refresh = gtk.ImageMenuItem(gtk.STOCK_REFRESH)
refresh.connect("activate", self.on_refresh)
menu.append(refresh)
if self.running:
stop_timer = gtk.MenuItem("Stop Timer")
stop_timer.connect("activate", self.on_stop_timer)
menu.append(stop_timer)
if not self.away_from_desk:
away = gtk.ImageMenuItem(gtk.STOCK_NO)
away.set_label("Away from desk")
else:
away = gtk.ImageMenuItem(gtk.STOCK_YES)
away.set_label("Back at desk")
away.connect("activate", self.on_away_from_desk)
menu.append(away)
top = gtk.MenuItem("Always on top")
prefs = gtk.MenuItem("Preferences")
about = gtk.MenuItem("About")
quit = gtk.MenuItem("Quit")
top.connect("activate", self.on_top)
prefs.connect("activate", self.on_show_preferences)
about.connect("activate", self.on_show_about_dialog)
quit.connect("activate", self.on_quit)
menu.append(prefs)
menu.append(top)
menu.append(about)
menu.append(quit)
menu.show_all()
menu.popup(None, None, gtk.status_icon_position_menu, button, time, self.icon)
| [
"gtk.ImageMenuItem",
"gtk.main_quit",
"gtk.MessageDialog",
"gtkwin32.GTKWin32Ext",
"gtk.Menu",
"gtk.gdk.keyval_name",
"gtk.MenuItem"
] | [((726, 849), 'gtk.MessageDialog', 'gtk.MessageDialog', (['widget', '(gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT)', 'gtk.MESSAGE_INFO', 'gtk.BUTTONS_OK', 'message'], {}), '(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n gtk.MESSAGE_INFO, gtk.BUTTONS_OK, message)\n', (743, 849), False, 'import gtk\n'), ((1258, 1386), 'gtk.MessageDialog', 'gtk.MessageDialog', (['widget', '(gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT)', 'gtk.MESSAGE_ERROR', 'gtk.BUTTONS_CANCEL', 'message'], {}), '(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n gtk.MESSAGE_ERROR, gtk.BUTTONS_CANCEL, message)\n', (1275, 1386), False, 'import gtk\n'), ((1565, 1698), 'gtk.MessageDialog', 'gtk.MessageDialog', (['widget', '(gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT)', 'gtk.MESSAGE_WARNING', 'gtk.BUTTONS_OK_CANCEL', 'message'], {}), '(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n gtk.MESSAGE_WARNING, gtk.BUTTONS_OK_CANCEL, message)\n', (1582, 1698), False, 'import gtk\n'), ((1951, 2082), 'gtk.MessageDialog', 'gtk.MessageDialog', (['widget', '(gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT)', 'gtk.MESSAGE_QUESTION', 'gtk.BUTTONS_YES_NO', 'message'], {}), '(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, message)\n', (1968, 2082), False, 'import gtk\n'), ((9596, 9611), 'gtk.main_quit', 'gtk.main_quit', ([], {}), '()\n', (9609, 9611), False, 'import gtk\n'), ((10020, 10030), 'gtk.Menu', 'gtk.Menu', ([], {}), '()\n', (10028, 10030), False, 'import gtk\n'), ((10050, 10086), 'gtk.ImageMenuItem', 'gtk.ImageMenuItem', (['gtk.STOCK_REFRESH'], {}), '(gtk.STOCK_REFRESH)\n', (10067, 10086), False, 'import gtk\n'), ((10719, 10748), 'gtk.MenuItem', 'gtk.MenuItem', (['"""Always on top"""'], {}), "('Always on top')\n", (10731, 10748), False, 'import gtk\n'), ((10766, 10793), 'gtk.MenuItem', 'gtk.MenuItem', (['"""Preferences"""'], {}), "('Preferences')\n", (10778, 10793), False, 'import gtk\n'), ((10810, 10831), 'gtk.MenuItem', 'gtk.MenuItem', (['"""About"""'], {}), "('About')\n", (10822, 10831), False, 'import gtk\n'), ((10847, 10867), 'gtk.MenuItem', 'gtk.MenuItem', (['"""Quit"""'], {}), "('Quit')\n", (10859, 10867), False, 'import gtk\n'), ((4809, 4845), 'gtkwin32.GTKWin32Ext', 'GTKWin32Ext', (['self.timetracker_window'], {}), '(self.timetracker_window)\n', (4820, 4845), False, 'from gtkwin32 import GTKWin32Ext\n'), ((10220, 10246), 'gtk.MenuItem', 'gtk.MenuItem', (['"""Stop Timer"""'], {}), "('Stop Timer')\n", (10232, 10246), False, 'import gtk\n'), ((6761, 6794), 'gtk.gdk.keyval_name', 'gtk.gdk.keyval_name', (['event.keyval'], {}), '(event.keyval)\n', (6780, 6794), False, 'import gtk\n'), ((10410, 10441), 'gtk.ImageMenuItem', 'gtk.ImageMenuItem', (['gtk.STOCK_NO'], {}), '(gtk.STOCK_NO)\n', (10427, 10441), False, 'import gtk\n'), ((10532, 10564), 'gtk.ImageMenuItem', 'gtk.ImageMenuItem', (['gtk.STOCK_YES'], {}), '(gtk.STOCK_YES)\n', (10549, 10564), False, 'import gtk\n')] |
import sys
import torch.nn as nn
from torchsummary import summary
from torchvision.models import vgg19, resnet50, densenet161, googlenet, inception_v3
from .MyCNN import MyCNN
def VGG19(all=False):
model = vgg19(pretrained=True)
# Freeze the parameters
if all is False:
for param in model.parameters():
param.requires_grad = False
# Replace the last fully-connected layer
# Parameters of newly constructed modules have requires_grad=True by default
model.classifier[6] = nn.Linear(4096, 2)
return model
def VGG19_2(all=False):
model = vgg19(pretrained=True)
# Freeze the parameters
if all is False:
for param in model.parameters():
param.requires_grad = False
model.classifier[3] = nn.Linear(4096, 1024)
model.classifier[6] = nn.Linear(1024, 2)
return model
def ResNet(all=False):
model = resnet50(pretrained=True)
# Freeze the parameters
if all is False:
for param in model.parameters():
param.requires_grad = False
# Modify the output of the fully-connected layer
model.fc = nn.Linear(2048, 2)
return model
def Densenet(all=False):
model = densenet161()
# Freeze the parameters
if all is False:
for param in model.parameters():
param.requires_grad = False
# Modify the output of the fully-connected layer
model.classifier = nn.Linear(2208, 2)
return model
def GoogleNet(all=False):
model = googlenet(pretrained=True)
# Freeze the parameters
if all is False:
for param in model.parameters():
param.requires_grad = False
# Replace the last fully-connected layer
# Parameters of newly constructed modules have requires_grad=True by default
model.fc = nn.Linear(1024, 2)
return model
def inceptionv3(all=False):
model = inception_v3(pretrained=True, aux_logits=False)
# Freeze the parameters
if all is False:
for param in model.parameters():
param.requires_grad = False
# Replace the last fully-connected layer
# Parameters of newly constructed modules have requires_grad=True by default
model.fc = nn.Linear(2048, 2)
return model
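# All factories above share one transfer-learning pattern: load a torchvision
# backbone (pretrained except Densenet as written), optionally freeze its weights
# (the default all=False freezes them), and replace the final classifier layer
# with a fresh 2-class head whose parameters remain trainable.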
class Model():
model_list = ['VGG19', 'VGG19_2', 'ResNet', 'MyCNN', 'Densenet', 'GoogleNet', 'inceptionv3']
def get_model_list(self):
return self.model_list
def check_model_name(self, name):
if name not in self.model_list:
model_string = '\', \''.join(self.model_list)
sys.exit(f"ModelNameError: '{name}' is not acceptable. The acceptable models are \'{model_string}\'.")
def model_builder(self, model_name, train_all=False):
# check if model name is acceptable
self.check_model_name(model_name)
# load model
model = globals()[model_name](train_all)
return model
if __name__ == '__main__':
# model_list= ['VGG19', 'VGG19_2', 'ResNet', 'MyCNN', 'Densenet', 'GoogleNet', 'inceptionv3']
model = Model().model_builder(Model().get_model_list()[3])
summary(model, input_size=(3,224,224), batch_size=1, device="cpu")
# print(model)
pass | [
"torchvision.models.vgg19",
"torchvision.models.googlenet",
"torchvision.models.densenet161",
"torchvision.models.inception_v3",
"torch.nn.Linear",
"sys.exit",
"torchsummary.summary",
"torchvision.models.resnet50"
] | [((212, 234), 'torchvision.models.vgg19', 'vgg19', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (217, 234), False, 'from torchvision.models import vgg19, resnet50, densenet161, googlenet, inception_v3\n'), ((503, 521), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(2)'], {}), '(4096, 2)\n', (512, 521), True, 'import torch.nn as nn\n'), ((578, 600), 'torchvision.models.vgg19', 'vgg19', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (583, 600), False, 'from torchvision.models import vgg19, resnet50, densenet161, googlenet, inception_v3\n'), ((743, 764), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(1024)'], {}), '(4096, 1024)\n', (752, 764), True, 'import torch.nn as nn\n'), ((791, 809), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(2)'], {}), '(1024, 2)\n', (800, 809), True, 'import torch.nn as nn\n'), ((865, 890), 'torchvision.models.resnet50', 'resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (873, 890), False, 'from torchvision.models import vgg19, resnet50, densenet161, googlenet, inception_v3\n'), ((1038, 1056), 'torch.nn.Linear', 'nn.Linear', (['(2048)', '(2)'], {}), '(2048, 2)\n', (1047, 1056), True, 'import torch.nn as nn\n'), ((1114, 1127), 'torchvision.models.densenet161', 'densenet161', ([], {}), '()\n', (1125, 1127), False, 'from torchvision.models import vgg19, resnet50, densenet161, googlenet, inception_v3\n'), ((1283, 1301), 'torch.nn.Linear', 'nn.Linear', (['(2208)', '(2)'], {}), '(2208, 2)\n', (1292, 1301), True, 'import torch.nn as nn\n'), ((1360, 1386), 'torchvision.models.googlenet', 'googlenet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1369, 1386), False, 'from torchvision.models import vgg19, resnet50, densenet161, googlenet, inception_v3\n'), ((1644, 1662), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(2)'], {}), '(1024, 2)\n', (1653, 1662), True, 'import torch.nn as nn\n'), ((1723, 1770), 'torchvision.models.inception_v3', 'inception_v3', ([], {'pretrained': '(True)', 'aux_logits': '(False)'}), '(pretrained=True, aux_logits=False)\n', (1735, 1770), False, 'from torchvision.models import vgg19, resnet50, densenet161, googlenet, inception_v3\n'), ((2028, 2046), 'torch.nn.Linear', 'nn.Linear', (['(2048)', '(2)'], {}), '(2048, 2)\n', (2037, 2046), True, 'import torch.nn as nn\n'), ((2928, 2996), 'torchsummary.summary', 'summary', (['model'], {'input_size': '(3, 224, 224)', 'batch_size': '(1)', 'device': '"""cpu"""'}), "(model, input_size=(3, 224, 224), batch_size=1, device='cpu')\n", (2935, 2996), False, 'from torchsummary import summary\n'), ((2391, 2501), 'sys.exit', 'sys.exit', (['f"""ModelNameError: \'{name}\' is not acceptable. The acceptable models are \'{model_string}\'."""'], {}), '(\n f"ModelNameError: \'{name}\' is not acceptable. The acceptable models are \'{model_string}\'."\n )\n', (2399, 2501), False, 'import sys\n')] |
from selenium import webdriver
from fb_auth import auth
from logger import Logger
def main():
# TODO add possibility to login to different FB accounts (use csv file to store them)
# TODO handle all exceptions especially when account was blocked
# TODO save automatic screenshots from time to time
# TODO add native system logger from previous log parser project
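    # Create the shared logger, start a headless PhantomJS browser,
    # load facebook.com and authenticate with the stored credentials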
instance = Logger()
log = instance.get_instance()
driver = webdriver.PhantomJS("/home/username/node_modules/phantomjs-prebuilt/bin/phantomjs")
driver.get('https://www.facebook.com/')
auth(driver, log)
a = 1
if __name__ == '__main__':
main()
| [
"selenium.webdriver.PhantomJS",
"fb_auth.auth",
"logger.Logger"
] | [((396, 404), 'logger.Logger', 'Logger', ([], {}), '()\n', (402, 404), False, 'from logger import Logger\n'), ((453, 541), 'selenium.webdriver.PhantomJS', 'webdriver.PhantomJS', (['"""/home/username/node_modules/phantomjs-prebuilt/bin/phantomjs"""'], {}), "(\n '/home/username/node_modules/phantomjs-prebuilt/bin/phantomjs')\n", (472, 541), False, 'from selenium import webdriver\n'), ((586, 603), 'fb_auth.auth', 'auth', (['driver', 'log'], {}), '(driver, log)\n', (590, 603), False, 'from fb_auth import auth\n')] |
import uuid
import random
import os
import math
import numpy as np
import simpy
import matplotlib.pyplot as plt
from simulation.ghostdag import block
from simulation.ghostdag.dag import select_ghostdag_k, DAG
from simulation.fakes import FakeDAG
from simulation.channel import Hub, Channel, PlanarTopology
from simulation.helpers import print_dag, print_stats, save_to_json
from simulation.miner import Miner
from simulation.attack_miners import AttackMiner
simulation_time = 2 ** 12
with_attack = True
print_progress = True
def make_dag(genesis_hash, k):
return DAG(k=k, interval=(0, 2 ** 64 - 1), genesis_hash=genesis_hash)
def make_honest_miner(miner_channel, genesis_hash, k, _lambda, _alpha, miner_index, num_miners):
if miner_index == 0:
dag = make_dag(genesis_hash, k)
else:
dag = FakeDAG(genesis_hash=genesis_hash)
return Miner(k, _lambda, 1 / num_miners, dag, miner_channel)
def make_attack_miner(miner_channel, genesis_hash, k, _lambda, _alpha, miner_index, num_miners):
if miner_index == 0:
dag = make_dag(genesis_hash, k)
miner = Miner(-1, _lambda, (1 - _alpha) / (num_miners - 1), dag, miner_channel)
# Only miner 1 is the actual attacker
elif miner_index == 1:
dag = make_dag(genesis_hash, k)
miner = AttackMiner(-1, _lambda, _alpha, dag, miner_channel)
else:
dag = FakeDAG(genesis_hash=genesis_hash)
miner = Miner(-1, _lambda, (1 - _alpha) / (num_miners - 1), dag, miner_channel)
return miner
class Simulation:
def __init__(self, _alpha, _delta, _lambda, rows=3, cols=3, D_factor=1.0, k=None):
# Communication constants
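        # D_max is bounded by the grid diagonal (the two farthest miners)
        # plus the same 0.1 floor used for D_min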
self.D_min, self.D_max = 0.1, np.sqrt((rows * D_factor) ** 2 + (cols * D_factor) ** 2) + 0.1
# Mining parameters
self._alpha = _alpha
self._delta = _delta
self._lambda = _lambda
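        # Derive the GHOSTDAG k parameter from the worst-case delay-rate
        # product (2 * D_max * lambda) and the confidence parameter delta,
        # unless a fixed k was supplied by the caller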
if k is None:
self.k = select_ghostdag_k(2 * self.D_max * _lambda, _delta)
else:
self.k = k
# Simulation environment
self.env = simpy.Environment()
# Grid topology
self.topology = PlanarTopology(D_min=self.D_min, D_max=self.D_max)
self.hub = Hub(self.env, latency_oracle=self.topology.latency)
self.channels = []
for r in range(rows):
for c in range(cols):
channel = Channel(self.hub)
self.topology.channel_map[channel] = (r * D_factor, c * D_factor)
self.channels.append(channel)
def run_simulation(self, seed=22522):
# Setup and start the simulation
if seed is not None:
np.random.seed(seed)
random.seed(seed)
if print_progress:
print('\n=========\n')
print('GHOSTDAG simulation')
print('\n=========\n')
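        # uuid1().int yields a 128-bit integer, used here as the genesis block hash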
genesis_hash = uuid.uuid1().int
miners = []
block.reindex_size_trace = []
for i, channel in enumerate(self.channels):
s = str(self.topology.channel_map[channel])
if print_progress:
print('Miner %d coordinates: %s' % (i, s))
if with_attack:
miner = make_attack_miner(channel, genesis_hash, self.k, self._lambda, self._alpha, i, len(self.channels))
else:
miner = make_honest_miner(channel, genesis_hash, self.k, self._lambda, self._alpha, i, len(self.channels))
self.env.process(miner.mine(self.env))
self.env.process(miner.receive(self.env))
if i == 0 and print_progress:
self.env.process(miner.report(self.env))
miners.append(miner)
if print_progress:
print('\n=========\n')
self.env.run(until=simulation_time)
if print_progress:
print('\n=========\n')
return miners[0].dag
def main():
_lambda, _delta, _alpha = 1, 0.01, 0.01
simulation = Simulation(_alpha, _delta, _lambda)
dag = simulation.run_simulation()
# print_dag(dag)
print('\n=========\n')
# Print stats
print_stats(simulation.D_max, _delta, _lambda, dag, simulation.k)
print('\n=========\n')
plt.figure()
plt.plot(block.reindex_size_trace, linewidth=0.1)
plt.xlabel('time')
plt.ylabel('reindex size')
plt.show()
if not os.path.isdir('data'):
os.mkdir('data')
save_to_json(dag, file_name=os.path.join('data', 'dag.json'))
if __name__ == '__main__':
main()
# try:
# main()
# except Exception as ex:
# print(type(ex).__name__, ex) | [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"simulation.ghostdag.dag.DAG",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"simulation.channel.Hub",
"os.path.isdir",
"os.mkdir",
"numpy.random.seed",
"simpy.Environment",
"uuid.uuid1",
"matplotlib.pyplot.show",
"simulation.helpers.print_st... | [((570, 632), 'simulation.ghostdag.dag.DAG', 'DAG', ([], {'k': 'k', 'interval': '(0, 2 ** 64 - 1)', 'genesis_hash': 'genesis_hash'}), '(k=k, interval=(0, 2 ** 64 - 1), genesis_hash=genesis_hash)\n', (573, 632), False, 'from simulation.ghostdag.dag import select_ghostdag_k, DAG\n'), ((846, 899), 'simulation.miner.Miner', 'Miner', (['k', '_lambda', '(1 / num_miners)', 'dag', 'miner_channel'], {}), '(k, _lambda, 1 / num_miners, dag, miner_channel)\n', (851, 899), False, 'from simulation.miner import Miner\n'), ((3576, 3641), 'simulation.helpers.print_stats', 'print_stats', (['simulation.D_max', '_delta', '_lambda', 'dag', 'simulation.k'], {}), '(simulation.D_max, _delta, _lambda, dag, simulation.k)\n', (3587, 3641), False, 'from simulation.helpers import print_dag, print_stats, save_to_json\n'), ((3669, 3681), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3679, 3681), True, 'import matplotlib.pyplot as plt\n'), ((3683, 3732), 'matplotlib.pyplot.plot', 'plt.plot', (['block.reindex_size_trace'], {'linewidth': '(0.1)'}), '(block.reindex_size_trace, linewidth=0.1)\n', (3691, 3732), True, 'import matplotlib.pyplot as plt\n'), ((3734, 3752), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (3744, 3752), True, 'import matplotlib.pyplot as plt\n'), ((3754, 3780), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""reindex size"""'], {}), "('reindex size')\n", (3764, 3780), True, 'import matplotlib.pyplot as plt\n'), ((3782, 3792), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3790, 3792), True, 'import matplotlib.pyplot as plt\n'), ((803, 837), 'simulation.fakes.FakeDAG', 'FakeDAG', ([], {'genesis_hash': 'genesis_hash'}), '(genesis_hash=genesis_hash)\n', (810, 837), False, 'from simulation.fakes import FakeDAG\n'), ((1065, 1136), 'simulation.miner.Miner', 'Miner', (['(-1)', '_lambda', '((1 - _alpha) / (num_miners - 1))', 'dag', 'miner_channel'], {}), '(-1, _lambda, (1 - _alpha) / (num_miners - 1), dag, miner_channel)\n', (1070, 1136), False, 'from simulation.miner import Miner\n'), ((1908, 1927), 'simpy.Environment', 'simpy.Environment', ([], {}), '()\n', (1925, 1927), False, 'import simpy\n'), ((1965, 2015), 'simulation.channel.PlanarTopology', 'PlanarTopology', ([], {'D_min': 'self.D_min', 'D_max': 'self.D_max'}), '(D_min=self.D_min, D_max=self.D_max)\n', (1979, 2015), False, 'from simulation.channel import Hub, Channel, PlanarTopology\n'), ((2029, 2080), 'simulation.channel.Hub', 'Hub', (['self.env'], {'latency_oracle': 'self.topology.latency'}), '(self.env, latency_oracle=self.topology.latency)\n', (2032, 2080), False, 'from simulation.channel import Hub, Channel, PlanarTopology\n'), ((3802, 3823), 'os.path.isdir', 'os.path.isdir', (['"""data"""'], {}), "('data')\n", (3815, 3823), False, 'import os\n'), ((3827, 3843), 'os.mkdir', 'os.mkdir', (['"""data"""'], {}), "('data')\n", (3835, 3843), False, 'import os\n'), ((1244, 1296), 'simulation.attack_miners.AttackMiner', 'AttackMiner', (['(-1)', '_lambda', '_alpha', 'dag', 'miner_channel'], {}), '(-1, _lambda, _alpha, dag, miner_channel)\n', (1255, 1296), False, 'from simulation.attack_miners import AttackMiner\n'), ((1312, 1346), 'simulation.fakes.FakeDAG', 'FakeDAG', ([], {'genesis_hash': 'genesis_hash'}), '(genesis_hash=genesis_hash)\n', (1319, 1346), False, 'from simulation.fakes import FakeDAG\n'), ((1357, 1428), 'simulation.miner.Miner', 'Miner', (['(-1)', '_lambda', '((1 - _alpha) / (num_miners - 1))', 'dag', 'miner_channel'], {}), '(-1, _lambda, (1 - _alpha) 
/ (num_miners - 1), dag, miner_channel)\n', (1362, 1428), False, 'from simulation.miner import Miner\n'), ((1793, 1844), 'simulation.ghostdag.dag.select_ghostdag_k', 'select_ghostdag_k', (['(2 * self.D_max * _lambda)', '_delta'], {}), '(2 * self.D_max * _lambda, _delta)\n', (1810, 1844), False, 'from simulation.ghostdag.dag import select_ghostdag_k, DAG\n'), ((2388, 2408), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2402, 2408), True, 'import numpy as np\n'), ((2412, 2429), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2423, 2429), False, 'import random\n'), ((2552, 2564), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (2562, 2564), False, 'import uuid\n'), ((3873, 3905), 'os.path.join', 'os.path.join', (['"""data"""', '"""dag.json"""'], {}), "('data', 'dag.json')\n", (3885, 3905), False, 'import os\n'), ((1608, 1664), 'numpy.sqrt', 'np.sqrt', (['((rows * D_factor) ** 2 + (cols * D_factor) ** 2)'], {}), '((rows * D_factor) ** 2 + (cols * D_factor) ** 2)\n', (1615, 1664), True, 'import numpy as np\n'), ((2165, 2182), 'simulation.channel.Channel', 'Channel', (['self.hub'], {}), '(self.hub)\n', (2172, 2182), False, 'from simulation.channel import Hub, Channel, PlanarTopology\n')] |
from kwmo.controllers.abstract_teambox import *
import time
from kwmo.lib.kwmo_kcd_client import KcdClient
from kwmo.lib.config import get_cached_kcd_external_conf_object
from kfs_lib import *
from kwmo.lib.base import init_session
from kwmo.lib.kwmolib import *
from kwmo.model.user import User
from kwmo.model.kfs_node import KfsNode
from kwmo.model.chat_request import ChatRequest
from kwmo.model.ws_request import WSRequest
import kbase
import simplejson
log = logging.getLogger(__name__)
class SkurlTeamboxController(AbstractTeamboxController):
# Internal: check if workspace is public.
def _check_public(self, workspace_id):
if not c.workspace.public:
log.warning("_check_public(): workspace %i is not public." % ( workspace_id ) )
abort(404)
# Internal: login as a skurl user.
def _login(self, user):
session['user'] = user.to_dict()
session['user_id'] = session['user']['id']
c.perms.allow('kfs.download.share.0')
c.perms.allow('kfs.upload.share.0')
session.save()
# Last minute permissions check.
self._check_perms()
# Internal: set chat request permissions.
def _set_chat_requests_perms(self, flag):
if flag:
# Allow chat requests.
c.perms.allow('pubws.req.chat')
else:
            # Deny further chat requests.
c.perms.deny('pubws.req.chat')
# Internal: set chat permissions.
def _set_chat_perms(self, flag):
if flag:
# Allow chat.
c.perms.allow('chat.list.channel.' + str(session['user_id']))
c.perms.allow('chat.post.channel.' + str(session['user_id']))
else:
# Deny chat.
c.perms.deny('chat.list.channel.' + str(session['user_id']))
c.perms.deny('chat.post.channel.' + str(session['user_id']))
# Internal: set workspace creation requests permissions.
def _set_ws_creation_requests_perms(self, flag):
if flag:
            # Allow workspace creation requests.
c.perms.allow('pubws.req.wscreate')
else:
            # Deny further workspace creation requests.
c.perms.deny('pubws.req.wscreate')
# Log user out.
def logout(self, workspace_id, email_id):
log.debug("Skurl logout.")
init_session(c.workspace, reinit=True)
ui_flash_info(code='logout', hide_after_ms=5000)
redirect_to(url('teambox_pubws_show', workspace_id=workspace_id, email_id=email_id))
# Show public workspace main page.
def show(self, workspace_id, email_id):
workspace_id = int(workspace_id)
email_id = int(email_id)
# Set logout url.
c.logout_url = url('teambox_pubws_logout', workspace_id=workspace_id, email_id=email_id)
# Check if the workspace is public.
self._check_public(workspace_id)
if 'email_id' in session and session['email_id'] != email_id:
# User is logged but wants to access a different email. Reinit session.
log.debug("Reinitializing session because user is using another email id: previous='%s', new='%s'." \
% ( str(session['email_id']), str(email_id) ) )
init_session(c.workspace, reinit=True)
notif = request.GET.get('notif', 0)
if notif:
# This is the sender (user 1)... [re-]login automatically.
log.debug("User is accessing a public workspace using a notification link... automatically log user in.")
user = User.get_by(workspace_id=workspace_id, id=1)
log.debug("Reinitializing session because user is logging as user 1 (notif management).")
init_session(c.workspace, reinit=True)
self._login(user)
c.notif_flag = True
else:
if 'user' in session and session['user'] and session['user']['id'] == 1:
# Sender is logged (as a sender) but he's using a regular skurl link: logout.
log.debug("Reinitializing session because user was logged as user 1 but is using a regular skurl link.")
init_session(c.workspace, reinit=True)
if not c.perms.hasRole('skurl'):
# Give skurl role, if not already done.
c.perms.addRole('skurl')
# Save session.
session.save()
        if 'email_id' not in session:
# Set email information in session.
# Instantiate a Kcd client.
kc = KcdClient(get_cached_kcd_external_conf_object())
# Check that email ID is valid.
email_info = kc.pubws_get_email_info(workspace_id, email_id)
if not email_info:
log.debug("PubWS: invalild email ID: %i" % ( email_id ) )
abort(404)
# Get the email sender.
sender_user = User.get_by(workspace_id=workspace_id, id=1)
sender = kbase.PropStore()
sender.name = sender_user.real_name
sender.email = sender_user.email
# Get the email recipients (list of PropStores, having name and email keys).
raw_recipients = kc.pubws_get_eid_recipient_identities(workspace_id, email_id)
# Strip sender email from recipients, if needed.
recipients = []
for recipient in raw_recipients:
if recipient.email != sender.email:
recipients.append(recipient)
# Merge sender and recipients.
identities = [sender] + recipients
# Set needed informations in session.
session['email_id'] = email_id
session['email_info'] = email_info.to_dict()
session['identities'] = map(lambda x: x.to_dict(), identities)
session.save()
# Get informations that will be published in the template.
c.dyn_version = 15
c.email_info = session['email_info']
c.json_email_info_str = simplejson.dumps(c.email_info)
c.identities = session['identities']
c.json_identities_str = simplejson.dumps(c.identities)
# Check if a chat request was accepted lately (delay is hardcoded in accepted_lately()).
c.user_id = None
if 'user_id' in session and session['user_id']:
c.user_id = session['user_id']
if ChatRequest.accepted_lately(workspace_id, session['user_id']):
# Deny chat requests and allow chat since a request was accepted lately.
self._set_chat_requests_perms(False)
self._set_chat_perms(True)
else:
# Allow chat requests and deny chat since no request was accepted lately.
self._set_chat_requests_perms(True)
self._set_chat_perms(False)
# Allow workspace creation request.
self._set_ws_creation_requests_perms(True)
# Save session.
session.save()
c.base_url_paths = kurl.get_base_url_paths(
'teambox_updater',
'teambox_post_chat',
'teambox_download',
'teambox_upload',
'teambox_pubws_set_identity',
'teambox_pubws_chat_request',
'teambox_pubws_chat_request_result',
'teambox_pubws_kfsup_request',
'teambox_pubws_kfsdown_request',
'teambox_pubws_create_request')
# Get first update directly.
flags = ( StateRequest.STATE_FORCE_SYNC
| StateRequest.STATE_WANT_PERMS
| StateRequest.STATE_WANT_MEMBERS
| StateRequest.STATE_WANT_KFS
| StateRequest.STATE_WANT_PUBWS_INFO )
params = { }
if 'user_id' in session and session['user_id']:
flags |= StateRequest.STATE_WANT_CHAT
params['chat_channel_id'] = session['user_id']
updater_state_dict = state_request_get(c, session, flags, params)
c.updater_state_json = simplejson.dumps(updater_state_dict)
return render('/teambox/pubwsshow.mako')
# Get a user ID matching the identity ID selected by the user.
# If user is not invited, he is invited first.
@kjsonify
def pb_set_identity(self, workspace_id):
import select
from kcd_lib import WorkspaceInvitee
workspace_id = int(workspace_id)
# Get the workspace.
if not c.workspace.public:
log.warning("pb_set_identity: Workspace %i is not public." % ( workspace_id ) )
abort(404)
# Get POST parameters.
identity_id = request.params['identity_id']
identity_id = int(identity_id)
# Shortcuts
identity = session['identities'][identity_id]
log.debug("Recipient: %s" % ( str(identity) ) )
if identity_id == 0:
# This is the sender (user 1).
user = User.get_by(workspace_id=workspace_id, id=1)
self._login(user)
log.debug("Found matching user(0): '%s'." % ( str(user) ) )
return { 'result' : 'ok', 'user' : session['user'] }
# This is a real recipient... try to get the user.
user = User.get_by(workspace_id=workspace_id, email=identity['email'])
if user:
self._login(user)
log.debug("Found matching user(1): '%s'." % ( str(user) ) )
return { 'result' : 'ok', 'user' : session['user'] }
# Instantiate a Kcd client.
kc = KcdClient(get_cached_kcd_external_conf_object())
# Invite user.
invitee = WorkspaceInvitee(real_name=identity['name'], email_address=identity['email'])
junk_url, invitees = kc.invite_users(workspace_id, "empty message", [invitee])
if invitees[0].error:
log.debug("User could not be invited: '%s'." % ( str(invitees[0].error) ) )
raise Exception('Internal error.')
# Get user. If not present, retry a few times, until new user is fetched by kwsfetcher or until timeout.
wait_seconds = 0.5
timeout_seconds = 8
time_started = time.time()
while 1:
# Get user, if it exists (fetched by kwsfetcher).
user = User.get_by(workspace_id=workspace_id, email=identity['email'])
if user:
self._login(user)
log.debug("Found matching user (2): '%s'." % ( str(user) ) )
return { 'result' : 'ok', 'user' : session['user'] }
# Check for timeout.
if time.time() > time_started + timeout_seconds: break
# Wait
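            # (select.select on three empty fd lists doubles as a portable
            # sub-second sleep for wait_seconds)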
select.select([], [], [], wait_seconds)
# Reached timeout.
log.error("Error: reached end of pb_set_identity(). KWSFetcher might be too loaded or down.");
raise Exception('Temporary server error: please try again later.');
# Internal: do stuff related to every pubws request.
def _request_common(self, workspace_id):
# Check that the user is logged.
if not session['user']:
log.error("_request_common(): user is not logged.")
abort(404)
# Instantiate a Kcd client in the context-global variable.
c.pubws_kc = KcdClient(get_cached_kcd_external_conf_object())
# PubWS chat request.
@kjsonify
def chat_request(self, workspace_id):
workspace_id = int(workspace_id)
# Do some checks and initialization.
self._check_public(workspace_id)
self._request_common(workspace_id)
# Time to allow the workspace owner to respond.
# Keep PubWSChat javascript object code in sync for the global chat
# request timeout (which must be a little longer than this one).
req_timeout = 60
# Shortcuts.
user_id = session['user']['id']
subject = session['email_info']['subject']
# Post request.
chat_req_id = c.pubws_kc.pubws_chat_request(workspace_id, user_id, c.workspace.compat_v2, subject, req_timeout)
log.debug("Chat request: got chat_req_id '%i'." % ( chat_req_id ) )
return { "chat_req_id" : chat_req_id }
# PubWS chat request result request.
@kjsonify
def chat_request_result(self, workspace_id, req_id):
workspace_id = int(workspace_id)
req_id = int(req_id)
req_start_time = request.params['req_start_time']
# Do some checks and initialization.
self._check_public(workspace_id)
self._request_common(workspace_id)
# Get the request.
req = ChatRequest.get_by(workspace_id=workspace_id, request_id=req_id)
if req:
# Check request status.
if req.accepted:
# Modify permissions.
self._set_chat_requests_perms(False)
self._set_chat_perms(True)
# Save session.
session.save()
log.debug("chat_request_result(): accepted.")
return { "result" : "ok" }
            # Enable when debugging to automatically accept chat requests.
if 0:
from kanp import KANP_MINOR
from pylons import config
kc = KcdClient(get_cached_kcd_external_conf_object())
# This function has to be rewritten.
kc.pubws_chat_request_accept(workspace_id, user_id, KANP_MINOR, req_id)
else:
# Bad request ID or kwsfetcher has not yet fetched the request.
pass
log.debug("chat_request_result(): pending, chat_req_id='%s', req_start_time='%s'." \
% ( str(req_id), str(req_start_time) ) )
return { "result" : "pending", "chat_req_id" : req_id, 'req_start_time' : req_start_time }
# PubWS KFS upload request.
@kjsonify
def kfs_upload_request(self, workspace_id):
workspace_id = int(workspace_id)
# Do some checks and initialization.
self._check_public(workspace_id)
self._request_common(workspace_id)
# No-op
return { "result" : "ok" }
# PubWS KFS download request.
@kjsonify
def kfs_download_request(self, workspace_id):
workspace_id = int(workspace_id)
# Do some checks and initialization.
self._check_public(workspace_id)
self._request_common(workspace_id)
# No-op
return { "result" : "ok" }
# PubWS workspace creation request.
@kjsonify
def ws_create_request(self, workspace_id):
workspace_id = int(workspace_id)
# Do some checks and initialization.
self._check_public(workspace_id)
self._request_common(workspace_id)
# Shortcuts.
user_id = session['user']['id']
subject = session['email_info']['subject']
# Post request.
req_id = c.pubws_kc.pubws_workspace_creation_request(workspace_id, user_id, c.workspace.compat_v2, subject)
# Modify permissions.
self._set_ws_creation_requests_perms(False)
# Save permissions.
session.save()
return { "result" : "ready" }
| [
"kcd_lib.WorkspaceInvitee",
"select.select",
"kwmo.model.user.User.get_by",
"kwmo.lib.base.init_session",
"simplejson.dumps",
"kwmo.lib.config.get_cached_kcd_external_conf_object",
"kbase.PropStore",
"kwmo.model.chat_request.ChatRequest.get_by",
"kwmo.model.chat_request.ChatRequest.accepted_lately",... | [((2328, 2366), 'kwmo.lib.base.init_session', 'init_session', (['c.workspace'], {'reinit': '(True)'}), '(c.workspace, reinit=True)\n', (2340, 2366), False, 'from kwmo.lib.base import init_session\n'), ((5974, 6004), 'simplejson.dumps', 'simplejson.dumps', (['c.email_info'], {}), '(c.email_info)\n', (5990, 6004), False, 'import simplejson\n'), ((6082, 6112), 'simplejson.dumps', 'simplejson.dumps', (['c.identities'], {}), '(c.identities)\n', (6098, 6112), False, 'import simplejson\n'), ((8034, 8070), 'simplejson.dumps', 'simplejson.dumps', (['updater_state_dict'], {}), '(updater_state_dict)\n', (8050, 8070), False, 'import simplejson\n'), ((9229, 9292), 'kwmo.model.user.User.get_by', 'User.get_by', ([], {'workspace_id': 'workspace_id', 'email': "identity['email']"}), "(workspace_id=workspace_id, email=identity['email'])\n", (9240, 9292), False, 'from kwmo.model.user import User\n'), ((9618, 9695), 'kcd_lib.WorkspaceInvitee', 'WorkspaceInvitee', ([], {'real_name': "identity['name']", 'email_address': "identity['email']"}), "(real_name=identity['name'], email_address=identity['email'])\n", (9634, 9695), False, 'from kcd_lib import WorkspaceInvitee\n'), ((10149, 10160), 'time.time', 'time.time', ([], {}), '()\n', (10158, 10160), False, 'import time\n'), ((12602, 12666), 'kwmo.model.chat_request.ChatRequest.get_by', 'ChatRequest.get_by', ([], {'workspace_id': 'workspace_id', 'request_id': 'req_id'}), '(workspace_id=workspace_id, request_id=req_id)\n', (12620, 12666), False, 'from kwmo.model.chat_request import ChatRequest\n'), ((3230, 3268), 'kwmo.lib.base.init_session', 'init_session', (['c.workspace'], {'reinit': '(True)'}), '(c.workspace, reinit=True)\n', (3242, 3268), False, 'from kwmo.lib.base import init_session\n'), ((3540, 3584), 'kwmo.model.user.User.get_by', 'User.get_by', ([], {'workspace_id': 'workspace_id', 'id': '(1)'}), '(workspace_id=workspace_id, id=1)\n', (3551, 3584), False, 'from kwmo.model.user import User\n'), ((3699, 3737), 'kwmo.lib.base.init_session', 'init_session', (['c.workspace'], {'reinit': '(True)'}), '(c.workspace, reinit=True)\n', (3711, 3737), False, 'from kwmo.lib.base import init_session\n'), ((4863, 4907), 'kwmo.model.user.User.get_by', 'User.get_by', ([], {'workspace_id': 'workspace_id', 'id': '(1)'}), '(workspace_id=workspace_id, id=1)\n', (4874, 4907), False, 'from kwmo.model.user import User\n'), ((4929, 4946), 'kbase.PropStore', 'kbase.PropStore', ([], {}), '()\n', (4944, 4946), False, 'import kbase\n'), ((6351, 6412), 'kwmo.model.chat_request.ChatRequest.accepted_lately', 'ChatRequest.accepted_lately', (['workspace_id', "session['user_id']"], {}), "(workspace_id, session['user_id'])\n", (6378, 6412), False, 'from kwmo.model.chat_request import ChatRequest\n'), ((8942, 8986), 'kwmo.model.user.User.get_by', 'User.get_by', ([], {'workspace_id': 'workspace_id', 'id': '(1)'}), '(workspace_id=workspace_id, id=1)\n', (8953, 8986), False, 'from kwmo.model.user import User\n'), ((9537, 9574), 'kwmo.lib.config.get_cached_kcd_external_conf_object', 'get_cached_kcd_external_conf_object', ([], {}), '()\n', (9572, 9574), False, 'from kwmo.lib.config import get_cached_kcd_external_conf_object\n'), ((10259, 10322), 'kwmo.model.user.User.get_by', 'User.get_by', ([], {'workspace_id': 'workspace_id', 'email': "identity['email']"}), "(workspace_id=workspace_id, email=identity['email'])\n", (10270, 10322), False, 'from kwmo.model.user import User\n'), ((10671, 10710), 'select.select', 'select.select', (['[]', '[]', 
'[]', 'wait_seconds'], {}), '([], [], [], wait_seconds)\n', (10684, 10710), False, 'import select\n'), ((11283, 11320), 'kwmo.lib.config.get_cached_kcd_external_conf_object', 'get_cached_kcd_external_conf_object', ([], {}), '()\n', (11318, 11320), False, 'from kwmo.lib.config import get_cached_kcd_external_conf_object\n'), ((4130, 4168), 'kwmo.lib.base.init_session', 'init_session', (['c.workspace'], {'reinit': '(True)'}), '(c.workspace, reinit=True)\n', (4142, 4168), False, 'from kwmo.lib.base import init_session\n'), ((4511, 4548), 'kwmo.lib.config.get_cached_kcd_external_conf_object', 'get_cached_kcd_external_conf_object', ([], {}), '()\n', (4546, 4548), False, 'from kwmo.lib.config import get_cached_kcd_external_conf_object\n'), ((10586, 10597), 'time.time', 'time.time', ([], {}), '()\n', (10595, 10597), False, 'import time\n'), ((13264, 13301), 'kwmo.lib.config.get_cached_kcd_external_conf_object', 'get_cached_kcd_external_conf_object', ([], {}), '()\n', (13299, 13301), False, 'from kwmo.lib.config import get_cached_kcd_external_conf_object\n')] |
import argparse
import os
import cv2
import numpy as np
def gamma_correction(source_path, destination_path, a, b, version):
# Load image into memory
# Algorithm can work correctly with colored and grayscale images
if version == 'colored':
original_image = cv2.imread(source_path)
elif version == 'grayscale':
original_image = cv2.imread(source_path, cv2.IMREAD_GRAYSCALE)
else:
# Other types are not supported
raise RuntimeError('Wrong type of image')
# Apply formula to rescaled image
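    # out = a * (in / 255) ** b on the [0, 1] scale: with a = 1, b < 1
    # brightens the image and b > 1 darkens it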
processed_image = a * ((original_image / 255) ** b)
# Crop values, that are too high
processed_image[processed_image >= 1] = 1
# Scale image back to [0 - 255]
processed_image = processed_image * 255
# Correctly convert float values to integers
processed_image = np.rint(processed_image)
# Convert to `np.uint8`, so `imwrite` will save image correctly
cv2.imwrite(destination_path, processed_image.astype(np.uint8))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Perform gamma correction.')
parser.add_argument('source_path', metavar='source_path', type=str,
help='Path to the original image.')
parser.add_argument('destination_path', metavar='destination_path', type=str,
help='Path to the processed image.')
parser.add_argument('a', metavar='a', type=float,
help='First parameter of gamma correction algorithm.')
parser.add_argument('b', metavar='b', type=float,
help='Second parameter of gamma correction algorithm.')
parser.add_argument('--version', type=str, default='colored',
help='Shows type of image. Variants: colored / grayscale.')
args = parser.parse_args()
if not os.path.exists(args.source_path):
raise FileNotFoundError
gamma_correction(args.source_path, args.destination_path, args.a, args.b, args.version)
| [
"os.path.exists",
"numpy.rint",
"argparse.ArgumentParser",
"cv2.imread"
] | [((836, 860), 'numpy.rint', 'np.rint', (['processed_image'], {}), '(processed_image)\n', (843, 860), True, 'import numpy as np\n'), ((1040, 1104), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Perform gamma correction."""'}), "(description='Perform gamma correction.')\n", (1063, 1104), False, 'import argparse\n'), ((279, 302), 'cv2.imread', 'cv2.imread', (['source_path'], {}), '(source_path)\n', (289, 302), False, 'import cv2\n'), ((1841, 1873), 'os.path.exists', 'os.path.exists', (['args.source_path'], {}), '(args.source_path)\n', (1855, 1873), False, 'import os\n'), ((361, 406), 'cv2.imread', 'cv2.imread', (['source_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(source_path, cv2.IMREAD_GRAYSCALE)\n', (371, 406), False, 'import cv2\n')] |
__author__ = '<NAME><<EMAIL>>'
import os
from util import YamlFileMaker
from util import QstatParser
from cfnCluster import ConnectionManager
import sys
workspace = "/shared/workspace/Pipelines/"
#log_dir = "/shared/workspace/data_archive/DNASeq/{}/logs"
log_dir = "/shared/workspace/logs/DNASeq/{}"
## executing WGS pipeline with the specific yaml file
def execute(ssh_client, project_name, analysis_steps, s3_input_files_address,
sample_list, group_name, s3_output_files_address, email):
yaml_file = project_name + ".yaml"
global log_dir
log_dir = log_dir.format(project_name)
print("making the yaml file ...")
YamlFileMaker.make_yaml_file(yaml_file, project_name, analysis_steps, s3_input_files_address,
sample_list, group_name, s3_output_files_address, "hg19", "NA")
print("copying yaml files to remote master node...")
ConnectionManager.copy_file(ssh_client, yaml_file, workspace + "yaml_examples")
os.remove(yaml_file)
#if not email == "":
print("executing pipeline...")
ConnectionManager.execute_command(ssh_client, "qsub -o /dev/null -e /dev/null " + workspace + "scripts/run.sh "
+ workspace + "yaml_examples/" + yaml_file + " " + log_dir + " " + "WGSPipeline.py")
## checking the processing status of a specific job
def check_status(ssh_client, job_name):
print("checking processing status")
qstat = ConnectionManager.execute_command(ssh_client, "qstat")
job_ids = QstatParser.get_job_ids(qstat)
job_details = [ConnectionManager.execute_command(ssh_client,
"qstat -j %s" % x[0]) for x in job_ids]
job_info = [job_ids[x] + [job_details[x]] for x in range(len(job_ids))]
global log_dir
logs = ConnectionManager.list_dir(ssh_client, log_dir)
QstatParser.parse_qstat(job_info, job_name, logs)
## checking your jobs status
def check_jobs_status(ssh_client):
print("checking jobs status")
ConnectionManager.execute_command(ssh_client, "qstat")
## checking your host status
def check_host_status(ssh_client):
print("checking qhost status")
ConnectionManager.execute_command(ssh_client, "qhost")
| [
"util.YamlFileMaker.make_yaml_file",
"cfnCluster.ConnectionManager.list_dir",
"util.QstatParser.get_job_ids",
"util.QstatParser.parse_qstat",
"cfnCluster.ConnectionManager.copy_file",
"cfnCluster.ConnectionManager.execute_command",
"os.remove"
] | [((657, 822), 'util.YamlFileMaker.make_yaml_file', 'YamlFileMaker.make_yaml_file', (['yaml_file', 'project_name', 'analysis_steps', 's3_input_files_address', 'sample_list', 'group_name', 's3_output_files_address', '"""hg19"""', '"""NA"""'], {}), "(yaml_file, project_name, analysis_steps,\n s3_input_files_address, sample_list, group_name,\n s3_output_files_address, 'hg19', 'NA')\n", (685, 822), False, 'from util import YamlFileMaker\n'), ((896, 975), 'cfnCluster.ConnectionManager.copy_file', 'ConnectionManager.copy_file', (['ssh_client', 'yaml_file', "(workspace + 'yaml_examples')"], {}), "(ssh_client, yaml_file, workspace + 'yaml_examples')\n", (923, 975), False, 'from cfnCluster import ConnectionManager\n'), ((980, 1000), 'os.remove', 'os.remove', (['yaml_file'], {}), '(yaml_file)\n', (989, 1000), False, 'import os\n'), ((1067, 1277), 'cfnCluster.ConnectionManager.execute_command', 'ConnectionManager.execute_command', (['ssh_client', "('qsub -o /dev/null -e /dev/null ' + workspace + 'scripts/run.sh ' +\n workspace + 'yaml_examples/' + yaml_file + ' ' + log_dir + ' ' +\n 'WGSPipeline.py')"], {}), "(ssh_client, \n 'qsub -o /dev/null -e /dev/null ' + workspace + 'scripts/run.sh ' +\n workspace + 'yaml_examples/' + yaml_file + ' ' + log_dir + ' ' +\n 'WGSPipeline.py')\n", (1100, 1277), False, 'from cfnCluster import ConnectionManager\n'), ((1401, 1455), 'cfnCluster.ConnectionManager.execute_command', 'ConnectionManager.execute_command', (['ssh_client', '"""qstat"""'], {}), "(ssh_client, 'qstat')\n", (1434, 1455), False, 'from cfnCluster import ConnectionManager\n'), ((1471, 1501), 'util.QstatParser.get_job_ids', 'QstatParser.get_job_ids', (['qstat'], {}), '(qstat)\n', (1494, 1501), False, 'from util import QstatParser\n'), ((1724, 1771), 'cfnCluster.ConnectionManager.list_dir', 'ConnectionManager.list_dir', (['ssh_client', 'log_dir'], {}), '(ssh_client, log_dir)\n', (1750, 1771), False, 'from cfnCluster import ConnectionManager\n'), ((1777, 1826), 'util.QstatParser.parse_qstat', 'QstatParser.parse_qstat', (['job_info', 'job_name', 'logs'], {}), '(job_info, job_name, logs)\n', (1800, 1826), False, 'from util import QstatParser\n'), ((1930, 1984), 'cfnCluster.ConnectionManager.execute_command', 'ConnectionManager.execute_command', (['ssh_client', '"""qstat"""'], {}), "(ssh_client, 'qstat')\n", (1963, 1984), False, 'from cfnCluster import ConnectionManager\n'), ((2089, 2143), 'cfnCluster.ConnectionManager.execute_command', 'ConnectionManager.execute_command', (['ssh_client', '"""qhost"""'], {}), "(ssh_client, 'qhost')\n", (2122, 2143), False, 'from cfnCluster import ConnectionManager\n'), ((1521, 1588), 'cfnCluster.ConnectionManager.execute_command', 'ConnectionManager.execute_command', (['ssh_client', "('qstat -j %s' % x[0])"], {}), "(ssh_client, 'qstat -j %s' % x[0])\n", (1554, 1588), False, 'from cfnCluster import ConnectionManager\n')] |
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.update_diversity_filter_dto import \
UpdateDiversityFilterDTO
from unittest_reinvent.diversity_filter_tests.test_murcko_scaffold_base import BaseMurckoScaffoldFilter
from unittest_reinvent.diversity_filter_tests.fixtures import tanimoto_scaffold_filter_arrangement
from unittest_reinvent.fixtures.test_data import ASPIRIN
class TestMurckoScaffoldSuperfluousAddition(BaseMurckoScaffoldFilter):
def setUp(self):
super().setUp()
# try to add a smile already present
final_summary = tanimoto_scaffold_filter_arrangement([ASPIRIN], [1.0], [0])
self.update_dto = UpdateDiversityFilterDTO(final_summary, [])
def test_superfluous_addition(self):
self.scaffold_filter.update_score(self.update_dto)
self.assertEqual(2, self.scaffold_filter._diversity_filter_memory.number_of_scaffolds())
| [
"unittest_reinvent.diversity_filter_tests.fixtures.tanimoto_scaffold_filter_arrangement",
"reinvent_scoring.scoring.diversity_filters.curriculum_learning.update_diversity_filter_dto.UpdateDiversityFilterDTO"
] | [((583, 642), 'unittest_reinvent.diversity_filter_tests.fixtures.tanimoto_scaffold_filter_arrangement', 'tanimoto_scaffold_filter_arrangement', (['[ASPIRIN]', '[1.0]', '[0]'], {}), '([ASPIRIN], [1.0], [0])\n', (619, 642), False, 'from unittest_reinvent.diversity_filter_tests.fixtures import tanimoto_scaffold_filter_arrangement\n'), ((669, 712), 'reinvent_scoring.scoring.diversity_filters.curriculum_learning.update_diversity_filter_dto.UpdateDiversityFilterDTO', 'UpdateDiversityFilterDTO', (['final_summary', '[]'], {}), '(final_summary, [])\n', (693, 712), False, 'from reinvent_scoring.scoring.diversity_filters.curriculum_learning.update_diversity_filter_dto import UpdateDiversityFilterDTO\n')] |
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from .kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
import collections
if parse_version(ks_version) < parse_version('0.7'):
raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
class Msgpack(KaitaiStruct):
"""MessagePack (msgpack) is a system to serialize arbitrary structured
data into a compact binary stream.
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md
"""
SEQ_FIELDS = ["b1", "int_extra", "float_32_value", "float_64_value", "str_len_8", "str_len_16", "str_len_32", "str_value", "num_array_elements_16", "num_array_elements_32", "array_elements", "num_map_elements_16", "num_map_elements_32", "map_elements"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['b1']['start'] = self._io.pos()
self.b1 = self._io.read_u1()
self._debug['b1']['end'] = self._io.pos()
self._debug['int_extra']['start'] = self._io.pos()
_on = self.b1
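        # 0xcc-0xcf (204-207) are unsigned ints of 1/2/4/8 bytes and
        # 0xd0-0xd3 (208-211) the signed equivalents, per the msgpack spec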
if _on == 211:
self.int_extra = self._io.read_s8be()
elif _on == 209:
self.int_extra = self._io.read_s2be()
elif _on == 210:
self.int_extra = self._io.read_s4be()
elif _on == 208:
self.int_extra = self._io.read_s1()
elif _on == 205:
self.int_extra = self._io.read_u2be()
elif _on == 207:
self.int_extra = self._io.read_u8be()
elif _on == 204:
self.int_extra = self._io.read_u1()
elif _on == 206:
self.int_extra = self._io.read_u4be()
self._debug['int_extra']['end'] = self._io.pos()
if self.is_float_32:
self._debug['float_32_value']['start'] = self._io.pos()
self.float_32_value = self._io.read_f4be()
self._debug['float_32_value']['end'] = self._io.pos()
if self.is_float_64:
self._debug['float_64_value']['start'] = self._io.pos()
self.float_64_value = self._io.read_f8be()
self._debug['float_64_value']['end'] = self._io.pos()
if self.is_str_8:
self._debug['str_len_8']['start'] = self._io.pos()
self.str_len_8 = self._io.read_u1()
self._debug['str_len_8']['end'] = self._io.pos()
if self.is_str_16:
self._debug['str_len_16']['start'] = self._io.pos()
self.str_len_16 = self._io.read_u2be()
self._debug['str_len_16']['end'] = self._io.pos()
if self.is_str_32:
self._debug['str_len_32']['start'] = self._io.pos()
self.str_len_32 = self._io.read_u4be()
self._debug['str_len_32']['end'] = self._io.pos()
if self.is_str:
self._debug['str_value']['start'] = self._io.pos()
self.str_value = (self._io.read_bytes(self.str_len)).decode(u"UTF-8")
self._debug['str_value']['end'] = self._io.pos()
if self.is_array_16:
self._debug['num_array_elements_16']['start'] = self._io.pos()
self.num_array_elements_16 = self._io.read_u2be()
self._debug['num_array_elements_16']['end'] = self._io.pos()
if self.is_array_32:
self._debug['num_array_elements_32']['start'] = self._io.pos()
self.num_array_elements_32 = self._io.read_u4be()
self._debug['num_array_elements_32']['end'] = self._io.pos()
if self.is_array:
self._debug['array_elements']['start'] = self._io.pos()
self.array_elements = [None] * (self.num_array_elements)
for i in range(self.num_array_elements):
if not 'arr' in self._debug['array_elements']:
self._debug['array_elements']['arr'] = []
self._debug['array_elements']['arr'].append({'start': self._io.pos()})
_t_array_elements = Msgpack(self._io)
_t_array_elements._read()
self.array_elements[i] = _t_array_elements
self._debug['array_elements']['arr'][i]['end'] = self._io.pos()
self._debug['array_elements']['end'] = self._io.pos()
if self.is_map_16:
self._debug['num_map_elements_16']['start'] = self._io.pos()
self.num_map_elements_16 = self._io.read_u2be()
self._debug['num_map_elements_16']['end'] = self._io.pos()
if self.is_map_32:
self._debug['num_map_elements_32']['start'] = self._io.pos()
self.num_map_elements_32 = self._io.read_u4be()
self._debug['num_map_elements_32']['end'] = self._io.pos()
if self.is_map:
self._debug['map_elements']['start'] = self._io.pos()
self.map_elements = [None] * (self.num_map_elements)
for i in range(self.num_map_elements):
if not 'arr' in self._debug['map_elements']:
self._debug['map_elements']['arr'] = []
self._debug['map_elements']['arr'].append({'start': self._io.pos()})
_t_map_elements = self._root.MapTuple(self._io, self, self._root)
_t_map_elements._read()
self.map_elements[i] = _t_map_elements
self._debug['map_elements']['arr'][i]['end'] = self._io.pos()
self._debug['map_elements']['end'] = self._io.pos()
class MapTuple(KaitaiStruct):
SEQ_FIELDS = ["key", "value"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['key']['start'] = self._io.pos()
self.key = Msgpack(self._io)
self.key._read()
self._debug['key']['end'] = self._io.pos()
self._debug['value']['start'] = self._io.pos()
self.value = Msgpack(self._io)
self.value._read()
self._debug['value']['end'] = self._io.pos()
@property
def is_array_32(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-array
"""
if hasattr(self, '_m_is_array_32'):
return self._m_is_array_32 if hasattr(self, '_m_is_array_32') else None
self._m_is_array_32 = self.b1 == 221
return self._m_is_array_32 if hasattr(self, '_m_is_array_32') else None
@property
def int_value(self):
if hasattr(self, '_m_int_value'):
return self._m_int_value if hasattr(self, '_m_int_value') else None
if self.is_int:
self._m_int_value = (self.pos_int7_value if self.is_pos_int7 else (self.neg_int5_value if self.is_neg_int5 else 4919))
return self._m_int_value if hasattr(self, '_m_int_value') else None
@property
def str_len(self):
if hasattr(self, '_m_str_len'):
return self._m_str_len if hasattr(self, '_m_str_len') else None
if self.is_str:
self._m_str_len = ((self.b1 & 31) if self.is_fix_str else (self.str_len_8 if self.is_str_8 else (self.str_len_16 if self.is_str_16 else self.str_len_32)))
return self._m_str_len if hasattr(self, '_m_str_len') else None
@property
def is_fix_array(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-array
"""
if hasattr(self, '_m_is_fix_array'):
return self._m_is_fix_array if hasattr(self, '_m_is_fix_array') else None
self._m_is_fix_array = (self.b1 & 240) == 144
return self._m_is_fix_array if hasattr(self, '_m_is_fix_array') else None
@property
def is_map(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-map
"""
if hasattr(self, '_m_is_map'):
return self._m_is_map if hasattr(self, '_m_is_map') else None
self._m_is_map = ((self.is_fix_map) or (self.is_map_16) or (self.is_map_32))
return self._m_is_map if hasattr(self, '_m_is_map') else None
@property
def is_array(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-array
"""
if hasattr(self, '_m_is_array'):
return self._m_is_array if hasattr(self, '_m_is_array') else None
self._m_is_array = ((self.is_fix_array) or (self.is_array_16) or (self.is_array_32))
return self._m_is_array if hasattr(self, '_m_is_array') else None
@property
def is_float(self):
if hasattr(self, '_m_is_float'):
return self._m_is_float if hasattr(self, '_m_is_float') else None
self._m_is_float = ((self.is_float_32) or (self.is_float_64))
return self._m_is_float if hasattr(self, '_m_is_float') else None
@property
def is_str_8(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-str
"""
if hasattr(self, '_m_is_str_8'):
return self._m_is_str_8 if hasattr(self, '_m_is_str_8') else None
self._m_is_str_8 = self.b1 == 217
return self._m_is_str_8 if hasattr(self, '_m_is_str_8') else None
@property
def is_fix_map(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-map
"""
if hasattr(self, '_m_is_fix_map'):
return self._m_is_fix_map if hasattr(self, '_m_is_fix_map') else None
self._m_is_fix_map = (self.b1 & 240) == 128
return self._m_is_fix_map if hasattr(self, '_m_is_fix_map') else None
@property
def is_int(self):
if hasattr(self, '_m_is_int'):
return self._m_is_int if hasattr(self, '_m_is_int') else None
self._m_is_int = ((self.is_pos_int7) or (self.is_neg_int5))
return self._m_is_int if hasattr(self, '_m_is_int') else None
@property
def is_bool(self):
if hasattr(self, '_m_is_bool'):
return self._m_is_bool if hasattr(self, '_m_is_bool') else None
self._m_is_bool = ((self.b1 == 194) or (self.b1 == 195))
return self._m_is_bool if hasattr(self, '_m_is_bool') else None
@property
def is_str_16(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-str
"""
if hasattr(self, '_m_is_str_16'):
return self._m_is_str_16 if hasattr(self, '_m_is_str_16') else None
self._m_is_str_16 = self.b1 == 218
return self._m_is_str_16 if hasattr(self, '_m_is_str_16') else None
@property
def is_float_64(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-float
"""
if hasattr(self, '_m_is_float_64'):
return self._m_is_float_64 if hasattr(self, '_m_is_float_64') else None
self._m_is_float_64 = self.b1 == 203
return self._m_is_float_64 if hasattr(self, '_m_is_float_64') else None
@property
def is_map_16(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-map
"""
if hasattr(self, '_m_is_map_16'):
return self._m_is_map_16 if hasattr(self, '_m_is_map_16') else None
self._m_is_map_16 = self.b1 == 222
return self._m_is_map_16 if hasattr(self, '_m_is_map_16') else None
@property
def is_neg_int5(self):
if hasattr(self, '_m_is_neg_int5'):
return self._m_is_neg_int5 if hasattr(self, '_m_is_neg_int5') else None
self._m_is_neg_int5 = (self.b1 & 224) == 224
return self._m_is_neg_int5 if hasattr(self, '_m_is_neg_int5') else None
@property
def pos_int7_value(self):
if hasattr(self, '_m_pos_int7_value'):
return self._m_pos_int7_value if hasattr(self, '_m_pos_int7_value') else None
if self.is_pos_int7:
self._m_pos_int7_value = self.b1
return self._m_pos_int7_value if hasattr(self, '_m_pos_int7_value') else None
@property
def is_nil(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-nil
"""
if hasattr(self, '_m_is_nil'):
return self._m_is_nil if hasattr(self, '_m_is_nil') else None
self._m_is_nil = self.b1 == 192
return self._m_is_nil if hasattr(self, '_m_is_nil') else None
@property
def float_value(self):
if hasattr(self, '_m_float_value'):
return self._m_float_value if hasattr(self, '_m_float_value') else None
if self.is_float:
self._m_float_value = (self.float_32_value if self.is_float_32 else self.float_64_value)
return self._m_float_value if hasattr(self, '_m_float_value') else None
@property
def num_array_elements(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-array
"""
if hasattr(self, '_m_num_array_elements'):
return self._m_num_array_elements if hasattr(self, '_m_num_array_elements') else None
if self.is_array:
self._m_num_array_elements = ((self.b1 & 15) if self.is_fix_array else (self.num_array_elements_16 if self.is_array_16 else self.num_array_elements_32))
return self._m_num_array_elements if hasattr(self, '_m_num_array_elements') else None
@property
def neg_int5_value(self):
if hasattr(self, '_m_neg_int5_value'):
return self._m_neg_int5_value if hasattr(self, '_m_neg_int5_value') else None
if self.is_neg_int5:
self._m_neg_int5_value = -((self.b1 & 31))
return self._m_neg_int5_value if hasattr(self, '_m_neg_int5_value') else None
@property
def bool_value(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-bool
"""
if hasattr(self, '_m_bool_value'):
return self._m_bool_value if hasattr(self, '_m_bool_value') else None
if self.is_bool:
self._m_bool_value = self.b1 == 195
return self._m_bool_value if hasattr(self, '_m_bool_value') else None
@property
def is_pos_int7(self):
if hasattr(self, '_m_is_pos_int7'):
return self._m_is_pos_int7 if hasattr(self, '_m_is_pos_int7') else None
self._m_is_pos_int7 = (self.b1 & 128) == 0
return self._m_is_pos_int7 if hasattr(self, '_m_is_pos_int7') else None
@property
def is_array_16(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-array
"""
if hasattr(self, '_m_is_array_16'):
return self._m_is_array_16 if hasattr(self, '_m_is_array_16') else None
self._m_is_array_16 = self.b1 == 220
return self._m_is_array_16 if hasattr(self, '_m_is_array_16') else None
@property
def is_str(self):
if hasattr(self, '_m_is_str'):
return self._m_is_str if hasattr(self, '_m_is_str') else None
self._m_is_str = ((self.is_fix_str) or (self.is_str_8) or (self.is_str_16) or (self.is_str_32))
return self._m_is_str if hasattr(self, '_m_is_str') else None
@property
def is_fix_str(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-str
"""
if hasattr(self, '_m_is_fix_str'):
return self._m_is_fix_str if hasattr(self, '_m_is_fix_str') else None
self._m_is_fix_str = (self.b1 & 224) == 160
return self._m_is_fix_str if hasattr(self, '_m_is_fix_str') else None
@property
def is_str_32(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-str
"""
if hasattr(self, '_m_is_str_32'):
return self._m_is_str_32 if hasattr(self, '_m_is_str_32') else None
self._m_is_str_32 = self.b1 == 219
return self._m_is_str_32 if hasattr(self, '_m_is_str_32') else None
@property
def num_map_elements(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-map
"""
if hasattr(self, '_m_num_map_elements'):
return self._m_num_map_elements if hasattr(self, '_m_num_map_elements') else None
if self.is_map:
self._m_num_map_elements = ((self.b1 & 15) if self.is_fix_map else (self.num_map_elements_16 if self.is_map_16 else self.num_map_elements_32))
return self._m_num_map_elements if hasattr(self, '_m_num_map_elements') else None
@property
def is_float_32(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-float
"""
if hasattr(self, '_m_is_float_32'):
return self._m_is_float_32 if hasattr(self, '_m_is_float_32') else None
self._m_is_float_32 = self.b1 == 202
return self._m_is_float_32 if hasattr(self, '_m_is_float_32') else None
@property
def is_map_32(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-map
"""
if hasattr(self, '_m_is_map_32'):
return self._m_is_map_32 if hasattr(self, '_m_is_map_32') else None
self._m_is_map_32 = self.b1 == 223
return self._m_is_map_32 if hasattr(self, '_m_is_map_32') else None
| [
"pkg_resources.parse_version",
"collections.defaultdict"
] | [((253, 278), 'pkg_resources.parse_version', 'parse_version', (['ks_version'], {}), '(ks_version)\n', (266, 278), False, 'from pkg_resources import parse_version\n'), ((281, 301), 'pkg_resources.parse_version', 'parse_version', (['"""0.7"""'], {}), "('0.7')\n", (294, 301), False, 'from pkg_resources import parse_version\n'), ((1101, 1130), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (1124, 1130), False, 'import collections\n'), ((5980, 6009), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (6003, 6009), False, 'import collections\n')] |
from awx.main import signals
class TestCleanupDetachedLabels:
def test_cleanup_detached_labels_on_deleted_parent(self, mocker):
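        # Two mock labels: only the first reports itself as a detach
        # candidate, so only it should be deleted by the signal handler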
mock_labels = [mocker.MagicMock(), mocker.MagicMock()]
mock_instance = mocker.MagicMock()
mock_instance.labels.all = mocker.MagicMock()
mock_instance.labels.all.return_value = mock_labels
mock_labels[0].is_candidate_for_detach.return_value = True
mock_labels[1].is_candidate_for_detach.return_value = False
signals.cleanup_detached_labels_on_deleted_parent(None, mock_instance)
mock_labels[0].is_candidate_for_detach.assert_called_with()
mock_labels[1].is_candidate_for_detach.assert_called_with()
mock_labels[0].delete.assert_called_with()
mock_labels[1].delete.assert_not_called()
| [
"awx.main.signals.cleanup_detached_labels_on_deleted_parent"
] | [((498, 568), 'awx.main.signals.cleanup_detached_labels_on_deleted_parent', 'signals.cleanup_detached_labels_on_deleted_parent', (['None', 'mock_instance'], {}), '(None, mock_instance)\n', (547, 568), False, 'from awx.main import signals\n')] |
import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
class AccidentsData:
def __init__(self):
filename = Path('../data/accidents.csv')
if not filename.exists():
print('\nERROR: Missing dataset file: accidents.csv\n')
quit()
accidents = pd.read_csv(filename)
        # Drop columns that a preliminary review deemed irrelevant
accidents = accidents.drop(columns=['police_force', 'local_authority_district', 'local_authority_highway',
'lsoa_of_accident_location', 'location_easting_osgr',
'location_northing_osgr'])
# One hot encoding
accidents = pd.get_dummies(accidents, columns=['1st_road_class', 'junction_detail', 'junction_control',
'2nd_road_class', 'pedestrian_crossing-human_control',
'pedestrian_crossing-physical_facilities', 'light_conditions',
'road_surface_conditions',
'special_conditions_at_site', 'carriageway_hazards'])
        # Drop the one-hot columns tied to an unknown (-1) condition
cols_acaben_menysu = []
for colname in accidents.columns:
if colname[-3:] == '_-1':
cols_acaben_menysu.append(colname)
accidents = accidents.drop(columns=cols_acaben_menysu)
numeritza = {'urban_or_rural_area': {'Urban': 1,
'Rural': 0}
}
accidents.replace(numeritza, inplace=True)
        # If there is no exceptional condition, the one-hot column carries no information
        accidents = accidents.drop(columns=['special_conditions_at_site_None', 'carriageway_hazards_None',
                                            '1st_road_class_Unclassified',
                                            '2nd_road_class_Unclassified'])
        # Convert hh:mm:00 times to minutes since midnight
accidents['time'] = accidents['time'].apply(lambda s: int(s[:-4]) * 60 + int(s[-2:]))
        # Convert yyyy-mm-dd dates to an approximate day count (month ~= 30.44 days)
accidents['date'] = accidents['date'].apply(lambda s: int(s[7:9]) + int(s[-2:-1]) * 30.44)
        # Replace -1 placeholders with the column average
accidents['2nd_road_number'].replace(-1, np.nan, inplace=True)
accidents['2nd_road_number'].fillna(accidents['2nd_road_number'].mean(), inplace=True)
        # Normalize the columns that require it
tobenorm = ['longitude', 'latitude', 'number_of_vehicles', 'number_of_casualties', 'date', 'time',
'1st_road_number',
'road_type', 'speed_limit', '2nd_road_number', 'weather_conditions']
norm = MinMaxScaler()
accidents[tobenorm] = norm.fit_transform(accidents[tobenorm])
#self.features = accidents.drop('target', axis=1)
self.Xtrain, self.Xtest, self.ytrain, self.ytest = train_test_split(accidents.drop('target', axis=1),
accidents['target'], train_size=.7)
def get_Xtrain(self):
return self.Xtrain
def get_Xtest(self):
return self.Xtest
def get_ytrain(self):
return self.ytrain
def get_ytest(self):
return self.ytest
class VehiclesData:
def __init__(self):
filename = Path('../data/vehicles.csv')
if not filename.exists():
print('\nERROR: Missing dataset file: vehicles.csv\n')
quit()
vehicles = pd.read_csv(filename)
vehicles = vehicles.drop(columns=['Vehicle_IMD_Decile'])
vehicles = pd.get_dummies(vehicles, columns=['Vehicle_Type', 'Towing_and_Articulation', 'Vehicle_Manoeuvre',
'Vehicle_Location-Restricted_Lane', 'Junction_Location',
'Skidding_and_Overturning', 'Hit_Object_in_Carriageway',
'Vehicle_Leaving_Carriageway', 'Hit_Object_off_Carriageway',
'1st_Point_of_Impact',
'Journey_Purpose_of_Driver', 'Propulsion_Code',
'Driver_IMD_Decile', 'Driver_Home_Area_Type'])
cols_acabenmenysu = []
for colname in vehicles.columns:
if colname[-3:] == '_-1' or colname[-5:] == '_-1.0':
cols_acabenmenysu.append(colname)
vehicles = vehicles.drop(columns=cols_acabenmenysu)
vehicles = vehicles.drop(vehicles[vehicles.Age_of_Driver < 15].index)
vehicles['Engine_Capacity_(CC)'].replace(-1, np.nan, inplace=True)
vehicles['Engine_Capacity_(CC)'].replace('-1', np.nan, inplace=True)
vehicles['Engine_Capacity_(CC)'].fillna(vehicles['Engine_Capacity_(CC)'].mean(), inplace=True)
vehicles['Age_of_Driver'].replace(-1, np.nan, inplace=True)
vehicles['Age_of_Driver'].replace('-1', np.nan, inplace=True)
vehicles['Age_of_Driver'].fillna(vehicles['Age_of_Driver'].mean(), inplace=True)
vehicles['Age_of_Vehicle'].replace(-1, np.nan, inplace=True)
vehicles['Age_of_Vehicle'].fillna(vehicles['Age_of_Vehicle'].mean(), inplace=True)
vehicles['Was_Vehicle_Left_Hand_Drive?'].replace(-1, np.nan, inplace=True)
vehicles['Was_Vehicle_Left_Hand_Drive?'].replace('-1', np.nan, inplace=True)
vehicles['Sex_of_Driver'].replace(-1, np.nan, inplace=True)
vehicles['Sex_of_Driver'].replace('-1', np.nan, inplace=True)
vehicles['Sex_of_Driver'].replace('Not known', np.nan, inplace=True)
dicvehicles = {'Sex_of_Driver': {'Male': 1.0, 'Female': 0.0},
'Was_Vehicle_Left_Hand_Drive?': {'Yes': 1.0, 'No': 0.0}
}
vehicles.replace(dicvehicles, inplace=True)
vehicles['Was_Vehicle_Left_Hand_Drive?'].fillna(vehicles['Was_Vehicle_Left_Hand_Drive?'].mean(), inplace=True)
vehicles['Sex_of_Driver'].fillna(vehicles['Sex_of_Driver'].mean(), inplace=True)
tobenorm = ['Age_of_Driver', 'Engine_Capacity_(CC)', 'Age_of_Vehicle']
norm = MinMaxScaler()
vehicles[tobenorm] = norm.fit_transform(vehicles[tobenorm])
self.valors = vehicles
def get_valors(self):
return self.valors
class MergedData:
def __init__(self, accidents, vehicles):
acctarg_train = pd.concat([accidents.get_Xtrain(), accidents.get_ytrain()], axis=1)
acctarg_test = pd.concat([accidents.get_Xtest(), accidents.get_ytest()], axis=1)
merged_train = pd.merge(acctarg_train, vehicles.get_valors(), on='accident_id')
merged_test = pd.merge(acctarg_test, vehicles.get_valors(), on='accident_id')
self.target_train = merged_train['target']
self.target_test = merged_test['target']
self.merged_train = merged_train.drop('target', axis=1)
self.merged_test = merged_test.drop('target', axis=1)
def get_merged_train(self):
return self.merged_train
def get_target_train(self):
return self.target_train
def get_merged_test(self):
return self.merged_test
def get_target_test(self):
return self.target_test
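# --- Hedged usage sketch (added) ---
# How the classes above chain together. `AccidentsData` is assumed to be the
# name of the accidents loader whose getters MergedData consumes; that class
# name is not visible in this excerpt.
if __name__ == '__main__':
    accidents = AccidentsData()  # hypothetical class name, see note above
    vehicles = VehiclesData()
    merged = MergedData(accidents, vehicles)
    print(merged.get_merged_train().shape, merged.get_target_train().shape)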
| [
"pandas.get_dummies",
"sklearn.preprocessing.MinMaxScaler",
"pandas.read_csv",
"pathlib.Path"
] | [((230, 259), 'pathlib.Path', 'Path', (['"""../data/accidents.csv"""'], {}), "('../data/accidents.csv')\n", (234, 259), False, 'from pathlib import Path\n'), ((401, 422), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (412, 422), True, 'import pandas as pd\n'), ((831, 1141), 'pandas.get_dummies', 'pd.get_dummies', (['accidents'], {'columns': "['1st_road_class', 'junction_detail', 'junction_control', '2nd_road_class',\n 'pedestrian_crossing-human_control',\n 'pedestrian_crossing-physical_facilities', 'light_conditions',\n 'road_surface_conditions', 'special_conditions_at_site',\n 'carriageway_hazards']"}), "(accidents, columns=['1st_road_class', 'junction_detail',\n 'junction_control', '2nd_road_class',\n 'pedestrian_crossing-human_control',\n 'pedestrian_crossing-physical_facilities', 'light_conditions',\n 'road_surface_conditions', 'special_conditions_at_site',\n 'carriageway_hazards'])\n", (845, 1141), True, 'import pandas as pd\n'), ((3025, 3039), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3037, 3039), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3668, 3696), 'pathlib.Path', 'Path', (['"""../data/vehicles.csv"""'], {}), "('../data/vehicles.csv')\n", (3672, 3696), False, 'from pathlib import Path\n'), ((3836, 3857), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (3847, 3857), True, 'import pandas as pd\n'), ((3942, 4356), 'pandas.get_dummies', 'pd.get_dummies', (['vehicles'], {'columns': "['Vehicle_Type', 'Towing_and_Articulation', 'Vehicle_Manoeuvre',\n 'Vehicle_Location-Restricted_Lane', 'Junction_Location',\n 'Skidding_and_Overturning', 'Hit_Object_in_Carriageway',\n 'Vehicle_Leaving_Carriageway', 'Hit_Object_off_Carriageway',\n '1st_Point_of_Impact', 'Journey_Purpose_of_Driver', 'Propulsion_Code',\n 'Driver_IMD_Decile', 'Driver_Home_Area_Type']"}), "(vehicles, columns=['Vehicle_Type', 'Towing_and_Articulation',\n 'Vehicle_Manoeuvre', 'Vehicle_Location-Restricted_Lane',\n 'Junction_Location', 'Skidding_and_Overturning',\n 'Hit_Object_in_Carriageway', 'Vehicle_Leaving_Carriageway',\n 'Hit_Object_off_Carriageway', '1st_Point_of_Impact',\n 'Journey_Purpose_of_Driver', 'Propulsion_Code', 'Driver_IMD_Decile',\n 'Driver_Home_Area_Type'])\n", (3956, 4356), True, 'import pandas as pd\n'), ((6539, 6553), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (6551, 6553), False, 'from sklearn.preprocessing import MinMaxScaler\n')] |
# -*- coding: utf-8 -*
from typing import Dict
from yacs.config import CfgNode
from .backbone import builder as backbone_builder
from .loss import builder as loss_builder
from .task_head import builder as head_builder
from .task_model import builder as task_builder
def build_model(
task: str,
cfg: CfgNode,
):
r"""
Builder function.
Arguments
---------
task: str
builder task name (track|vos)
cfg: CfgNode
buidler configuration
Returns
-------
torch.nn.Module
module built by builder
"""
if task == "track":
backbone = backbone_builder.build(task, cfg.backbone)
losses = loss_builder.build(task, cfg.losses)
head = head_builder.build(task, cfg.task_head)
task_model = task_builder.build(task, cfg.task_model, backbone, head,
losses)
return task_model
else:
print("model for task {} is not complted".format(task))
exit(-1)
def get_config() -> Dict[str, CfgNode]:
r"""
Get available component list config
Returns
-------
Dict[str, CfgNode]
config with list of available components
"""
cfg_dict = {"track": CfgNode(), "vos": CfgNode()}
for task in cfg_dict:
cfg = cfg_dict[task]
cfg["backbone"] = backbone_builder.get_config()[task]
cfg["losses"] = loss_builder.get_config()[task]
cfg["task_model"] = task_builder.get_config()[task]
cfg["task_head"] = head_builder.get_config()[task]
return cfg_dict
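# --- Hedged usage sketch (added) ---
# Typical wiring: fetch the default component config for a task and hand it
# to build_model(). In practice the returned CfgNode is merged with an
# experiment file before building; that step is omitted here.
if __name__ == "__main__":
    track_cfg = get_config()["track"]
    model = build_model("track", track_cfg)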
| [
"yacs.config.CfgNode"
] | [((1234, 1243), 'yacs.config.CfgNode', 'CfgNode', ([], {}), '()\n', (1241, 1243), False, 'from yacs.config import CfgNode\n'), ((1252, 1261), 'yacs.config.CfgNode', 'CfgNode', ([], {}), '()\n', (1259, 1261), False, 'from yacs.config import CfgNode\n')] |
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import signal
import abc
import logging
from time import sleep
from multiprocessing import Process
from argparse import ArgumentParser
from levitas.lib.modificationmonitor import ModificationMonitor
from .settings import SettingMissing
log = logging.getLogger("levitas.lib.daemonize")
def cli(daemon_class, daemon_args=[], daemon_kwargs={}, umask=0):
"""
Command-line interface to control a daemon.
@param daemon_class: Subclass of L{AbstractDaemon}.
@param daemon_args: Arguments to instantiate the daemon.
@param daemon_kwargs: Named arguments to instantiate the daemon.
@param umask: file mode creation mask.
"""
name = os.path.basename(sys.argv[0])
options = CLIOptions(name)
try:
options.parse_args()
except CLIOptionError as err:
sys.stderr.write(str(err))
sys.exit(1)
sys.stdout.write("%s %s: " % (options.action or "start", name))
if options.reloader and "MODIFICATIONMONITOR_STARTED" not in os.environ:
sys.stdout.write("Start ModificationMonitor\n")
ModificationMonitor()
sys.exit(0)
try:
dz = Daemonizer(daemon_class,
chdir=os.getcwd(),
umask=umask,
daemon_args=daemon_args,
daemon_kwargs=daemon_kwargs)
if dz.do_action(options.action, options.pidfile):
sys.stdout.write("done\n")
return True
else:
sys.stdout.write("failed\n")
return False
except SettingMissing as err:
        sys.stderr.write(str(err))
class AbstractDaemon(metaclass=abc.ABCMeta):
@abc.abstractmethod
def start(self):
pass
@abc.abstractmethod
def stop(self):
pass
class Daemonizer(Process):
def __init__(self, daemon_class,
chdir="/", umask=0,
daemon_args=[], daemon_kwargs={}):
if not issubclass(daemon_class, AbstractDaemon):
raise TypeError("%s is not subclass of %s"
% (str(daemon_class), str(AbstractDaemon)))
Process.__init__(self)
self.daemon_class = daemon_class
self.chdir = chdir
self.umask = umask
self.daemon_args = daemon_args
self.daemon_kwargs = daemon_kwargs
self.pidfile = None
self.daemon_process = None
self._daemonize = False
def read_pidfile(self):
try:
f = open(self.pidfile, "r")
pid = int(f.read().strip())
f.close()
except IOError:
pid = None
return pid
def do_action(self, action, pidfile):
if action not in ["start", "stop", "restart", "foreground"]:
action = "foreground"
self.pidfile = pidfile
if pidfile is not None:
pid = self.read_pidfile()
else:
pid = None
if action == "start":
return self.do_start_action(pid)
elif action == "stop":
return self.do_stop_action(pid)
elif action == "restart":
if self.do_stop_action(pid):
pid = self.read_pidfile()
return self.do_start_action(pid)
else:
return False
elif action == "foreground":
# Start as a subprocess without making a daemon
self.start()
return True
def do_start_action(self, pid):
if pid:
msg = "Start aborted, pid-file '%s' exist.\n"
sys.stderr.write(msg % self.pidfile)
return False
self._daemonize = True
self.start()
return True
def do_stop_action(self, pid):
if not pid:
msg = "Could not stop process, missing pid-file '%s'.\n"
sys.stderr.write(msg % self.pidfile)
return False
try:
while True:
os.kill(pid, signal.SIGTERM)
sleep(0.1)
except OSError:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
return True
def setsignals(self):
signal.signal(signal.SIGTERM, self.sigexit)
signal.signal(signal.SIGHUP, self.sigexit)
signal.signal(signal.SIGINT, self.sigexit)
signal.signal(signal.SIGQUIT, self.sigexit)
def sigexit(self, sig, frame):
log.debug("Stop process")
self.daemon_process.stop()
sys.exit(0)
def run(self):
# Make a daemon
if self._daemonize:
self.daemonize()
try:
self.start_process()
except:
raise
def start_process(self):
self.setsignals()
os.chdir(self.chdir)
self.daemon_process = self.daemon_class(*self.daemon_args,
**self.daemon_kwargs)
self.daemon_process.start()
def daemonize(self):
pid = os.fork()
if pid != 0:
# Parent
os._exit(0)
# Child
os.close(0)
sys.stdin = sys.__stdin__ = open("/dev/null")
os.chdir(self.chdir)
os.umask(self.umask)
os.setsid()
pid = str(os.getpid())
if self.pidfile:
            f = open(self.pidfile, "w+")
f.write("%s\n" % pid)
f.close()
class CLIOptionError(Exception):
pass
class CLIOptions(object):
def __init__(self, name):
self.name = name
self.parser = ArgumentParser()
self.pidfile = None
self.action = None
self.parser.add_argument("action", type=str, nargs='?',
choices=["start", "stop", "restart", "foreground"])
self.parser.add_argument("-l", "--logfile",
dest="logfile",
type=str,
help="Path to logfile (optional)")
self.parser.add_argument("-c", "--logfilecount",
dest="logfilecount",
type=int, default=0,
help="Count of old logfiles to be saved. (default: 0)")
self.parser.add_argument("-v", "--verbose",
dest="verbose",
action="store_true",
help="vebose output")
self.parser.add_argument("-s", "--SETTINGS",
dest="settings_module",
type=str,
help="SETTINGS module (required)",
metavar="SETTINGS_MODULE")
self.parser.add_argument("-r", "--RELOADER",
dest="reloader",
action="store_true",
help="Start with autoreloader")
self.parser.add_argument("-p", "--pidfile",
dest="pidfile",
type=str,
default="/var/run/%s.pid" % self.name,
help="pidfile")
def parse_args(self):
args = self.parser.parse_args()
logfile = args.logfile
logfilecount = args.logfilecount
self.pidfile = args.pidfile
self.action = args.action or "foreground"
self.reloader = args.reloader
if hasattr(args, "settings_module"):
if args.settings_module:
os.environ["LEVITAS_SETTINGS"] = args.settings_module
else:
self.parser.print_help()
msg = "option --setting required \n\n"
raise CLIOptionError(msg)
if self.action == "start":
self._initLogging(args.verbose, logfile, logfilecount)
elif self.action == "foreground":
if logfile is None:
logfile = "console"
self._initLogging(args.verbose, logfile, logfilecount)
def _initLogging(self, verbose=False, logfile=None, logfilecount=0):
log = logging.getLogger()
if logfile == "console":
h = logging.StreamHandler()
elif logfile is not None:
from logging.handlers import RotatingFileHandler
doRotation = True if os.path.exists(logfile) else False
h = RotatingFileHandler(logfile, backupCount=logfilecount)
if doRotation:
h.doRollover()
else:
return
if verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s "
"- %(levelname)s - %(message)s")
h.setFormatter(formatter)
log.addHandler(h)
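# --- Hedged usage sketch (added, not part of the original module) ---
# A minimal daemon built on this module: subclass AbstractDaemon and hand the
# class to cli(). The sleep loop is illustrative only, and because of the
# relative `.settings` import this module is not directly runnable as a
# script; the guard below just shows the intended entry point.
class _SleepDaemon(AbstractDaemon):
    def start(self):
        while True:
            sleep(1)  # a real daemon would do its work here
    def stop(self):
        pass
if __name__ == "__main__":
    cli(_SleepDaemon)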
| [
"logging.getLogger",
"logging.StreamHandler",
"time.sleep",
"sys.exit",
"os.fork",
"os.remove",
"os.path.exists",
"os.kill",
"argparse.ArgumentParser",
"os.umask",
"os.getpid",
"os.close",
"logging.handlers.RotatingFileHandler",
"sys.stderr.write",
"os.setsid",
"signal.signal",
"mult... | [((878, 920), 'logging.getLogger', 'logging.getLogger', (['"""levitas.lib.daemonize"""'], {}), "('levitas.lib.daemonize')\n", (895, 920), False, 'import logging\n'), ((1298, 1327), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1314, 1327), False, 'import os\n'), ((1500, 1563), 'sys.stdout.write', 'sys.stdout.write', (["('%s %s: ' % (options.action or 'start', name))"], {}), "('%s %s: ' % (options.action or 'start', name))\n", (1516, 1563), False, 'import sys\n'), ((1654, 1701), 'sys.stdout.write', 'sys.stdout.write', (['"""Start ModificationMonitor\n"""'], {}), "('Start ModificationMonitor\\n')\n", (1670, 1701), False, 'import sys\n'), ((1710, 1731), 'levitas.lib.modificationmonitor.ModificationMonitor', 'ModificationMonitor', ([], {}), '()\n', (1729, 1731), False, 'from levitas.lib.modificationmonitor import ModificationMonitor\n'), ((1740, 1751), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1748, 1751), False, 'import sys\n'), ((2789, 2811), 'multiprocessing.Process.__init__', 'Process.__init__', (['self'], {}), '(self)\n', (2805, 2811), False, 'from multiprocessing import Process\n'), ((4871, 4914), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'self.sigexit'], {}), '(signal.SIGTERM, self.sigexit)\n', (4884, 4914), False, 'import signal\n'), ((4923, 4965), 'signal.signal', 'signal.signal', (['signal.SIGHUP', 'self.sigexit'], {}), '(signal.SIGHUP, self.sigexit)\n', (4936, 4965), False, 'import signal\n'), ((4974, 5016), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'self.sigexit'], {}), '(signal.SIGINT, self.sigexit)\n', (4987, 5016), False, 'import signal\n'), ((5025, 5068), 'signal.signal', 'signal.signal', (['signal.SIGQUIT', 'self.sigexit'], {}), '(signal.SIGQUIT, self.sigexit)\n', (5038, 5068), False, 'import signal\n'), ((5190, 5201), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5198, 5201), False, 'import sys\n'), ((5455, 5475), 'os.chdir', 'os.chdir', (['self.chdir'], {}), '(self.chdir)\n', (5463, 5475), False, 'import os\n'), ((5697, 5706), 'os.fork', 'os.fork', ([], {}), '()\n', (5704, 5706), False, 'import os\n'), ((5806, 5817), 'os.close', 'os.close', (['(0)'], {}), '(0)\n', (5814, 5817), False, 'import os\n'), ((5880, 5900), 'os.chdir', 'os.chdir', (['self.chdir'], {}), '(self.chdir)\n', (5888, 5900), False, 'import os\n'), ((5909, 5929), 'os.umask', 'os.umask', (['self.umask'], {}), '(self.umask)\n', (5917, 5929), False, 'import os\n'), ((5938, 5949), 'os.setsid', 'os.setsid', ([], {}), '()\n', (5947, 5949), False, 'import os\n'), ((6275, 6291), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (6289, 6291), False, 'from argparse import ArgumentParser\n'), ((8947, 8966), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (8964, 8966), False, 'import logging\n'), ((9524, 9597), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (9541, 9597), False, 'import logging\n'), ((1479, 1490), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1487, 1490), False, 'import sys\n'), ((2056, 2082), 'sys.stdout.write', 'sys.stdout.write', (['"""done\n"""'], {}), "('done\\n')\n", (2072, 2082), False, 'import sys\n'), ((2133, 2161), 'sys.stdout.write', 'sys.stdout.write', (['"""failed\n"""'], {}), "('failed\\n')\n", (2149, 2161), False, 'import sys\n'), ((2229, 2250), 'sys.stderr.write', 'sys.stderr.write', (['err'], {}), '(err)\n', (2245, 2250), False, 'import sys\n'), ((4257, 
4293), 'sys.stderr.write', 'sys.stderr.write', (['(msg % self.pidfile)'], {}), '(msg % self.pidfile)\n', (4273, 4293), False, 'import sys\n'), ((4532, 4568), 'sys.stderr.write', 'sys.stderr.write', (['(msg % self.pidfile)'], {}), '(msg % self.pidfile)\n', (4548, 4568), False, 'import sys\n'), ((5761, 5772), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (5769, 5772), False, 'import os\n'), ((5977, 5988), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5986, 5988), False, 'import os\n'), ((9016, 9039), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (9037, 9039), False, 'import logging\n'), ((1834, 1845), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1843, 1845), False, 'import os\n'), ((4647, 4675), 'os.kill', 'os.kill', (['pid', 'signal.SIGTERM'], {}), '(pid, signal.SIGTERM)\n', (4654, 4675), False, 'import os\n'), ((4692, 4702), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (4697, 4702), False, 'from time import sleep\n'), ((4742, 4770), 'os.path.exists', 'os.path.exists', (['self.pidfile'], {}), '(self.pidfile)\n', (4756, 4770), False, 'import os\n'), ((9219, 9273), 'logging.handlers.RotatingFileHandler', 'RotatingFileHandler', (['logfile'], {'backupCount': 'logfilecount'}), '(logfile, backupCount=logfilecount)\n', (9238, 9273), False, 'from logging.handlers import RotatingFileHandler\n'), ((4788, 4811), 'os.remove', 'os.remove', (['self.pidfile'], {}), '(self.pidfile)\n', (4797, 4811), False, 'import os\n'), ((9168, 9191), 'os.path.exists', 'os.path.exists', (['logfile'], {}), '(logfile)\n', (9182, 9191), False, 'import os\n')] |
"""Class implementation for the stop_propagation interface.
"""
from apysc._type.variable_name_interface import VariableNameInterface
class StopPropagationInterface(VariableNameInterface):
def stop_propagation(self) -> None:
"""
Stop event propagation.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.stop_propagation, locals_=locals(),
module_name=__name__, class_=StopPropagationInterface):
expression: str = (
f'{self.variable_name}.stopPropagation();'
)
ap.append_js_expression(expression=expression)
| [
"apysc.append_js_expression"
] | [((618, 664), 'apysc.append_js_expression', 'ap.append_js_expression', ([], {'expression': 'expression'}), '(expression=expression)\n', (641, 664), True, 'import apysc as ap\n')] |
# import json
import uuid
from django.apps import apps
from django.core import serializers
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from django.shortcuts import redirect
from django.conf import settings
from .api_helpers import *
Episode = apps.get_model('episodes', 'Episode')
SubscriptionRequest = apps.get_model('subscribers', 'SubscriptionRequest')
Subscriber = apps.get_model('subscribers', 'Subscriber')
def get_episodes(request):
episodes = Episode.objects.all()
res = serializers.serialize("json", episodes)
return HttpResponse(res, content_type='application/json')
@csrf_exempt
def create_new_subscription_request(request):
if not valid_method('POST', request):
return error_response('Error: Method must be POST', 405)
email = request.POST.get('email', False)
if not email:
return error_response('Error: No email provided in request', 422)
subscription_request, created_new = SubscriptionRequest.objects.get_or_create(
email=email)
if not created_new:
subscription_request.token = uuid.uuid4()
subscription_request.save()
if send_confirmation_email(subscription_request):
return response('Email sent to ' + email)
else:
return error_response('Unable to send email to ' + email, 500)
def create_subscriber(request):
email = request.GET.get('email', False)
token = request.GET.get('token', False)
if (not email or not token):
return error_response("Error: Unable to process request. Missing information", 422)
    subscription_request = SubscriptionRequest.objects.filter(email=email, token=token).first()
if not subscription_request:
return error_response("Error: Subscription request not found", 404)
subscriber, created_new = Subscriber.objects.get_or_create(email=email)
if created_new:
exists = 'False'
else:
exists = 'True'
return redirect('/thanks/?email=' + email + '&exists=' + exists)
def thanks(request):
root = settings.HOST_URL
email = request.GET.get('email', False)
exists = request.GET.get('exists', False)
return render(request,
'api/thanks.html',
{
'email': email,
'root': root,
'exists': exists
})
| [
"django.shortcuts.render",
"django.http.HttpResponse",
"uuid.uuid4",
"django.shortcuts.redirect",
"django.core.serializers.serialize",
"django.apps.apps.get_model"
] | [((328, 365), 'django.apps.apps.get_model', 'apps.get_model', (['"""episodes"""', '"""Episode"""'], {}), "('episodes', 'Episode')\n", (342, 365), False, 'from django.apps import apps\n'), ((388, 440), 'django.apps.apps.get_model', 'apps.get_model', (['"""subscribers"""', '"""SubscriptionRequest"""'], {}), "('subscribers', 'SubscriptionRequest')\n", (402, 440), False, 'from django.apps import apps\n'), ((454, 497), 'django.apps.apps.get_model', 'apps.get_model', (['"""subscribers"""', '"""Subscriber"""'], {}), "('subscribers', 'Subscriber')\n", (468, 497), False, 'from django.apps import apps\n'), ((574, 613), 'django.core.serializers.serialize', 'serializers.serialize', (['"""json"""', 'episodes'], {}), "('json', episodes)\n", (595, 613), False, 'from django.core import serializers\n'), ((625, 675), 'django.http.HttpResponse', 'HttpResponse', (['res'], {'content_type': '"""application/json"""'}), "(res, content_type='application/json')\n", (637, 675), False, 'from django.http import HttpResponse\n'), ((1987, 2044), 'django.shortcuts.redirect', 'redirect', (["('/thanks/?email=' + email + '&exists=' + exists)"], {}), "('/thanks/?email=' + email + '&exists=' + exists)\n", (1995, 2044), False, 'from django.shortcuts import redirect\n'), ((2185, 2273), 'django.shortcuts.render', 'render', (['request', '"""api/thanks.html"""', "{'email': email, 'root': root, 'exists': exists}"], {}), "(request, 'api/thanks.html', {'email': email, 'root': root, 'exists':\n exists})\n", (2191, 2273), False, 'from django.shortcuts import render\n'), ((1146, 1158), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1156, 1158), False, 'import uuid\n')] |
import torch
import torch.nn as nn
from diversebranchblock import DiverseBranchBlock
from acb import ACBlock
from dbb_transforms import transI_fusebn
CONV_BN_IMPL = 'base'
DEPLOY_FLAG = False
class ConvBN(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, deploy=False, nonlinear=None):
super().__init__()
if nonlinear is None:
self.nonlinear = nn.Identity()
else:
self.nonlinear = nonlinear
if deploy:
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=groups, bias=True)
else:
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False)
self.bn = nn.BatchNorm2d(num_features=out_channels)
def forward(self, x):
if hasattr(self, 'bn'):
return self.nonlinear(self.bn(self.conv(x)))
else:
return self.nonlinear(self.conv(x))
def switch_to_deploy(self):
kernel, bias = transI_fusebn(self.conv.weight, self.bn)
conv = nn.Conv2d(in_channels=self.conv.in_channels, out_channels=self.conv.out_channels, kernel_size=self.conv.kernel_size,
stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups, bias=True)
conv.weight.data = kernel
conv.bias.data = bias
for para in self.parameters():
para.detach_()
self.__delattr__('conv')
self.__delattr__('bn')
self.conv = conv
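# --- Hedged equivalence check (added) ---
# Sanity-check the BN fusion above: in eval mode (fusion uses the BN running
# statistics) the fused conv must reproduce the conv+BN output.
def _check_convbn_fusion():
    block = ConvBN(in_channels=3, out_channels=8, kernel_size=3,
                   stride=1, padding=1, dilation=1, groups=1)
    block.eval()  # compare against running-stats behaviour
    x = torch.randn(1, 3, 16, 16)
    y_ref = block(x)
    block.switch_to_deploy()
    y_fused = block(x)
    assert torch.allclose(y_ref, y_fused, atol=1e-5)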
def conv_bn(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1):
if CONV_BN_IMPL == 'base' or kernel_size == 1 or kernel_size >= 7:
blk_type = ConvBN
elif CONV_BN_IMPL == 'ACB':
blk_type = ACBlock
else:
blk_type = DiverseBranchBlock
return blk_type(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, deploy=DEPLOY_FLAG)
def conv_bn_relu(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1):
if CONV_BN_IMPL == 'base' or kernel_size == 1 or kernel_size >= 7:
blk_type = ConvBN
elif CONV_BN_IMPL == 'ACB':
blk_type = ACBlock
else:
blk_type = DiverseBranchBlock
return blk_type(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, deploy=DEPLOY_FLAG, nonlinear=nn.ReLU())
def switch_conv_bn_impl(block_type):
assert block_type in ['base', 'DBB', 'ACB']
global CONV_BN_IMPL
CONV_BN_IMPL = block_type
def switch_deploy_flag(deploy):
global DEPLOY_FLAG
DEPLOY_FLAG = deploy
print('deploy flag: ', DEPLOY_FLAG)
def build_model(arch):
if arch == 'ResNet-18':
from resnet import create_Res18
model = create_Res18()
elif arch == 'ResNet-50':
from resnet import create_Res50
model = create_Res50()
elif arch == 'MobileNet':
from mobilenet import create_MobileNet
model = create_MobileNet()
else:
raise ValueError('TODO')
    return model
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"resnet.create_Res50",
"torch.nn.Conv2d",
"resnet.create_Res18",
"mobilenet.create_MobileNet",
"dbb_transforms.transI_fusebn",
"torch.nn.Identity"
] | [((1321, 1361), 'dbb_transforms.transI_fusebn', 'transI_fusebn', (['self.conv.weight', 'self.bn'], {}), '(self.conv.weight, self.bn)\n', (1334, 1361), False, 'from dbb_transforms import transI_fusebn\n'), ((1377, 1625), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self.conv.in_channels', 'out_channels': 'self.conv.out_channels', 'kernel_size': 'self.conv.kernel_size', 'stride': 'self.conv.stride', 'padding': 'self.conv.padding', 'dilation': 'self.conv.dilation', 'groups': 'self.conv.groups', 'bias': '(True)'}), '(in_channels=self.conv.in_channels, out_channels=self.conv.\n out_channels, kernel_size=self.conv.kernel_size, stride=self.conv.\n stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=\n self.conv.groups, bias=True)\n', (1386, 1625), True, 'import torch.nn as nn\n'), ((3288, 3302), 'resnet.create_Res18', 'create_Res18', ([], {}), '()\n', (3300, 3302), False, 'from resnet import create_Res18\n'), ((464, 477), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (475, 477), True, 'import torch.nn as nn\n'), ((574, 747), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'groups': 'groups', 'bias': '(True)'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding, dilation=dilation, groups=\n groups, bias=True)\n', (583, 747), True, 'import torch.nn as nn\n'), ((814, 988), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'groups': 'groups', 'bias': '(False)'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding, dilation=dilation, groups=\n groups, bias=False)\n', (823, 988), True, 'import torch.nn as nn\n'), ((1045, 1086), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'out_channels'}), '(num_features=out_channels)\n', (1059, 1086), True, 'import torch.nn as nn\n'), ((2906, 2915), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2913, 2915), True, 'import torch.nn as nn\n'), ((3389, 3403), 'resnet.create_Res50', 'create_Res50', ([], {}), '()\n', (3401, 3403), False, 'from resnet import create_Res50\n'), ((3497, 3515), 'mobilenet.create_MobileNet', 'create_MobileNet', ([], {}), '()\n', (3513, 3515), False, 'from mobilenet import create_MobileNet\n')] |
from dataclasses import dataclass, field
from typing import Dict
import perde
import pytest
from util import FORMATS, FORMATS_EXCEPT
"""rust
#[derive(Serialize, Debug, new)]
struct Plain {
a: String,
b: String,
c: u64,
}
add!(Plain {"xxx".into(), "yyy".into(), 3});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_plain(m):
@dataclass
class Plain:
a: str
b: str
c: int
m.repack_type(Plain)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename_all = "camelCase")]
struct RenameAll {
pen_pineapple: String,
apple_pen: String,
}
add!(RenameAll {"xxx".into(), "yyy".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_all(m):
@perde.attr(rename_all="camelCase")
@dataclass
class RenameAll:
pen_pineapple: str
apple_pen: str
m.repack_type(RenameAll)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllSerialize", rename_all = "PascalCase")]
struct RenameAllSerializeOutput {
pen_pineapple: String,
apple_pen: String,
}
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllSerialize")]
struct RenameAllSerializeInput {
pen_pineapple: String,
apple_pen: String,
}
add!(RenameAllSerializeInput {"--".into(), "==".into()});
add!(RenameAllSerializeOutput {"--".into(), "==".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_all_serialize(m):
@perde.attr(rename_all_serialize="PascalCase")
@dataclass
class RenameAllSerialize:
pen_pineapple: str
apple_pen: str
d = m.unpack_data("RenameAllSerializeInput", astype=RenameAllSerialize)
v = m.dumps(d)
e = m.data("RenameAllSerializeOutput")
assert v == e
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllDeserialize")]
struct RenameAllDeserializeOutput {
pen_pineapple: String,
apple_pen: String,
}
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllDeserialize", rename_all = "SCREAMING_SNAKE_CASE")]
struct RenameAllDeserializeInput {
pen_pineapple: String,
apple_pen: String,
}
add!(RenameAllDeserializeInput {"--".into(), "==".into()});
add!(RenameAllDeserializeOutput {"--".into(), "==".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_all_deserialize(m):
@perde.attr(rename_all_deserialize="SCREAMING_SNAKE_CASE")
@dataclass
class RenameAllDeserialize:
pen_pineapple: str
apple_pen: str
d = m.unpack_data("RenameAllDeserializeInput", astype=RenameAllDeserialize)
v = m.dumps(d)
e = m.data("RenameAllDeserializeOutput")
assert v == e
"""rust
#[derive(Serialize, Debug, new)]
struct DenyUnknownFields {
x: String,
y: i64,
z: i64,
q: String,
}
add!(DenyUnknownFields {"aaaaa".into(), 1, -2, "unknown".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_deny_unknown_fields(m):
@dataclass
class NoDenyUnknownFields:
x: str
y: int
z: int
@perde.attr(deny_unknown_fields=True)
@dataclass
class DenyUnknownFields:
x: str
y: int
z: int
e = m.unpack_data("DenyUnknownFields", astype=NoDenyUnknownFields)
assert e == NoDenyUnknownFields("aaaaa", 1, -2)
with pytest.raises(Exception) as e:
m.unpack_data("DenyUnknownFields", astype=DenyUnknownFields)
print(f"{e}")
"""rust
#[derive(Serialize, Debug, new)]
struct Rename {
a: String,
#[serde(rename = "x")]
b: String,
c: u64,
}
add!(Rename {"xxx".into(), "yyy".into(), 3});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename(m):
@dataclass
class Rename:
a: str
b: str = field(metadata={"perde_rename": "x"})
c: int
m.repack_type(Rename)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename_all = "camelCase")]
struct RenameAllRename {
pen_pineapple: String,
#[serde(rename = "pen_pen")]
apple_pen: String,
}
add!(RenameAllRename {"xxx".into(), "yyy".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_in_rename_all(m):
@perde.attr(rename_all="camelCase")
@dataclass
class RenameAllRename:
pen_pineapple: str
apple_pen: str = field(metadata={"perde_rename": "pen_pen"})
m.repack_type(RenameAllRename)
"""rust
#[derive(Serialize, Debug, new)]
struct NestedRenameChild {
a: String,
#[serde(rename = "d")]
b: String,
}
#[derive(Serialize, Debug, new)]
struct NestedRename {
x: String,
#[serde(rename = "w")]
y: NestedRenameChild,
z: i64,
}
add!(NestedRename
{"xxx".into(),
NestedRenameChild::new("ppp".into(), "qqq".into()),
1111}
except "toml");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("toml"))
def test_nested_rename(m):
@dataclass
class NestedRenameChild:
a: str
b: str = field(metadata={"perde_rename": "d"})
@dataclass
class NestedRename:
x: str
y: NestedRenameChild = field(metadata={"perde_rename": "w"})
z: int
m.repack_type(NestedRename)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename_all = "UPPERCASE")]
struct NestedRenameAllChild {
a: String,
b: String,
}
#[derive(Serialize, Debug, new)]
struct NestedRenameAll {
x: String,
y: NestedRenameAllChild,
z: i64,
}
add!(NestedRenameAll
{"xxx".into(),
NestedRenameAllChild::new("ppp".into(), "qqq".into()),
1111}
except "toml");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("toml"))
def test_nested_rename_all(m):
@perde.attr(rename_all="UPPERCASE")
@dataclass
class NestedRenameAllChild:
a: str
b: str
@dataclass
class NestedRenameAll:
x: str
y: NestedRenameAllChild
z: int
m.repack_type(NestedRenameAll)
"""rust
#[derive(Serialize, Debug, new)]
struct FlattenChild {
a: String,
b: String,
}
#[derive(Serialize, Debug, new)]
struct Flatten {
x: String,
#[serde(flatten)]
y: FlattenChild,
z: i64,
}
add!(Flatten
{"xxx".into(),
FlattenChild::new("ppp".into(), "qqq".into()),
1111}
except "msgpack");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("msgpack"))
def test_flatten(m):
@dataclass
class FlattenChild:
a: str
b: str
@dataclass
class Flatten:
x: str
y: FlattenChild = field(metadata={"perde_flatten": True})
z: int
m.repack_type(Flatten)
"""rust
#[derive(Serialize, Debug, new)]
struct DictFlatten {
x: String,
y: i64,
#[serde(flatten)]
z: IndexMap<String, String>,
}
add!(DictFlatten {"hey".into(), -103223,
{
let mut m = IndexMap::new();
m.insert("pp".into(), "q1".into());
m.insert("ppp".into(), "q2".into());
m.insert("pppp".into(), "q3".into());
m
}}
except "msgpack");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("msgpack"))
def test_dict_flatten(m):
@dataclass
class DictFlatten:
x: str
y: int
z: Dict[str, str] = field(metadata={"perde_flatten": True})
m.repack_type(DictFlatten)
"""rust
#[derive(Serialize, Debug, new)]
struct Flatten2 {
x: String,
a: i64,
b: i64,
}
add!(Flatten2 { "haa".into(), 11, 33 });
"""
@pytest.mark.parametrize("m", FORMATS)
def test_flatten2(m):
@dataclass
class Flatten2Child:
a: int
b: int
@dataclass
class Flatten2:
x: str
y: Flatten2Child = field(metadata={"perde_flatten": True})
m.repack_type(Flatten2)
"""rust
#[derive(Serialize, Debug, new)]
struct DictFlatten2 {
x: String,
y: i64,
pp: String,
ppp: String,
pppp: String,
}
add!(DictFlatten2 {
"hey".into(), -103223,
"q1".into(), "q2".into(), "q3".into()
});
"""
# Hopefully support msgpack.
@pytest.mark.parametrize("m", FORMATS_EXCEPT("msgpack"))
def test_dict_flatten2(m):
@dataclass
class DictFlatten2:
x: str
y: int
z: Dict[str, str] = field(metadata={"perde_flatten": True})
m.repack_type(DictFlatten2)
| [
"util.FORMATS_EXCEPT",
"pytest.mark.parametrize",
"pytest.raises",
"perde.attr",
"dataclasses.field"
] | [((281, 318), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""m"""', 'FORMATS'], {}), "('m', FORMATS)\n", (304, 318), False, 'import pytest\n'), ((640, 677), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""m"""', 'FORMATS'], {}), "('m', FORMATS)\n", (663, 677), False, 'import pytest\n'), ((1330, 1367), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""m"""', 'FORMATS'], {}), "('m', FORMATS)\n", (1353, 1367), False, 'import pytest\n'), ((2199, 2236), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""m"""', 'FORMATS'], {}), "('m', FORMATS)\n", (2222, 2236), False, 'import pytest\n'), ((2789, 2826), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""m"""', 'FORMATS'], {}), "('m', FORMATS)\n", (2812, 2826), False, 'import pytest\n'), ((3510, 3547), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""m"""', 'FORMATS'], {}), "('m', FORMATS)\n", (3533, 3547), False, 'import pytest\n'), ((3955, 3992), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""m"""', 'FORMATS'], {}), "('m', FORMATS)\n", (3978, 3992), False, 'import pytest\n'), ((7160, 7197), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""m"""', 'FORMATS'], {}), "('m', FORMATS)\n", (7183, 7197), False, 'import pytest\n'), ((707, 741), 'perde.attr', 'perde.attr', ([], {'rename_all': '"""camelCase"""'}), "(rename_all='camelCase')\n", (717, 741), False, 'import perde\n'), ((1407, 1452), 'perde.attr', 'perde.attr', ([], {'rename_all_serialize': '"""PascalCase"""'}), "(rename_all_serialize='PascalCase')\n", (1417, 1452), False, 'import perde\n'), ((2278, 2335), 'perde.attr', 'perde.attr', ([], {'rename_all_deserialize': '"""SCREAMING_SNAKE_CASE"""'}), "(rename_all_deserialize='SCREAMING_SNAKE_CASE')\n", (2288, 2335), False, 'import perde\n'), ((2957, 2993), 'perde.attr', 'perde.attr', ([], {'deny_unknown_fields': '(True)'}), '(deny_unknown_fields=True)\n', (2967, 2993), False, 'import perde\n'), ((4032, 4066), 'perde.attr', 'perde.attr', ([], {'rename_all': '"""camelCase"""'}), "(rename_all='camelCase')\n", (4042, 4066), False, 'import perde\n'), ((4660, 4682), 'util.FORMATS_EXCEPT', 'FORMATS_EXCEPT', (['"""toml"""'], {}), "('toml')\n", (4674, 4682), False, 'from util import FORMATS, FORMATS_EXCEPT\n'), ((5476, 5510), 'perde.attr', 'perde.attr', ([], {'rename_all': '"""UPPERCASE"""'}), "(rename_all='UPPERCASE')\n", (5486, 5510), False, 'import perde\n'), ((5416, 5438), 'util.FORMATS_EXCEPT', 'FORMATS_EXCEPT', (['"""toml"""'], {}), "('toml')\n", (5430, 5438), False, 'from util import FORMATS, FORMATS_EXCEPT\n'), ((6096, 6121), 'util.FORMATS_EXCEPT', 'FORMATS_EXCEPT', (['"""msgpack"""'], {}), "('msgpack')\n", (6110, 6121), False, 'from util import FORMATS, FORMATS_EXCEPT\n'), ((6794, 6819), 'util.FORMATS_EXCEPT', 'FORMATS_EXCEPT', (['"""msgpack"""'], {}), "('msgpack')\n", (6808, 6819), False, 'from util import FORMATS, FORMATS_EXCEPT\n'), ((7737, 7762), 'util.FORMATS_EXCEPT', 'FORMATS_EXCEPT', (['"""msgpack"""'], {}), "('msgpack')\n", (7751, 7762), False, 'from util import FORMATS, FORMATS_EXCEPT\n'), ((3216, 3240), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3229, 3240), False, 'import pytest\n'), ((3633, 3670), 'dataclasses.field', 'field', ([], {'metadata': "{'perde_rename': 'x'}"}), "(metadata={'perde_rename': 'x'})\n", (3638, 3670), False, 'from dataclasses import dataclass, field\n'), ((4161, 4204), 'dataclasses.field', 'field', ([], {'metadata': "{'perde_rename': 'pen_pen'}"}), "(metadata={'perde_rename': 'pen_pen'})\n", 
(4166, 4204), False, 'from dataclasses import dataclass, field\n'), ((4787, 4824), 'dataclasses.field', 'field', ([], {'metadata': "{'perde_rename': 'd'}"}), "(metadata={'perde_rename': 'd'})\n", (4792, 4824), False, 'from dataclasses import dataclass, field\n'), ((4911, 4948), 'dataclasses.field', 'field', ([], {'metadata': "{'perde_rename': 'w'}"}), "(metadata={'perde_rename': 'w'})\n", (4916, 4948), False, 'from dataclasses import dataclass, field\n'), ((6289, 6328), 'dataclasses.field', 'field', ([], {'metadata': "{'perde_flatten': True}"}), "(metadata={'perde_flatten': True})\n", (6294, 6328), False, 'from dataclasses import dataclass, field\n'), ((6943, 6982), 'dataclasses.field', 'field', ([], {'metadata': "{'perde_flatten': True}"}), "(metadata={'perde_flatten': True})\n", (6948, 6982), False, 'from dataclasses import dataclass, field\n'), ((7368, 7407), 'dataclasses.field', 'field', ([], {'metadata': "{'perde_flatten': True}"}), "(metadata={'perde_flatten': True})\n", (7373, 7407), False, 'from dataclasses import dataclass, field\n'), ((7888, 7927), 'dataclasses.field', 'field', ([], {'metadata': "{'perde_flatten': True}"}), "(metadata={'perde_flatten': True})\n", (7893, 7927), False, 'from dataclasses import dataclass, field\n')] |
from nifcloud import session
import sys
# --- define --------
# -- Server -------
SERVER_NAME = "testsv"
# --------------------
# -- PRIVATE NW -------
PRIVATE_NW_NAME = 'test'
PRIVATE_NW_IP = 'static'
# --------------------
# -------------------
# ------ update attribute --------------------
def wait_for_instance_running(client, instance_name):
print("wait : ", sys._getframe().f_code.co_name)
    wait_result = None
    try:
waiter = client.get_waiter('instance_running')
wait_result = waiter.wait(
InstanceId=[instance_name, ],
Tenancy=['all', ],
WaiterConfig={
'Delay': 30,
'MaxAttempts': 40
}
)
except Exception as e:
print("exception :", e, "\nin :", sys._getframe().f_code.co_name)
finally:
return wait_result
def wait_for_instance_warning(client):
print("wait : ", sys._getframe().f_code.co_name)
    wait_result = None
    try:
waiter = client.get_waiter('instance_warning')
wait_result = waiter.wait(
InstanceId=[SERVER_NAME, ],
Tenancy=['all', ],
WaiterConfig={
'Delay': 30,
'MaxAttempts': 40
}
)
except Exception as e:
print("exception :", e, "\nin :", sys._getframe().f_code.co_name)
finally:
return wait_result
# ---- change the private Network to which the private NIC is connected
def update_private_network(client, server_name):
try:
"""
client.nifty_update_instance_network_interfaces(
# Target Instance Name
InstanceId='string',
# After Network Config
NetworkInterface=[
{
#Select Setting Network.Exclusive NetworkName
'NetworkId' : 'string',
                #Select Setting Network.Exclusive NetworkId
'NetworkName' : 'string',
#See also NetworkInterface.n.IpAddress in
#https://pfs.nifcloud.com/api/rest/NiftyUpdateInstanceNetworkInterfaces.htm
'IpAddress' : 'string',
},
],
# Reboot Option
# force:Force reboot
# true:Normal ACPI Reboot(default)
# false:Not Reboot
NiftyReboot='true',
)
"""
client.nifty_update_instance_network_interfaces(
InstanceId=server_name,
# After Network Config
NetworkInterface=[
{
'NetworkName': PRIVATE_NW_NAME,
'IpAddress': PRIVATE_NW_IP,
},
],
NiftyReboot='true',
)
print("Private Network Change")
except Exception as e:
print("exception :", e, "\nin :", sys._getframe().f_code.co_name)
sys.exit(1)
# -------------- main ----------------
client = session.get_session().create_client(
"computing",
region_name="jp-east-2",
)
update_private_network(client, SERVER_NAME)
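# --- Hedged follow-up sketch (added) ---
# The NIC change above reboots the instance (NiftyReboot='true'); block
# until it reports running again before using it further.
wait_for_instance_running(client, SERVER_NAME)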
| [
"nifcloud.session.get_session",
"sys._getframe",
"sys.exit"
] | [((2927, 2948), 'nifcloud.session.get_session', 'session.get_session', ([], {}), '()\n', (2946, 2948), False, 'from nifcloud import session\n'), ((2865, 2876), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2873, 2876), False, 'import sys\n'), ((374, 389), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (387, 389), False, 'import sys\n'), ((896, 911), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (909, 911), False, 'import sys\n'), ((762, 777), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (775, 777), False, 'import sys\n'), ((1282, 1297), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (1295, 1297), False, 'import sys\n'), ((2825, 2840), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (2838, 2840), False, 'import sys\n')] |
import requests
import pprint
import json
# Suppress ssl verification warning
requests.packages.urllib3.disable_warnings()
s = requests.Session()
s.auth = ("user", "password")
s.verify = False
host = "localhost"
apis = ["https://api.mercedes-benz.com/vehicledata/v2/vehicles", "https://api.mercedes-benz.com/vehicledata/v2/vehicles" , "https://api.mercedes-benz.com/hazard_warnings/v2", "https://api.mercedes-benz.com/vehicledata_tryout/v2/vehicles", "https://api.mercedes-benz.com/vehicledata_tryout/v2/vehicles"]
licenses = [["Fuel Status", "https://developer.mercedes-benz.com/products/hazard_warnings/details" ],
["Electric Vehicle Status", "https://developer.mercedes-benz.com/products/electric_vehicle_status/details" ],
["Hazard Warnings", "https://developer.mercedes-benz.com/products/hazard_warnings/details" ],
["Fuel Status Tryout", "https://api.mercedes-benz.com/vehicledata_tryout/v2/vehicles" ],
["Electric Vehicle Status Tryout", "https://api.mercedes-benz.com/vehicledata_tryout/v2/vehicles" ]]
offers = [
{
"title": "Fuel Status",
"description": "The Fuel Status data set provides fuel level and the remaining vehicle range of connected vehicles. Applications from fuel suppliers could give Mercedes-Benz drivers individual offers at the right time.",
"keywords": [
"Fuel Status"
],
"publisher": "https://mercedes-benz.com",
"language": "EN",
"license": "https://developer.mercedes-benz.com/products/fuel_status/details",
"sovereign": "https://mercedes-benz.com",
"endpointDocumentation": "https://developer.mercedes-benz.com/products/fuel_status",
"Mantainer": "http://eccenca.com",
"Contact": "ed<EMAIL>"
},
{
"title": "Electric Vehicle Status",
"description": "The Electric Vehicle Status data set provides charge and remaining range of a specific electric vehicle. Knowing these current values, the next charging stop can be predicted.",
"keywords": [
"Electric Vehicle Status"
],
"publisher": "https://mercedes-benz.com",
"language": "EN",
"license": "https://developer.mercedes-benz.com/products/electric_vehicle_status/details",
"sovereign": "https://mercedes-benz.com",
"endpointDocumentation": "https://developer.mercedes-benz.com/products/electric_vehicle_status",
"Mantainer": "http://eccenca.com",
"Contact": "<EMAIL>"
},
{
"title": "Hazard Warnings",
"description": "Benefit from aggregated event data from our connected vehicle fleet to alert your drivers ahead of any dangerous situation. The data set consists of different types of safety-related events, ranging from dangerous traffic events to weather conditions.",
"keywords": [
"Hazard Warnings"
],
"publisher": "https://mercedes-benz.com",
"language": "EN",
"license": "https://developer.mercedes-benz.com/products/hazard_warnings/details",
"sovereign": "https://mercedes-benz.com",
"endpointDocumentation": "https://developer.mercedes-benz.com/products/hazard_warnings",
"Mantainer": "http://eccenca.com",
"Contact": "<EMAIL>"
},
{
"title": "Fuel Status Tryout",
"description": "This is a sandbox for Fuel Status data set provides fuel level and the remaining vehicle range of connected vehicles. Applications from fuel suppliers could give Mercedes-Benz drivers individual offers at the right time.",
"keywords": [
"Fuel Status"
],
"publisher": "https://mercedes-benz.com",
"language": "EN",
"license": "https://developer.mercedes-benz.com/products/fuel_status/details",
"sovereign": "https://mercedes-benz.com",
"endpointDocumentation": "https://developer.mercedes-benz.com/products/fuel_status",
"Mantainer": "http://eccenca.com",
"Contact": "<EMAIL>"
},
{
"title": "Electric Vehicle Status Tryout",
"description": "This is a sandbox for Electric Vehicle Status data set provides charge and remaining range of a specific electric vehicle. Knowing these current values, the next charging stop can be predicted.",
"keywords": [
"Electric Vehicle Status"
],
"publisher": "https://mercedes-benz.com",
"language": "EN",
"license": "https://developer.mercedes-benz.com/products/electric_vehicle_status/details",
"sovereign": "https://mercedes-benz.com",
"endpointDocumentation": "https://developer.mercedes-benz.com/products/electric_vehicle_status",
"Mantainer": "http://eccenca.com",
"Contact": "<EMAIL>"
}
]
representations = [{
"title": "Fuel Status",
"description": "Data representation of Fuel Status data.",
"mediaType": "JSON",
"language": "EN",
"example": "https://github.com/eccenca/DaimlerDataspaceSharedData/blob/main/fuel-status.json"
},
{
"title": "Electric Vehicle Status",
"description": "Data representation of Electric Vehicle Status.",
"mediaType": "JSON",
"language": "EN",
"example": "https://github.com/eccenca/DaimlerDataspaceSharedData/blob/main/electric-vehicle-status.json"
},
{
"title": "Hazard Warnings",
"description": "Data representation of Hazard Warnings data.",
"mediaType": "JSON",
"language": "EN",
"example": "https://github.com/eccenca/DaimlerDataspaceSharedData/blob/main/harzard-warnings.json"
},
{
"title": "Fuel Status Tyout",
"description": "Data representation of Fuel Status data.",
"mediaType": "JSON",
"language": "EN",
"example": "https://github.com/eccenca/DaimlerDataspaceSharedData/blob/main/fuel-status.json"
},
{
"title": "Electric Vehicle Status Tryout",
"description": "Data representation of Electric Vehicle Status.",
"mediaType": "JSON",
"language": "EN",
"example": "https://github.com/eccenca/DaimlerDataspaceSharedData/blob/main/electric-vehicle-status.json"
}
]
def create_policy(title, desc):
    # Build an IDS usage policy; the connector expects the policy JSON as a
    # string inside a {"value": ...} wrapper. The title and description come
    # from the caller instead of being hard-coded twice.
    value = f'''{{
      "@context" : {{
        "ids" : "https://w3id.org/idsa/core/",
        "idsc" : "https://w3id.org/idsa/code/"
      }},
      "@type": "ids:Permission",
      "@id": "https://w3id.org/idsa/autogen/permission/c0bdb9d5-e86a-4bb3-86d2-2b1dc9d226f5",
      "ids:title": [
        {{
          "@value": "{title}",
          "@type": "http://www.w3.org/2001/XMLSchema#string"
        }}
      ],
      "ids:description": [
        {{
          "@value": "{desc}",
          "@type": "http://www.w3.org/2001/XMLSchema#string"
        }}
      ],
      "ids:action": [
        {{
          "@id": "idsc:USE"
        }}
      ]
    }}'''
    json.loads(value)  # fail fast if the assembled policy is not valid JSON
    return s.post(
        "https://" + host + "/api/rules",
        json={"value": value}
    ).headers["Location"]
def get_objects(object):
return s.get(
"https://" + host + "/api/" + object + "s?page=0&size=30"
)
def create_remote_artifact(endpoint):
return s.post(
"https://" + host + "/api/artifacts",
json={"accessUrl": endpoint }
).headers["Location"]
def create_offered_resource(resource):
return s.post("https://" + host + "/api/offers", json=resource).headers["Location"]
def add_resource_to_catalog(catalog, resource):
s.post(catalog + "/offers", json=[resource])
def add_catalog_to_resource(resource, catalog):
s.post(resource + "/catalogs", json=[catalog])
def add_representation_to_resource(resource, representation):
s.post(resource + "/representations", json=[representation])
def add_artifact_to_representation(representation, artifact):
s.post(representation + "/artifacts", json=[artifact])
def add_contract_to_resource(resource, contract):
s.post(resource + "/contracts", json=[contract])
def add_rule_to_contract(contract, rule):
s.post(contract + "/rules", json=[rule])
def create_representation(representation):
return s.post("https://" + host + "/api/representations", json=representation).headers[
"Location"
]
def create_contract():
return s.post("https://" + host + "/api/contracts", json={}).headers["Location"]
def create_catalog():
return s.post("https://" + host + "/api/catalogs", json={}).headers["Location"]
def remove_href(object_href):
    return s.delete(object_href)
def remove_uuid(object_href, uuid):
    return s.delete(object_href, json={'id': uuid})
def remove(object, objects):
    current_objects = json.loads(objects.text)
    for current_object in current_objects["_embedded"][object + 's']:
        object_href = current_object["_links"]["self"]["href"]
        print("Removing " + object + " " + object_href)
        remove_href(object_href)
def remove_object_uuid(object, objects):
    current_objects = json.loads(objects.text)
    for current_object in current_objects["_embedded"][object + 's']:
        object_href = current_object["_links"]["self"]["href"]
        print("Removing " + object + " " + object_href)
        # Extract the trailing UUID from the self href
        uuid = object_href[object_href.rindex("/") + 1:]
        remove_uuid(object_href, uuid)
# Cleaning dataset
object_response = get_objects("catalog")
remove_object_uuid("catalog", object_response)
object_response = get_objects("offer")
remove_object_uuid("resource", object_response)
object_response = get_objects("artifact")
remove_object_uuid("artifact", object_response)
object_response = get_objects("representation")
remove_object_uuid("representation", object_response)
object_response = get_objects("contract")
remove_object_uuid("contract", object_response)
i = 0
catalog = create_catalog()
policy = create_policy(licenses[i][0] + " Usage Policy", "For more details visit " + licenses[i][1])
contract = create_contract()
print("Adding APIS to IDS Catalog:" + catalog)
for api in apis:
offer = create_offered_resource(offers[i])
representation = create_representation(representations[i])
artifact = create_remote_artifact(api)
add_resource_to_catalog(catalog, offer)
add_representation_to_resource(offer, representation)
add_artifact_to_representation(representation, artifact)
add_contract_to_resource(offer, contract)
add_rule_to_contract(contract, policy)
print("Registering " + licenses[i][0] + " in " + artifact )
i = i + 1
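# --- Hedged post-check (added) ---
# List the catalogs again to confirm that the offers registered above are
# now visible in the connector.
print(get_objects("catalog").text)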
| [
"json.loads",
"requests.packages.urllib3.disable_warnings",
"requests.Session"
] | [((80, 124), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', ([], {}), '()\n', (122, 124), False, 'import requests\n'), ((130, 148), 'requests.Session', 'requests.Session', ([], {}), '()\n', (146, 148), False, 'import requests\n'), ((8861, 8878), 'json.loads', 'json.loads', (['value'], {}), '(value)\n', (8871, 8878), False, 'import json\n'), ((10740, 10764), 'json.loads', 'json.loads', (['objects.text'], {}), '(objects.text)\n', (10750, 10764), False, 'import json\n'), ((11030, 11054), 'json.loads', 'json.loads', (['objects.text'], {}), '(objects.text)\n', (11040, 11054), False, 'import json\n')] |
# -*- coding: utf-8 -*-
import logging
from pathlib import Path
import torch
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
from ..utils.data import read_sentences
logger = logging.getLogger('nmtpytorch')
class TextDataset(Dataset):
r"""A PyTorch dataset for sentences.
Arguments:
fname (str or Path): A string or ``pathlib.Path`` object giving
the corpus.
vocab (Vocabulary): A ``Vocabulary`` instance for the given corpus.
bos (bool, optional): If ``True``, a special beginning-of-sentence
"<bos>" marker will be prepended to sentences.
"""
def __init__(self, fname, vocab, bos=False, **kwargs):
self.path = Path(fname)
self.vocab = vocab
self.bos = bos
# Detect glob patterns
self.fnames = sorted(self.path.parent.glob(self.path.name))
if len(self.fnames) == 0:
raise RuntimeError('{} does not exist.'.format(self.path))
elif len(self.fnames) > 1:
logger.info('Multiple files found, using first: {}'.format(self.fnames[0]))
# Read the sentences and map them to vocabulary
self.data, self.lengths = read_sentences(
self.fnames[0], self.vocab, bos=self.bos)
# Dataset size
self.size = len(self.data)
@staticmethod
def to_torch(batch):
return pad_sequence(
[torch.tensor(b, dtype=torch.long) for b in batch], batch_first=False)
def __getitem__(self, idx):
return self.data[idx]
def __len__(self):
return self.size
def __repr__(self):
s = "{} '{}' ({} sentences)\n".format(
self.__class__.__name__, self.fnames[0].name, self.__len__())
return s
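# --- Hedged usage sketch (added) ---
# Wrap a corpus in a DataLoader that pads variable-length sentences with the
# `to_torch` collator above; the batch size is illustrative.
def _example_loader(fname, vocab, batch_size=32):
    from torch.utils.data import DataLoader
    dataset = TextDataset(fname, vocab, bos=True)
    return DataLoader(dataset, batch_size=batch_size,
                      collate_fn=TextDataset.to_torch)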
| [
"logging.getLogger",
"torch.tensor",
"pathlib.Path"
] | [((211, 242), 'logging.getLogger', 'logging.getLogger', (['"""nmtpytorch"""'], {}), "('nmtpytorch')\n", (228, 242), False, 'import logging\n'), ((724, 735), 'pathlib.Path', 'Path', (['fname'], {}), '(fname)\n', (728, 735), False, 'from pathlib import Path\n'), ((1421, 1454), 'torch.tensor', 'torch.tensor', (['b'], {'dtype': 'torch.long'}), '(b, dtype=torch.long)\n', (1433, 1454), False, 'import torch\n')] |
import glob
import nibabel as nib
nii_files = glob.glob('./train3d/*.nii')
for nii_file in nii_files:
nii = nib.load(nii_file)
nib.save(nii, nii_file[:-4] + '_0000.nii.gz')
print(nii_file[:-4] + '.nii.gz')
| [
"nibabel.save",
"glob.glob",
"nibabel.load"
] | [((58, 86), 'glob.glob', 'glob.glob', (['"""./train3d/*.nii"""'], {}), "('./train3d/*.nii')\n", (67, 86), False, 'import glob\n'), ((122, 140), 'nibabel.load', 'nib.load', (['nii_file'], {}), '(nii_file)\n', (130, 140), True, 'import nibabel as nib\n'), ((142, 187), 'nibabel.save', 'nib.save', (['nii', "(nii_file[:-4] + '_0000.nii.gz')"], {}), "(nii, nii_file[:-4] + '_0000.nii.gz')\n", (150, 187), True, 'import nibabel as nib\n')] |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'NAIF PDS4 Bundler'
copyright = '2021 California Institute of Technology'
author = '<NAME>'
# Obtain version from NPB
version_file = "../../src/pds/naif_pds4_bundler/VERSION.txt"
with open(version_file, 'r') as v:
    for line in v:
        if line.strip():
            # The full version, including alpha/beta/rc tags.
            release = line.strip()
version = '.'.join(release.split('.')[0:-1])
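# Added note: for a VERSION.txt containing "1.2.3", the code above yields
# release == "1.2.3" and version == "1.2".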
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.autosectionlabel',
'sphinx_rtd_theme'
]
# Make sure the target is unique
autosectionlabel_prefix_document = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'NAIFPDS4BundlerDoc'
#html_logo = '_static/images/PDS_Planets.png'
#
#html_context = {
# 'css_files': [
# '_static/theme_overrides.css', # override wide tables in RTD theme
# ],
# }
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'naif_pds4_bundler.tex', u'NAIF PDS4 Bundler Documentation',
u'<NAME>', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'naifpds4bundler',
u'NAIF PDS4 Bundler Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NAIFPDS4Bundler',
u'NAIF PDS4 Bundler Documentation',
author, 'NAIFPDS4Bundler', 'Generates a PDS4 SPICE kernel archive.',
'Miscellaneous'),
]
| [
"os.path.abspath"
] | [((593, 613), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (608, 613), False, 'import os\n')] |
import os
import json
from github3.pulls import PullFile
from github3.repos.commit import RepoCommit
def load_fixture(filename):
path = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(path, 'fixtures', filename)
    # use a context manager so the fixture file is closed promptly
    with open(filename, 'r') as fh:
        return fh.read()
def create_pull_files(data):
return [PullFile(f) for f in json.loads(data)]
def create_commits(data):
return [RepoCommit(f) for f in json.loads(data)]
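# Added, hypothetical usage sketch ('pull_files.json' is assumed to exist
# under tests/fixtures/):
#
#   data = load_fixture('pull_files.json')
#   files = create_pull_files(data)   # -> [PullFile, ...]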
| [
"json.loads",
"os.path.join",
"github3.repos.commit.RepoCommit",
"os.path.abspath",
"github3.pulls.PullFile"
] | [((200, 240), 'os.path.join', 'os.path.join', (['path', '"""fixtures"""', 'filename'], {}), "(path, 'fixtures', filename)\n", (212, 240), False, 'import os\n'), ((158, 183), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (173, 183), False, 'import os\n'), ((334, 345), 'github3.pulls.PullFile', 'PullFile', (['f'], {}), '(f)\n', (342, 345), False, 'from github3.pulls import PullFile\n'), ((413, 426), 'github3.repos.commit.RepoCommit', 'RepoCommit', (['f'], {}), '(f)\n', (423, 426), False, 'from github3.repos.commit import RepoCommit\n'), ((355, 371), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (365, 371), False, 'import json\n'), ((436, 452), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (446, 452), False, 'import json\n')] |
#
# One-liner implementation of cPickle
#
from pickle import *
from pickle import __doc__, __version__, format_version, compatible_formats
BadPickleGet = KeyError
UnpickleableError = PicklingError
# ____________________________________________________________
# XXX some temporary dark magic to produce pickled dumps that are
# closer to the ones produced by cPickle in CPython
from pickle import StringIO
PythonPickler = Pickler
class Pickler(PythonPickler):
def __init__(self, *args, **kw):
self.__f = None
if len(args) == 1 and isinstance(args[0], int):
self.__f = StringIO()
PythonPickler.__init__(self, self.__f, args[0], **kw)
else:
PythonPickler.__init__(self, *args, **kw)
def memoize(self, obj):
self.memo[None] = None # cPickle starts counting at one
return PythonPickler.memoize(self, obj)
def getvalue(self):
return self.__f and self.__f.getvalue()
def dump(obj, file, protocol=None, bin=None):
Pickler(file, protocol, bin).dump(obj)
def dumps(obj, protocol=None, bin=None):
file = StringIO()
Pickler(file, protocol, bin).dump(obj)
return file.getvalue()
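# Added sketch: the integer-only constructor mirrors cPickle's
# ``Pickler(protocol)`` form, buffering into an internal StringIO:
#
#   p = Pickler(2)
#   p.dump(['spam', 42])
#   blob = p.getvalue()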
| [
"pickle.StringIO"
] | [((1125, 1135), 'pickle.StringIO', 'StringIO', ([], {}), '()\n', (1133, 1135), False, 'from pickle import StringIO\n'), ((609, 619), 'pickle.StringIO', 'StringIO', ([], {}), '()\n', (617, 619), False, 'from pickle import StringIO\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import paddle
import paddle.fluid as fluid
import numpy as np
import paddle.distributed.fleet as fleet
class Trainer(object):
def __init__(self):
"""
"""
self.place = None
class CPUTrainer(Trainer):
def __init__(self):
super(CPUTrainer, self).__init__()
self.place = fluid.CPUPlace()
self.exe = fluid.Executor(self.place)
def fit(self, model, dataloader, epoch, start_step=10):
fleet.init_worker()
self.exe.run(fluid.default_startup_program())
for epoch_id in range(epoch):
total_time = 0
step = 0
for data in dataloader():
if step > start_step:
start_time = time.time()
loss = self.exe.run(fluid.default_main_program(),
feed=data,
fetch_list=[model.loss.name])
if step > start_step:
end_time = time.time()
total_time += (end_time - start_time)
print(
"worker_index: %d, step%d, train_loss: %f, total time cost = %f, step per second: %f, speed: %f"
% (fleet.worker_index(), step, loss[0], total_time,
(step - start_step) / total_time,
1 / (end_time - start_time)))
step += 1
fleet.stop_worker()
class MultiGPUTrainer(Trainer):
def __init__(self):
super(MultiGPUTrainer, self).__init__()
self.place = fluid.CUDAPlace(
int(os.environ.get('FLAGS_selected_gpus', 0)))
self.exe = fluid.Executor(self.place)
self.exe.run(fluid.default_startup_program())
def fit(self, model, dataloader, epoch, use_dali=False, start_step=10):
for epoch_id in range(epoch):
total_time = 0
step = 0
for data in dataloader:
if step > start_step:
start_time = time.time()
loss = self.exe.run(fluid.default_main_program(),
feed=data,
fetch_list=[model.loss.name],
use_program_cache=True)
if step > start_step:
end_time = time.time()
total_time += (end_time - start_time)
print(
"epoch id: %d, step%d, train_loss: %f, total time cost = %f, step per second: %f, speed: %f"
% (epoch_id, step, loss[0], total_time,
(step - start_step) / total_time,
1 / (end_time - start_time)))
step += 1
if use_dali:
dataloader.reset()
def val(self,
model,
dataloader,
target_list,
current_epoch=-1,
use_dali=False):
self.test_program = model.main_prog.clone(for_test=True)
fetch_target = []
results = {}
for item in target_list:
if item in model.target.keys():
fetch_target.append(model.target[item].name)
results[item] = []
else:
raise Exception("ERROR: Current model only support target: {}".
format(model.target.keys()))
for data in dataloader:
result = self.exe.run(self.test_program,
feed=data,
fetch_list=fetch_target,
use_program_cache=True)
for item in target_list:
results[item].append(np.mean(result[target_list.index(item)]))
log_info = ""
for item in target_list:
log_info += ", {} = {}".format(item, np.mean(results[item]))
if current_epoch > 0:
print("Test Epoch {}{}".format(current_epoch, log_info))
else:
print("Test Result {}".format(log_info))
if use_dali:
dataloader.reset()
def quick_benchmark(self,
model,
dataloader,
start_step=20,
end_step=200):
step = 0
total_time = 0
total_step = 0
counting_time = False
for data in dataloader:
if step > start_step and step <= end_step:
start_time = time.time()
loss = self.exe.run(fluid.default_main_program(),
feed=data,
fetch_list=[],
use_program_cache=True)
if step > start_step and step <= end_step:
end_time = time.time()
total_time += (end_time - start_time)
if step > end_step:
break
step += 1
mean_qps = (end_step - start_step) / total_time
return mean_qps
def benchmark(self,
model,
dataloader,
epoch,
use_dali=False,
start_step=20):
for epoch_id in range(epoch):
total_time = 0
step = 0
for data in dataloader:
if step > start_step and step <= start_step + 100:
start_time = time.time()
loss = self.exe.run(fluid.default_main_program(),
feed=data,
fetch_list=[model.loss.name],
use_program_cache=True)
if step > start_step and step <= start_step + 100:
end_time = time.time()
total_time += (end_time - start_time)
step += 1
average_speed = 100 / total_time
if use_dali:
dataloader.reset()
return average_speed
def benchmark_val(self, model, dataloader, target_list, use_dali=False):
self.test_program = model.main_prog.clone(for_test=True)
fetch_target = []
results = {}
for item in target_list:
if item in model.target.keys():
fetch_target.append(model.target[item].name)
results[item] = []
else:
raise Exception("ERROR: Current model only support target: {}".
format(model.target.keys()))
for data in dataloader:
result = self.exe.run(self.test_program,
feed=data,
fetch_list=fetch_target,
use_program_cache=True)
for item in target_list:
results[item].append(np.mean(result[target_list.index(item)]))
if use_dali:
dataloader.reset()
return results
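# Added, hypothetical usage sketch (building ``model`` and ``dataloader`` is
# project-specific and omitted; the 'acc_top1' target is an assumption):
#
#   trainer = MultiGPUTrainer()
#   trainer.fit(model, dataloader, epoch=2)
#   results = trainer.benchmark_val(model, val_loader, ['acc_top1'])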
| [
"numpy.mean",
"paddle.distributed.fleet.worker_index",
"paddle.fluid.default_startup_program",
"paddle.fluid.CPUPlace",
"os.environ.get",
"paddle.fluid.default_main_program",
"paddle.fluid.Executor",
"paddle.distributed.fleet.stop_worker",
"paddle.distributed.fleet.init_worker",
"time.time"
] | [((960, 976), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (974, 976), True, 'import paddle.fluid as fluid\n'), ((996, 1022), 'paddle.fluid.Executor', 'fluid.Executor', (['self.place'], {}), '(self.place)\n', (1010, 1022), True, 'import paddle.fluid as fluid\n'), ((1092, 1111), 'paddle.distributed.fleet.init_worker', 'fleet.init_worker', ([], {}), '()\n', (1109, 1111), True, 'import paddle.distributed.fleet as fleet\n'), ((2069, 2088), 'paddle.distributed.fleet.stop_worker', 'fleet.stop_worker', ([], {}), '()\n', (2086, 2088), True, 'import paddle.distributed.fleet as fleet\n'), ((2311, 2337), 'paddle.fluid.Executor', 'fluid.Executor', (['self.place'], {}), '(self.place)\n', (2325, 2337), True, 'import paddle.fluid as fluid\n'), ((1133, 1164), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (1162, 1164), True, 'import paddle.fluid as fluid\n'), ((2359, 2390), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (2388, 2390), True, 'import paddle.fluid as fluid\n'), ((2249, 2289), 'os.environ.get', 'os.environ.get', (['"""FLAGS_selected_gpus"""', '(0)'], {}), "('FLAGS_selected_gpus', 0)\n", (2263, 2289), False, 'import os\n'), ((4524, 4546), 'numpy.mean', 'np.mean', (['results[item]'], {}), '(results[item])\n', (4531, 4546), True, 'import numpy as np\n'), ((5151, 5162), 'time.time', 'time.time', ([], {}), '()\n', (5160, 5162), False, 'import time\n'), ((5195, 5223), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (5221, 5223), True, 'import paddle.fluid as fluid\n'), ((5453, 5464), 'time.time', 'time.time', ([], {}), '()\n', (5462, 5464), False, 'import time\n'), ((1362, 1373), 'time.time', 'time.time', ([], {}), '()\n', (1371, 1373), False, 'import time\n'), ((1410, 1438), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (1436, 1438), True, 'import paddle.fluid as fluid\n'), ((1622, 1633), 'time.time', 'time.time', ([], {}), '()\n', (1631, 1633), False, 'import time\n'), ((2663, 2674), 'time.time', 'time.time', ([], {}), '()\n', (2672, 2674), False, 'import time\n'), ((2711, 2739), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (2737, 2739), True, 'import paddle.fluid as fluid\n'), ((2983, 2994), 'time.time', 'time.time', ([], {}), '()\n', (2992, 2994), False, 'import time\n'), ((6073, 6084), 'time.time', 'time.time', ([], {}), '()\n', (6082, 6084), False, 'import time\n'), ((6121, 6149), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (6147, 6149), True, 'import paddle.fluid as fluid\n'), ((6422, 6433), 'time.time', 'time.time', ([], {}), '()\n', (6431, 6433), False, 'import time\n'), ((1867, 1887), 'paddle.distributed.fleet.worker_index', 'fleet.worker_index', ([], {}), '()\n', (1885, 1887), True, 'import paddle.distributed.fleet as fleet\n')] |
#! /mnt/software/unstowable/anaconda/bin/python
import sys
import os
import argparse
import subprocess
def generating_mapping(kraken_report):
kraken_dict = {}
with open (kraken_report, "r") as fp:
for line in fp:
line = line.split("\t")
if line[3] != 'S' and line[3] != '-':
continue
species = line[-1].split()
try:
species = species[0] + "_" + species[1]
except:
print("not a species?? : {}".format(species))
continue
kraken_dict[line[4]] = [species]
return kraken_dict
def kraken_binning(kraken_dict, kraken_out, output_file, abund_dict):
cmd = "cat {} | cut -f1-4".format(kraken_out)
kraken_result = subprocess.check_output(cmd, shell=True)
#print(kraken_result)
kraken_result = kraken_result.strip()
kraken_result = kraken_result.split("\n")
contigs_bin_dict = {}
with open (output_file, "w") as fp:
for item in kraken_result:
tax_id = item.split("\t")[2]
contigs = item.split("\t")[1]
if tax_id in kraken_dict:
species = kraken_dict[tax_id][0]
fp.write(contigs + "\t" + species + "\n")
#output_file = ["{}/BINNING/{}.fasta".format(output_folder, species), "{}/ABUND/sample_{}_bin_{}.abund1".format(output_folder, sample, species)]
#contigs_bin_dict[contigs] = output_file
return contigs_bin_dict
def main(args):
kraken_dict = generating_mapping(args.kraken_report)
abund_dict = {}
#with open(args.abund, "r") as fp:
# for line in fp:
# abund_dict[line.split("\t")[0]] = line
contigs_bin_dict = kraken_binning(kraken_dict, args.kraken_out, args.output, abund_dict)
if __name__ == "__main__":
parser = argparse.ArgumentParser("kraken binning")
mandatory = parser.add_argument_group("mandatory arguments")
mandatory.add_argument("-k", "--kraken_out",
required=True,
help="kraken out file")
mandatory.add_argument("-r", "--kraken_report",
required=True,
help="kraken out report file")
mandatory.add_argument("-c", "--contig",
#required=True,
help="assembled contigs fasta file")
mandatory.add_argument("-o", "--output",
required=True,
help="output file")
mandatory.add_argument("-a", "--abund",
#required=True,
help="abundance file")
args=parser.parse_args()
main(args)
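# Added example invocation (the script name and file paths are hypothetical):
#
#   python kraken_binning.py -k sample.kraken.out -r sample.kraken.report \
#       -o sample_bins.tsv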
| [
"subprocess.check_output",
"argparse.ArgumentParser"
] | [((802, 842), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (825, 842), False, 'import subprocess\n'), ((1975, 2016), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""kraken binning"""'], {}), "('kraken binning')\n", (1998, 2016), False, 'import argparse\n')] |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os.path
from robot.running import ArgInfo, ArgumentSpec
from robot.errors import DataError
from .model import LibraryDoc, KeywordDoc
class JsonDocBuilder:
def build(self, path):
spec = self._parse_spec_json(path)
return self.build_from_dict(spec)
def build_from_dict(self, spec):
libdoc = LibraryDoc(name=spec['name'],
doc=spec['doc'],
version=spec['version'],
type=spec['type'],
scope=spec['scope'],
doc_format=spec['docFormat'],
source=spec['source'],
lineno=int(spec.get('lineno', -1)))
libdoc.data_types.update(spec['dataTypes'].get('enums', []))
libdoc.data_types.update(spec['dataTypes'].get('typedDicts', []))
libdoc.inits = [self._create_keyword(kw) for kw in spec['inits']]
libdoc.keywords = [self._create_keyword(kw) for kw in spec['keywords']]
return libdoc
def _parse_spec_json(self, path):
if not os.path.isfile(path):
raise DataError("Spec file '%s' does not exist." % path)
with open(path) as json_source:
libdoc_dict = json.load(json_source)
return libdoc_dict
def _create_keyword(self, kw):
return KeywordDoc(name=kw.get('name'),
args=self._create_arguments(kw['args']),
doc=kw['doc'],
shortdoc=kw['shortdoc'],
tags=kw['tags'],
source=kw['source'],
lineno=int(kw.get('lineno', -1)))
def _create_arguments(self, arguments):
spec = ArgumentSpec()
setters = {
ArgInfo.POSITIONAL_ONLY: spec.positional_only.append,
ArgInfo.POSITIONAL_ONLY_MARKER: lambda value: None,
ArgInfo.POSITIONAL_OR_NAMED: spec.positional_or_named.append,
ArgInfo.VAR_POSITIONAL: lambda value: setattr(spec, 'var_positional', value),
ArgInfo.NAMED_ONLY_MARKER: lambda value: None,
ArgInfo.NAMED_ONLY: spec.named_only.append,
ArgInfo.VAR_NAMED: lambda value: setattr(spec, 'var_named', value),
}
for arg in arguments:
name = arg['name']
setters[arg['kind']](name)
default = arg.get('defaultValue')
if default is not None:
spec.defaults[name] = default
arg_types = arg['types']
if not spec.types:
spec.types = {}
spec.types[name] = tuple(arg_types)
return spec
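# Added usage sketch ('MyLibrary.json' stands in for a Libdoc spec saved in
# JSON format):
#
#   libdoc = JsonDocBuilder().build('MyLibrary.json')
#   print(libdoc.name, len(libdoc.keywords))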
| [
"json.load",
"robot.running.ArgumentSpec",
"robot.errors.DataError"
] | [((2419, 2433), 'robot.running.ArgumentSpec', 'ArgumentSpec', ([], {}), '()\n', (2431, 2433), False, 'from robot.running import ArgInfo, ArgumentSpec\n'), ((1800, 1850), 'robot.errors.DataError', 'DataError', (['("Spec file \'%s\' does not exist." % path)'], {}), '("Spec file \'%s\' does not exist." % path)\n', (1809, 1850), False, 'from robot.errors import DataError\n'), ((1917, 1939), 'json.load', 'json.load', (['json_source'], {}), '(json_source)\n', (1926, 1939), False, 'import json\n')] |
import os
import random
import shutil
import time
import whisper
from django.conf import settings
from django.test import override_settings
from mock import patch, Mock
from .base import TestCase
from graphite.finders.utils import BaseFinder
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
from graphite.readers.utils import BaseReader
from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index
from graphite.tags.localdatabase import LocalDatabaseTagDB
from graphite.worker_pool.pool import PoolTimeoutError
from graphite.render.datalib import TimeSeries
from graphite.render.evaluator import evaluateTarget
from graphite.util import epoch_to_dt
class StorageTest(TestCase):
def test_fetch(self):
disabled_finder = get_finders('tests.test_storage.DisabledFinder')[0]
legacy_finder = get_finders('tests.test_storage.LegacyFinder')[0]
test_finder = get_finders('tests.test_storage.TestFinder')[0]
remote_finder = get_finders('tests.test_storage.RemoteFinder')[0]
store = Store(
finders=[disabled_finder, legacy_finder, test_finder, remote_finder],
tagdb=get_tagdb('graphite.tags.localdatabase.LocalDatabaseTagDB')
)
# tagb is properly initialized
self.assertIsInstance(store.tagdb, LocalDatabaseTagDB)
# get all enabled finders
finders = store.get_finders()
self.assertEqual(list(finders), [legacy_finder, test_finder, remote_finder])
# get only local finders
finders = store.get_finders(local=True)
self.assertEqual(list(finders), [legacy_finder, test_finder])
# fetch with empty patterns
result = store.fetch([], 1, 2, 3, {})
self.assertEqual(result, [])
# fetch
result = store.fetch(['a.**'], 1, 2, 3, {})
self.assertEqual(len(result), 3)
result.sort(key=lambda node: node['name'])
self.assertEqual(result[0]['name'], 'a.b.c.d')
self.assertEqual(result[0]['pathExpression'], 'a.**')
self.assertEqual(result[1]['name'], 'a.b.c.d')
self.assertEqual(result[1]['pathExpression'], 'a.**')
self.assertEqual(result[2]['name'], 'a.b.c.e')
self.assertEqual(result[2]['pathExpression'], 'a.**')
def test_fetch_pool_timeout(self):
# pool timeout
store = Store(
finders=[RemoteFinder()]
)
def mock_pool_exec(pool, jobs, timeout):
raise PoolTimeoutError()
message = 'Timed out after [-.e0-9]+s for fetch for \[\'a\'\]'
with patch('graphite.storage.pool_exec', mock_pool_exec):
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(log_info.call_args[0][0], message)
def test_fetch_all_failed(self):
# all finds failed
store = Store(
finders=[TestFinder()]
)
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, 'All requests failed for fetch for \[\'a\'\] \(1\)'):
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes')
store = Store(
finders=[TestFinder(), TestFinder()]
)
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, 'All requests failed for fetch for \[\'a\'\] \(2\)'):
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 2)
self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes')
def test_fetch_some_failed(self):
# some finders failed
store = Store(
finders=[TestFinder(), RemoteFinder()]
)
with patch('graphite.storage.log.info') as log_info:
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 1)
store = Store(
finders=[TestFinder(), TestFinder()]
)
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, 'All requests failed for fetch for \[\'a\'\] \(2\)'):
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 2)
self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes')
@override_settings(STORE_FAIL_ON_ERROR=True)
def test_fetch_some_failed_hard_fail_enabled(self):
# all finds failed
store = Store(
finders=[TestFinder(), RemoteFinder()]
)
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, '1 request\(s\) failed for fetch for \[\'a\'\] \(2\)'):
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes')
store = Store(
finders=[TestFinder(), TestFinder()]
)
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, 'All requests failed for fetch for \[\'a\'\] \(2\)'):
list(store.fetch(['a'], 1, 2, 3, {}))
self.assertEqual(log_info.call_count, 2)
self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during fetch for \[\'a\'\] after [-.e0-9]+s: TestFinder.find_nodes')
def test_find(self):
disabled_finder = DisabledFinder()
legacy_finder = LegacyFinder()
test_finder = TestFinder()
remote_finder = RemoteFinder()
store = Store(
finders=[disabled_finder, legacy_finder, test_finder, remote_finder],
tagdb=get_tagdb('graphite.tags.localdatabase.LocalDatabaseTagDB')
)
# find nodes
result = list(store.find('a'))
self.assertEqual(len(result), 5)
for node in result:
if node.path in ['a.b.c.d', 'a.b.c.e']:
self.assertIsInstance(node, LeafNode)
else:
self.assertIsInstance(node, BranchNode)
self.assertTrue(node.path in ['a', 'a.b', 'a.b.c'])
# find leaves only
result = list(store.find('a', leaves_only=True))
self.assertEqual(len(result), 2)
for node in result:
self.assertIsInstance(node, LeafNode)
self.assertTrue(node.path in ['a.b.c.d', 'a.b.c.e'])
# failure threshold
with self.settings(METRICS_FIND_FAILURE_THRESHOLD=1):
with self.assertRaisesRegexp(Exception, 'Query a yields too many results and failed \(failure threshold is 1\)'):
list(store.find('a'))
# warning threshold
with self.settings(METRICS_FIND_WARNING_THRESHOLD=1):
with patch('graphite.storage.log.warning') as log_warning:
list(store.find('a'))
self.assertEqual(log_warning.call_count, 1)
self.assertEqual(
log_warning.call_args[0][0],
'Query a yields large number of results up to 2 (warning threshold is 1)'
)
def test_find_pool_timeout(self):
# pool timeout
store = Store(
finders=[RemoteFinder()]
)
def mock_pool_exec(pool, jobs, timeout):
raise PoolTimeoutError()
message = 'Timed out after [-.e0-9]+s for find <FindQuery: a from \* until \*>'
with patch('graphite.storage.pool_exec', mock_pool_exec):
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.find('a'))
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(log_info.call_args[0][0], message)
def test_find_all_failed(self):
# all finds failed
store = Store(
finders=[TestFinder()]
)
message = 'All requests failed for find <FindQuery: a from \* until \*>'
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.find('a'))
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(
log_info.call_args[0][0],
'Exception during find <FindQuery: a from \* until \*> after [-.e0-9]+s: TestFinder.find_nodes'
)
store = Store(
finders=[TestFinder(), TestFinder()]
)
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, message):
list(store.find('a'))
self.assertEqual(log_info.call_count, 2)
self.assertRegexpMatches(
log_info.call_args[0][0],
'Exception during find <FindQuery: a from \* until \*> after [-.e0-9]+s: TestFinder.find_nodes'
)
@override_settings(REMOTE_STORE_FORWARD_HEADERS=['X-Test1', 'X-Test2'])
def test_extractForwardHeaders(self):
class DummyRequest(object):
META = {
'HTTP_X_TEST1': 'test',
}
headers = extractForwardHeaders(DummyRequest())
self.assertEqual(headers, {'X-Test1': 'test'})
def test_get_index(self):
disabled_finder = DisabledFinder()
# use get_finders so legacy_finder is patched with get_index
legacy_finder = get_finders('tests.test_storage.LegacyFinder')[0]
test_finder = TestFinder()
remote_finder = RemoteFinder()
store = Store(
finders=[disabled_finder, legacy_finder, test_finder, remote_finder],
tagdb=get_tagdb('graphite.tags.localdatabase.LocalDatabaseTagDB')
)
# get index
result = store.get_index()
self.assertEqual(result, ['a.b.c.d', 'a.b.c.e'])
# get local index
result = store.get_index({'localOnly': True})
self.assertEqual(result, ['a.b.c.d'])
def test_get_index_pool_timeout(self):
# pool timeout
store = Store(
finders=[RemoteFinder()]
)
def mock_pool_exec(pool, jobs, timeout):
raise PoolTimeoutError()
with patch('graphite.storage.pool_exec', mock_pool_exec):
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, 'Timed out after .*'):
store.get_index()
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(log_info.call_args[0][0], 'Timed out after [-.e0-9]+s')
def test_get_index_all_failed(self):
# all finders failed
store = Store(
finders=[TestFinder()]
)
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, 'All requests failed for get_index'):
store.get_index()
self.assertEqual(log_info.call_count, 1)
self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during get_index after [-.e0-9]+s: TestFinder.find_nodes')
store = Store(
finders=[TestFinder(), TestFinder()]
)
with patch('graphite.storage.log.info') as log_info:
with self.assertRaisesRegexp(Exception, 'All requests failed for get_index \(2\)'):
store.get_index()
self.assertEqual(log_info.call_count, 2)
self.assertRegexpMatches(log_info.call_args[0][0], 'Exception during get_index after [-.e0-9]+s: TestFinder.find_nodes')
@override_settings(USE_WORKER_POOL=False)
def test_fetch_tag_support(self):
class TestFinderTags(BaseFinder):
tags = True
def find_nodes(self, query):
pass
def fetch(self, patterns, start_time, end_time, now=None, requestContext=None):
if patterns != ['seriesByTag("hello=tiger")', 'seriesByTag("name=notags")', 'seriesByTag("name=testtags")', 'testtags;hello=tiger']:
raise Exception('Unexpected patterns %s' % str(patterns))
return [
{
'pathExpression': 'testtags;hello=tiger',
'name': 'testtags;hello=tiger',
'time_info': (0, 60, 1),
'values': [],
},
{
'pathExpression': 'seriesByTag("hello=tiger")',
'name': 'testtags;hello=tiger',
'time_info': (0, 60, 1),
'values': [],
},
{
'pathExpression': 'seriesByTag("name=testtags")',
'name': 'testtags;hello=tiger',
'time_info': (0, 60, 1),
'values': [],
},
]
tagdb = Mock()
store = Store(
finders=[TestFinderTags()],
tagdb=tagdb
)
request_context = {
'startTime': epoch_to_dt(0),
'endTime': epoch_to_dt(60),
'now': epoch_to_dt(60),
}
with patch('graphite.render.datalib.STORE', store):
results = evaluateTarget(request_context, ['testtags;hello=tiger', 'seriesByTag("hello=tiger")', 'seriesByTag("name=testtags")', 'seriesByTag("name=notags")'])
self.assertEqual(results, [
TimeSeries('testtags;hello=tiger', 0, 60, 1, []),
TimeSeries('testtags;hello=tiger', 0, 60, 1, [], pathExpression='seriesByTag("hello=tiger")'),
TimeSeries('testtags;hello=tiger', 0, 60, 1, [], pathExpression='seriesByTag("name=testtags")'),
])
@override_settings(USE_WORKER_POOL=True)
def test_fetch_no_tag_support(self):
class TestFinderNoTags(BaseFinder):
tags = False
def find_nodes(self, query):
pass
def fetch(self, patterns, start_time, end_time, now=None, requestContext=None):
if patterns != ['notags;hello=tiger']:
raise Exception('Unexpected patterns %s' % str(patterns))
return [
{
'pathExpression': 'notags;hello=tiger',
'name': 'notags;hello=tiger',
'time_info': (0, 60, 1),
'values': [],
}
]
tagdb = Mock()
def mockFindSeries(exprs, requestContext=None):
self.assertEqual(requestContext, request_context)
if exprs == ('hello=tiger',) or exprs == ('name=notags',):
return ['notags;hello=tiger']
if exprs == ('name=testtags',):
return []
raise Exception('Unexpected exprs %s' % str(exprs))
tagdb.find_series.side_effect = mockFindSeries
store = Store(
finders=[TestFinderNoTags()],
tagdb=tagdb
)
with patch('graphite.render.datalib.STORE', store):
request_context = {
'startTime': epoch_to_dt(0),
'endTime': epoch_to_dt(60),
'now': epoch_to_dt(60),
}
results = evaluateTarget(request_context, ['notags;hello=tiger', 'seriesByTag("hello=tiger")', 'seriesByTag("name=testtags")', 'seriesByTag("name=notags")'])
self.assertEqual(tagdb.find_series.call_count, 3)
self.assertEqual(results, [
TimeSeries('notags;hello=tiger', 0, 60, 1, []),
TimeSeries('notags;hello=tiger', 0, 60, 1, [], pathExpression='seriesByTag("hello=tiger")'),
TimeSeries('notags;hello=tiger', 0, 60, 1, [], pathExpression='seriesByTag("name=notags")'),
])
def test_autocomplete(self):
test = self
class TestFinderTags(BaseFinder):
tags = True
def __init__(self, request_limit=100, request_context=None):
self.limit = request_limit
self.context = request_context or {}
def find_nodes(self, query):
pass
def auto_complete_tags(self, exprs, tagPrefix=None, limit=None, requestContext=None):
test.assertEqual(exprs, ['tag1=value1'])
test.assertEqual(tagPrefix, 'test')
test.assertEqual(limit, self.limit)
test.assertEqual(requestContext, self.context)
return ['testtags']
def auto_complete_values(self, exprs, tag, valuePrefix=None, limit=None, requestContext=None):
test.assertEqual(exprs, ['tag1=value1'])
test.assertEqual(tag, 'tag2')
test.assertEqual(valuePrefix, 'test')
test.assertEqual(limit, self.limit)
test.assertEqual(requestContext, self.context)
return ['testtags']
class TestFinderNoTags(BaseFinder):
tags = False
def find_nodes(self, query):
pass
class TestFinderTagsException(TestFinderTags):
def auto_complete_tags(self, exprs, tagPrefix=None, limit=None, requestContext=None):
raise Exception('TestFinderTagsException.auto_complete_tags')
def auto_complete_values(self, exprs, tag, valuePrefix=None, limit=None, requestContext=None):
raise Exception('TestFinderTagsException.auto_complete_values')
class TestFinderTagsTimeout(TestFinderTags):
def auto_complete_tags(self, exprs, tagPrefix=None, limit=None, requestContext=None):
time.sleep(0.1)
return ['testtags']
def auto_complete_values(self, exprs, tag, valuePrefix=None, limit=None, requestContext=None):
time.sleep(0.1)
return ['testtags']
def mockStore(finders, request_limit=100, request_context=None):
tagdb = Mock()
def mockAutoCompleteTags(exprs, tagPrefix=None, limit=None, requestContext=None):
self.assertEqual(exprs, ['tag1=value1'])
self.assertEqual(tagPrefix, 'test')
self.assertEqual(limit, request_limit)
self.assertEqual(requestContext, request_context or {})
return ['testnotags']
tagdb.auto_complete_tags.side_effect = mockAutoCompleteTags
def mockAutoCompleteValues(exprs, tag, valuePrefix=None, limit=None, requestContext=None):
self.assertEqual(exprs, ['tag1=value1'])
self.assertEqual(tag, 'tag2')
self.assertEqual(valuePrefix, 'test')
self.assertEqual(limit, request_limit)
self.assertEqual(requestContext, request_context or {})
return ['testnotags']
tagdb.auto_complete_values.side_effect = mockAutoCompleteValues
return Store(
finders=finders,
tagdb=tagdb,
)
request_context = {}
# test with both tag-enabled and non-tag-enabled finders
store = mockStore([TestFinderTags(), TestFinderNoTags()])
result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_tags.call_count, 1)
self.assertEqual(result, ['testnotags', 'testtags'])
result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_values.call_count, 1)
self.assertEqual(result, ['testnotags', 'testtags'])
# test with no limit & no requestContext
store = mockStore([TestFinderTags(None, {}), TestFinderNoTags()], None, {})
result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test')
self.assertEqual(store.tagdb.auto_complete_tags.call_count, 1)
self.assertEqual(result, ['testnotags', 'testtags'])
result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test')
self.assertEqual(store.tagdb.auto_complete_values.call_count, 1)
self.assertEqual(result, ['testnotags', 'testtags'])
# test with only tag-enabled finder
store = mockStore([TestFinderTags()])
result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_tags.call_count, 0)
self.assertEqual(result, ['testtags'])
result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_values.call_count, 0)
self.assertEqual(result, ['testtags'])
# test with only non-tag-enabled finder
store = mockStore([TestFinderNoTags()])
result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_tags.call_count, 1)
self.assertEqual(result, ['testnotags'])
result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_values.call_count, 1)
self.assertEqual(result, ['testnotags'])
# test with no finders
store = mockStore([])
result = store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_tags.call_count, 0)
self.assertEqual(result, [])
result = store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
self.assertEqual(store.tagdb.auto_complete_values.call_count, 0)
self.assertEqual(result, [])
# test exception handling with one finder
store = mockStore([TestFinderTagsException()])
with self.assertRaisesRegexp(Exception, 'All requests failed for tags for \[\'tag1=value1\'\] test.*'):
store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
with self.assertRaisesRegexp(Exception, 'All requests failed for values for \[\'tag1=value1\'\] tag2 test.*'):
store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
# test exception handling with more than one finder
store = mockStore([TestFinderTagsException(), TestFinderTagsException()])
with self.assertRaisesRegexp(Exception, 'All requests failed for tags for \[\'tag1=value1\'\] test'):
store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
with self.assertRaisesRegexp(Exception, 'All requests failed for values for \[\'tag1=value1\'\] tag2 test'):
store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
# test pool timeout handling
store = mockStore([TestFinderTagsTimeout()])
with self.settings(USE_WORKER_POOL=True, FIND_TIMEOUT=0):
with self.assertRaisesRegexp(Exception, 'Timed out after [-.e0-9]+s for tags for \[\'tag1=value1\'\]'):
store.tagdb_auto_complete_tags(['tag1=value1'], 'test', 100, request_context)
with self.assertRaisesRegexp(Exception, 'Timed out after [-.e0-9]+s for values for \[\'tag1=value1\'\] tag2 test'):
store.tagdb_auto_complete_values(['tag1=value1'], 'tag2', 'test', 100, request_context)
    # fixtures and helpers for the write_index tests below
hostcpu = os.path.join(settings.WHISPER_DIR, 'hosts/hostname/cpu.wsp')
def create_whisper_hosts(self):
worker1 = self.hostcpu.replace('hostname', 'worker1')
worker2 = self.hostcpu.replace('hostname', 'worker2')
bogus_file = os.path.join(settings.WHISPER_DIR, 'a/b/c/bogus_file.txt')
try:
os.makedirs(worker1.replace('cpu.wsp', ''))
os.makedirs(worker2.replace('cpu.wsp', ''))
os.makedirs(bogus_file.replace('bogus_file.txt', ''))
except OSError:
pass
open(bogus_file, 'a').close()
whisper.create(worker1, [(1, 60)])
whisper.create(worker2, [(1, 60)])
ts = int(time.time())
whisper.update(worker1, 1, ts)
whisper.update(worker2, 2, ts)
def wipe_whisper_hosts(self):
try:
os.remove(self.hostcpu.replace('hostname', 'worker1'))
os.remove(self.hostcpu.replace('hostname', 'worker2'))
os.remove(os.path.join(settings.WHISPER_DIR, 'a/b/c/bogus_file.txt'))
shutil.rmtree(self.hostcpu.replace('hostname/cpu.wsp', ''))
shutil.rmtree(os.path.join(settings.WHISPER_DIR, 'a'))
except OSError:
pass
def test_write_index(self):
self.create_whisper_hosts()
self.addCleanup(self.wipe_whisper_hosts)
        self.assertEqual(None, write_index())
        self.assertEqual(None, write_index(settings.INDEX_FILE))
class DisabledFinder(object):
disabled = True
def find_nodes(self, query):
pass
class LegacyFinder(object):
def find_nodes(self, query):
yield BranchNode('a')
yield BranchNode('a.b')
yield BranchNode('a.b.c')
yield LeafNode('a.b.c.d', DummyReader('a.b.c.d'))
class DummyReader(BaseReader):
__slots__ = ('path',)
def __init__(self, path):
self.path = path
def fetch(self, startTime, endTime, now=None, requestContext=None):
npoints = (endTime - startTime) // 10
return (startTime, endTime, 10), [
random.choice([None, 1, 2, 3]) for i in range(npoints)
]
def get_intervals(self):
return IntervalSet([Interval(time.time() - 3600, time.time())])
class RemoteFinder(BaseFinder):
local = False
def find_nodes(self, query):
yield BranchNode('a.b.c')
yield LeafNode('a.b.c.d', DummyReader('a.b.c.d'))
yield LeafNode('a.b.c.e', DummyReader('a.b.c.e'))
class TestFinder(BaseFinder):
def find_nodes(self, query):
raise Exception('TestFinder.find_nodes')
| [
"graphite.node.BranchNode",
"graphite.util.epoch_to_dt",
"whisper.create",
"mock.patch",
"graphite.render.evaluator.evaluateTarget",
"graphite.storage.write_index",
"random.choice",
"mock.Mock",
"graphite.render.datalib.TimeSeries",
"graphite.storage.Store",
"os.path.join",
"graphite.worker_po... | [((4512, 4555), 'django.test.override_settings', 'override_settings', ([], {'STORE_FAIL_ON_ERROR': '(True)'}), '(STORE_FAIL_ON_ERROR=True)\n', (4529, 4555), False, 'from django.test import override_settings\n'), ((8681, 8751), 'django.test.override_settings', 'override_settings', ([], {'REMOTE_STORE_FORWARD_HEADERS': "['X-Test1', 'X-Test2']"}), "(REMOTE_STORE_FORWARD_HEADERS=['X-Test1', 'X-Test2'])\n", (8698, 8751), False, 'from django.test import override_settings\n'), ((11077, 11117), 'django.test.override_settings', 'override_settings', ([], {'USE_WORKER_POOL': '(False)'}), '(USE_WORKER_POOL=False)\n', (11094, 11117), False, 'from django.test import override_settings\n'), ((12919, 12958), 'django.test.override_settings', 'override_settings', ([], {'USE_WORKER_POOL': '(True)'}), '(USE_WORKER_POOL=True)\n', (12936, 12958), False, 'from django.test import override_settings\n'), ((21746, 21806), 'os.path.join', 'os.path.join', (['settings.WHISPER_DIR', '"""hosts/hostname/cpu.wsp"""'], {}), "(settings.WHISPER_DIR, 'hosts/hostname/cpu.wsp')\n", (21758, 21806), False, 'import os\n'), ((12168, 12174), 'mock.Mock', 'Mock', ([], {}), '()\n', (12172, 12174), False, 'from mock import patch, Mock\n'), ((13530, 13536), 'mock.Mock', 'Mock', ([], {}), '()\n', (13534, 13536), False, 'from mock import patch, Mock\n'), ((21975, 22033), 'os.path.join', 'os.path.join', (['settings.WHISPER_DIR', '"""a/b/c/bogus_file.txt"""'], {}), "(settings.WHISPER_DIR, 'a/b/c/bogus_file.txt')\n", (21987, 22033), False, 'import os\n'), ((22274, 22308), 'whisper.create', 'whisper.create', (['worker1', '[(1, 60)]'], {}), '(worker1, [(1, 60)])\n', (22288, 22308), False, 'import whisper\n'), ((22313, 22347), 'whisper.create', 'whisper.create', (['worker2', '[(1, 60)]'], {}), '(worker2, [(1, 60)])\n', (22327, 22347), False, 'import whisper\n'), ((22379, 22409), 'whisper.update', 'whisper.update', (['worker1', '(1)', 'ts'], {}), '(worker1, 1, ts)\n', (22393, 22409), False, 'import whisper\n'), ((22414, 22444), 'whisper.update', 'whisper.update', (['worker2', '(2)', 'ts'], {}), '(worker2, 2, ts)\n', (22428, 22444), False, 'import whisper\n'), ((815, 863), 'graphite.storage.get_finders', 'get_finders', (['"""tests.test_storage.DisabledFinder"""'], {}), "('tests.test_storage.DisabledFinder')\n", (826, 863), False, 'from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index\n'), ((887, 933), 'graphite.storage.get_finders', 'get_finders', (['"""tests.test_storage.LegacyFinder"""'], {}), "('tests.test_storage.LegacyFinder')\n", (898, 933), False, 'from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index\n'), ((955, 999), 'graphite.storage.get_finders', 'get_finders', (['"""tests.test_storage.TestFinder"""'], {}), "('tests.test_storage.TestFinder')\n", (966, 999), False, 'from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index\n'), ((1023, 1069), 'graphite.storage.get_finders', 'get_finders', (['"""tests.test_storage.RemoteFinder"""'], {}), "('tests.test_storage.RemoteFinder')\n", (1034, 1069), False, 'from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index\n'), ((2379, 2397), 'graphite.worker_pool.pool.PoolTimeoutError', 'PoolTimeoutError', ([], {}), '()\n', (2395, 2397), False, 'from graphite.worker_pool.pool import PoolTimeoutError\n'), ((2475, 2526), 'mock.patch', 'patch', (['"""graphite.storage.pool_exec"""', 'mock_pool_exec'], {}), 
"('graphite.storage.pool_exec', mock_pool_exec)\n", (2480, 2526), False, 'from mock import patch, Mock\n'), ((2933, 2967), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (2938, 2967), False, 'from mock import patch, Mock\n'), ((3390, 3424), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (3395, 3424), False, 'from mock import patch, Mock\n'), ((3911, 3945), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (3916, 3945), False, 'from mock import patch, Mock\n'), ((4130, 4164), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (4135, 4164), False, 'from mock import patch, Mock\n'), ((4713, 4747), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (4718, 4747), False, 'from mock import patch, Mock\n'), ((5172, 5206), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (5177, 5206), False, 'from mock import patch, Mock\n'), ((7249, 7267), 'graphite.worker_pool.pool.PoolTimeoutError', 'PoolTimeoutError', ([], {}), '()\n', (7265, 7267), False, 'from graphite.worker_pool.pool import PoolTimeoutError\n'), ((7362, 7413), 'mock.patch', 'patch', (['"""graphite.storage.pool_exec"""', 'mock_pool_exec'], {}), "('graphite.storage.pool_exec', mock_pool_exec)\n", (7367, 7413), False, 'from mock import patch, Mock\n'), ((7880, 7914), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (7885, 7914), False, 'from mock import patch, Mock\n'), ((8318, 8352), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (8323, 8352), False, 'from mock import patch, Mock\n'), ((9136, 9182), 'graphite.storage.get_finders', 'get_finders', (['"""tests.test_storage.LegacyFinder"""'], {}), "('tests.test_storage.LegacyFinder')\n", (9147, 9182), False, 'from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index\n'), ((9817, 9835), 'graphite.worker_pool.pool.PoolTimeoutError', 'PoolTimeoutError', ([], {}), '()\n', (9833, 9835), False, 'from graphite.worker_pool.pool import PoolTimeoutError\n'), ((9846, 9897), 'mock.patch', 'patch', (['"""graphite.storage.pool_exec"""', 'mock_pool_exec'], {}), "('graphite.storage.pool_exec', mock_pool_exec)\n", (9851, 9897), False, 'from mock import patch, Mock\n'), ((10324, 10358), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (10329, 10358), False, 'from mock import patch, Mock\n'), ((10735, 10769), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (10740, 10769), False, 'from mock import patch, Mock\n'), ((12297, 12311), 'graphite.util.epoch_to_dt', 'epoch_to_dt', (['(0)'], {}), '(0)\n', (12308, 12311), False, 'from graphite.util import epoch_to_dt\n'), ((12330, 12345), 'graphite.util.epoch_to_dt', 'epoch_to_dt', (['(60)'], {}), '(60)\n', (12341, 12345), False, 'from graphite.util import epoch_to_dt\n'), ((12360, 12375), 'graphite.util.epoch_to_dt', 'epoch_to_dt', (['(60)'], {}), '(60)\n', (12371, 12375), False, 'from graphite.util import epoch_to_dt\n'), ((12393, 12438), 'mock.patch', 'patch', (['"""graphite.render.datalib.STORE"""', 'store'], {}), "('graphite.render.datalib.STORE', store)\n", (12398, 12438), False, 'from mock import patch, Mock\n'), 
((12456, 12613), 'graphite.render.evaluator.evaluateTarget', 'evaluateTarget', (['request_context', '[\'testtags;hello=tiger\', \'seriesByTag("hello=tiger")\',\n \'seriesByTag("name=testtags")\', \'seriesByTag("name=notags")\']'], {}), '(request_context, [\'testtags;hello=tiger\',\n \'seriesByTag("hello=tiger")\', \'seriesByTag("name=testtags")\',\n \'seriesByTag("name=notags")\'])\n', (12470, 12613), False, 'from graphite.render.evaluator import evaluateTarget\n'), ((14005, 14050), 'mock.patch', 'patch', (['"""graphite.render.datalib.STORE"""', 'store'], {}), "('graphite.render.datalib.STORE', store)\n", (14010, 14050), False, 'from mock import patch, Mock\n'), ((14208, 14363), 'graphite.render.evaluator.evaluateTarget', 'evaluateTarget', (['request_context', '[\'notags;hello=tiger\', \'seriesByTag("hello=tiger")\',\n \'seriesByTag("name=testtags")\', \'seriesByTag("name=notags")\']'], {}), '(request_context, [\'notags;hello=tiger\',\n \'seriesByTag("hello=tiger")\', \'seriesByTag("name=testtags")\',\n \'seriesByTag("name=notags")\'])\n', (14222, 14363), False, 'from graphite.render.evaluator import evaluateTarget\n'), ((16623, 16629), 'mock.Mock', 'Mock', ([], {}), '()\n', (16627, 16629), False, 'from mock import patch, Mock\n'), ((17477, 17512), 'graphite.storage.Store', 'Store', ([], {'finders': 'finders', 'tagdb': 'tagdb'}), '(finders=finders, tagdb=tagdb)\n', (17482, 17512), False, 'from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index\n'), ((22362, 22373), 'time.time', 'time.time', ([], {}), '()\n', (22371, 22373), False, 'import time\n'), ((22979, 22992), 'graphite.storage.write_index', 'write_index', ([], {}), '()\n', (22990, 22992), False, 'from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index\n'), ((23022, 23054), 'graphite.storage.write_index', 'write_index', (['settings.INDEX_FILE'], {}), '(settings.INDEX_FILE)\n', (23033, 23054), False, 'from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index\n'), ((23219, 23234), 'graphite.node.BranchNode', 'BranchNode', (['"""a"""'], {}), "('a')\n", (23229, 23234), False, 'from graphite.node import LeafNode, BranchNode\n'), ((23245, 23262), 'graphite.node.BranchNode', 'BranchNode', (['"""a.b"""'], {}), "('a.b')\n", (23255, 23262), False, 'from graphite.node import LeafNode, BranchNode\n'), ((23273, 23292), 'graphite.node.BranchNode', 'BranchNode', (['"""a.b.c"""'], {}), "('a.b.c')\n", (23283, 23292), False, 'from graphite.node import LeafNode, BranchNode\n'), ((23895, 23914), 'graphite.node.BranchNode', 'BranchNode', (['"""a.b.c"""'], {}), "('a.b.c')\n", (23905, 23914), False, 'from graphite.node import LeafNode, BranchNode\n'), ((1181, 1240), 'graphite.storage.get_tagdb', 'get_tagdb', (['"""graphite.tags.localdatabase.LocalDatabaseTagDB"""'], {}), "('graphite.tags.localdatabase.LocalDatabaseTagDB')\n", (1190, 1240), False, 'from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index\n'), ((2539, 2573), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (2544, 2573), False, 'from mock import patch, Mock\n'), ((5822, 5881), 'graphite.storage.get_tagdb', 'get_tagdb', (['"""graphite.tags.localdatabase.LocalDatabaseTagDB"""'], {}), "('graphite.tags.localdatabase.LocalDatabaseTagDB')\n", (5831, 5881), False, 'from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index\n'), ((6784, 6821), 'mock.patch', 'patch', (['"""graphite.storage.log.warning"""'], {}), "('graphite.storage.log.warning')\n", (6789, 6821), False, 'from mock import patch, Mock\n'), ((7426, 7460), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (7431, 7460), False, 'from mock import patch, Mock\n'), ((9360, 9419), 'graphite.storage.get_tagdb', 'get_tagdb', (['"""graphite.tags.localdatabase.LocalDatabaseTagDB"""'], {}), "('graphite.tags.localdatabase.LocalDatabaseTagDB')\n", (9369, 9419), False, 'from graphite.storage import Store, extractForwardHeaders, get_finders, get_tagdb, write_index\n'), ((9910, 9944), 'mock.patch', 'patch', (['"""graphite.storage.log.info"""'], {}), "('graphite.storage.log.info')\n", (9915, 9944), False, 'from mock import patch, Mock\n'), ((14099, 14113), 'graphite.util.epoch_to_dt', 'epoch_to_dt', (['(0)'], {}), '(0)\n', (14110, 14113), False, 'from graphite.util import epoch_to_dt\n'), ((14134, 14149), 'graphite.util.epoch_to_dt', 'epoch_to_dt', (['(60)'], {}), '(60)\n', (14145, 14149), False, 'from graphite.util import epoch_to_dt\n'), ((14166, 14181), 'graphite.util.epoch_to_dt', 'epoch_to_dt', (['(60)'], {}), '(60)\n', (14177, 14181), False, 'from graphite.util import epoch_to_dt\n'), ((16340, 16355), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (16350, 16355), False, 'import time\n'), ((16494, 16509), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (16504, 16509), False, 'import time\n'), ((22625, 22683), 'os.path.join', 'os.path.join', (['settings.WHISPER_DIR', '"""a/b/c/bogus_file.txt"""'], {}), "(settings.WHISPER_DIR, 'a/b/c/bogus_file.txt')\n", (22637, 22683), False, 'import os\n'), ((22771, 22810), 'os.path.join', 'os.path.join', (['settings.WHISPER_DIR', '"""a"""'], {}), "(settings.WHISPER_DIR, 'a')\n", (22783, 22810), False, 'import os\n'), ((23636, 23666), 'random.choice', 'random.choice', (['[None, 1, 2, 3]'], {}), '([None, 1, 2, 3])\n', (23649, 23666), False, 'import random\n'), ((12648, 12696), 'graphite.render.datalib.TimeSeries', 'TimeSeries', (['"""testtags;hello=tiger"""', '(0)', '(60)', '(1)', '[]'], {}), "('testtags;hello=tiger', 0, 60, 1, [])\n", (12658, 12696), False, 'from graphite.render.datalib import TimeSeries\n'), ((12706, 12804), 'graphite.render.datalib.TimeSeries', 'TimeSeries', (['"""testtags;hello=tiger"""', '(0)', '(60)', '(1)', '[]'], {'pathExpression': '"""seriesByTag("hello=tiger")"""'}), '(\'testtags;hello=tiger\', 0, 60, 1, [], pathExpression=\n \'seriesByTag("hello=tiger")\')\n', (12716, 12804), False, 'from graphite.render.datalib import TimeSeries\n'), ((12809, 12909), 'graphite.render.datalib.TimeSeries', 'TimeSeries', (['"""testtags;hello=tiger"""', '(0)', '(60)', '(1)', '[]'], {'pathExpression': '"""seriesByTag("name=testtags")"""'}), '(\'testtags;hello=tiger\', 0, 60, 1, [], pathExpression=\n \'seriesByTag("name=testtags")\')\n', (12819, 12909), False, 'from graphite.render.datalib import TimeSeries\n'), ((14454, 14500), 'graphite.render.datalib.TimeSeries', 'TimeSeries', (['"""notags;hello=tiger"""', '(0)', '(60)', '(1)', '[]'], {}), "('notags;hello=tiger', 0, 60, 1, [])\n", (14464, 14500), False, 'from graphite.render.datalib import TimeSeries\n'), ((14510, 14606), 'graphite.render.datalib.TimeSeries', 'TimeSeries', (['"""notags;hello=tiger"""', '(0)', '(60)', '(1)', '[]'], {'pathExpression': '"""seriesByTag("hello=tiger")"""'}), '(\'notags;hello=tiger\', 0, 60, 1, [], pathExpression=\n \'seriesByTag("hello=tiger")\')\n', (14520, 14606), False, 'from graphite.render.datalib import TimeSeries\n'), ((14611, 14707), 'graphite.render.datalib.TimeSeries', 'TimeSeries', (['"""notags;hello=tiger"""', '(0)', '(60)', '(1)', '[]'], {'pathExpression': '"""seriesByTag("name=notags")"""'}), '(\'notags;hello=tiger\', 0, 60, 1, [], pathExpression=\n \'seriesByTag("name=notags")\')\n', (14621, 14707), False, 'from graphite.render.datalib import TimeSeries\n'), ((23788, 23799), 'time.time', 'time.time', ([], {}), '()\n', (23797, 23799), False, 'import time\n'), ((23768, 23779), 'time.time', 'time.time', ([], {}), '()\n', (23777, 23779), False, 'import time\n')] |
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PyCOMPSs Mathematical Library: Algebra: Mean
============================================
This file contains the arithmetic mean algorithm.
"""
from pycompss.api.task import task
from pycompss.functions.reduce import mergeReduce
def _list_length(l):
    """
    Recursively count the leaf elements of a (possibly nested) list
    """
    if l:
        if not isinstance(l[0], list):
            return 1 + _list_length(l[1:])
        else:
            return _list_length(l[0]) + _list_length(l[1:])
    return 0
@task(returns=float)
def _mean(X, n):
    return sum(X)/float(n)
@task(returns=float)
def reduce_add(x, y):
    # pairwise merge step used by mergeReduce below
    return x + y
def mean(X, wait=False):
"""
Arithmetic mean
:param X: chunked data
:param wait: if we want to wait for result. Default False
:return: mean of X.
"""
    n = _list_length(X)
result = mergeReduce(reduce_add, [_mean(x, n) for x in X])
if wait:
from pycompss.api.api import compss_wait_on
result = compss_wait_on(result)
return result
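# Illustrative usage (a sketch; assumes the PyCOMPSs runtime is active,
# e.g. the script was launched with runcompss):
#   chunks = [[1.0, 2.0], [3.0, 4.0]]   # chunked input data
#   print(mean(chunks, wait=True))      # -> 2.5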
| [
"pycompss.api.task.task",
"pycompss.api.api.compss_wait_on"
] | [((1150, 1169), 'pycompss.api.task.task', 'task', ([], {'returns': 'float'}), '(returns=float)\n', (1154, 1169), False, 'from pycompss.api.task import task\n'), ((1559, 1581), 'pycompss.api.api.compss_wait_on', 'compss_wait_on', (['result'], {}), '(result)\n', (1573, 1581), False, 'from pycompss.api.api import compss_wait_on\n')] |
import json
import requests
import dash
import dash_core_components as dcc
import dash_html_components as html
def server_setup(results):
app = dash.Dash(__name__)
app.layout = html.Div(children=[
html.H1(children='NHL 2018/2019 cumulative points stats'),
html.Div(children='''
'''),
dcc.Graph(
id='example-graph',
figure={
'data': [
*results
],
'layout': {
'title': 'NHL Data Visualization',
'xaxis': {
'title': 'Games Played',
},
'yaxis': {
'title': 'Points',
},
'height': 800,
'width': 1000,
}
},
)
])
app.run_server(debug=True)
def main():
# setup the time span
start_date = '2018-01-01'
end_date = '2019-01-01'
nhl_url = f'https://statsapi.web.nhl.com/api/v1/schedule?startDate={start_date}&endDate={end_date}'
s = requests.Session()
games = []
# download the games
raw_data = s.get(nhl_url).text
dates = json.loads(raw_data)['dates']
for date in dates:
games.extend(date['games'])
# filter the games
games = [game for game in games if all([
game['season'] == '20182019',
game['gameType'] == 'R',
])]
    # create a data structure of results, e.g. results = {wsh: {1: 1, 2: 3}, buf: {1: 0, 2: 2}}
results = {}
for game in games:
for team in game['teams']:
name = game['teams'][team]['team']['name']
record = game['teams'][team]['leagueRecord']
points = int(record['wins']) * 2 + int(record['ot'])
            games_played = int(record['wins']) + int(record['losses']) + int(record['ot'])
            results[name] = results.get(name, {})  # create an empty dict for this team if one doesn't exist yet
            results[name][games_played] = points
# reformat the data structure for plotly into teams = [{'x':[1,2], 'y':[1,3], 'name':'wsh'}, ]
teams = []
points_matrix = {}
for result in results:
stats = results[result]
for game_n in stats:
points_matrix[game_n] = points_matrix.get(game_n, [])
points_matrix[game_n].append(stats[game_n])
points_matrix[game_n] = sorted(points_matrix[game_n])
print(len(points_matrix))
for result in sorted(results,
key=lambda result: list(results[result].values())[-1],
reverse=True):
game_ns = list(results[result].keys())
points = list(results[result].values())
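        # per-game league rank, negated so that rank 1 would sit highest on an
        # inverted axis; computed here but not used in the final figure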
place = [-(sorted(points_matrix[i] + [point], reverse=True).index(point) + 1) for i, point in
enumerate(points, 1)]
# points = [point-i for i, point in enumerate(points)]
teams.append({
'x': game_ns,
'y': points,
'name': f'{result} ({points[-1]}/{game_ns[-1]*2} pts = {round(points[-1]/game_ns[-1]/2*100,1)}%)',
'visible': 'legendonly',
})
server_setup(teams)
if __name__ == '__main__':
main()
| [
"json.loads",
"requests.Session",
"dash_html_components.H1",
"dash.Dash",
"dash_html_components.Div",
"dash_core_components.Graph"
] | [((150, 169), 'dash.Dash', 'dash.Dash', (['__name__'], {}), '(__name__)\n', (159, 169), False, 'import dash\n'), ((1127, 1145), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1143, 1145), False, 'import requests\n'), ((1234, 1254), 'json.loads', 'json.loads', (['raw_data'], {}), '(raw_data)\n', (1244, 1254), False, 'import json\n'), ((216, 273), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""NHL 2018/2019 cumulative points stats"""'}), "(children='NHL 2018/2019 cumulative points stats')\n", (223, 273), True, 'import dash_html_components as html\n'), ((284, 329), 'dash_html_components.Div', 'html.Div', ([], {'children': '"""\n \n """'}), "(children='\\n \\n ')\n", (292, 329), True, 'import dash_html_components as html\n'), ((342, 552), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""example-graph"""', 'figure': "{'data': [*results], 'layout': {'title': 'NHL Data Visualization', 'xaxis':\n {'title': 'Games Played'}, 'yaxis': {'title': 'Points'}, 'height': 800,\n 'width': 1000}}"}), "(id='example-graph', figure={'data': [*results], 'layout': {\n 'title': 'NHL Data Visualization', 'xaxis': {'title': 'Games Played'},\n 'yaxis': {'title': 'Points'}, 'height': 800, 'width': 1000}})\n", (351, 552), True, 'import dash_core_components as dcc\n')] |
#Squeeze example
import os
import sys
sys.path.insert(0,'..')
import pycpdflib
#DLL loading depends on your own platform. These are the author's settings.
if sys.platform.startswith('darwin'):
pycpdflib.loadDLL("/Users/john/repos/python-libcpdf/libpycpdf.so")
elif sys.platform.startswith('linux'):
pycpdflib.loadDLL("../libpycpdf.so")
elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
os.add_dll_directory("C:\\\\OCaml64/home/JohnWhitington/python-libcpdf/")
pycpdflib.loadDLL("libpycpdf.dll")
#Load file
pdf = pycpdflib.fromFile('../pycpdflibmanual.pdf', '')
#Squeeze it
pycpdflib.squeezeInMemory(pdf)
#Write output. We make sure to use toFileExt, and make object streams.
pycpdflib.toFileExt(pdf, 'squeezed.pdf', False, False, True, True, True)
| [
"sys.path.insert",
"pycpdflib.fromFile",
"sys.platform.startswith",
"pycpdflib.loadDLL",
"pycpdflib.toFileExt",
"pycpdflib.squeezeInMemory"
] | [((28, 52), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (43, 52), False, 'import sys\n'), ((149, 182), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (172, 182), False, 'import sys\n'), ((546, 594), 'pycpdflib.fromFile', 'pycpdflib.fromFile', (['"""../pycpdflibmanual.pdf"""', '""""""'], {}), "('../pycpdflibmanual.pdf', '')\n", (564, 594), False, 'import pycpdflib\n'), ((608, 638), 'pycpdflib.squeezeInMemory', 'pycpdflib.squeezeInMemory', (['pdf'], {}), '(pdf)\n', (633, 638), False, 'import pycpdflib\n'), ((711, 783), 'pycpdflib.toFileExt', 'pycpdflib.toFileExt', (['pdf', '"""squeezed.pdf"""', '(False)', '(False)', '(True)', '(True)', '(True)'], {}), "(pdf, 'squeezed.pdf', False, False, True, True, True)\n", (730, 783), False, 'import pycpdflib\n'), ((188, 254), 'pycpdflib.loadDLL', 'pycpdflib.loadDLL', (['"""/Users/john/repos/python-libcpdf/libpycpdf.so"""'], {}), "('/Users/john/repos/python-libcpdf/libpycpdf.so')\n", (205, 254), False, 'import pycpdflib\n'), ((260, 292), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (283, 292), False, 'import sys\n'), ((298, 334), 'pycpdflib.loadDLL', 'pycpdflib.loadDLL', (['"""../libpycpdf.so"""'], {}), "('../libpycpdf.so')\n", (315, 334), False, 'import pycpdflib\n'), ((340, 372), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win32"""'], {}), "('win32')\n", (363, 372), False, 'import sys\n'), ((376, 409), 'sys.platform.startswith', 'sys.platform.startswith', (['"""cygwin"""'], {}), "('cygwin')\n", (399, 409), False, 'import sys\n'), ((493, 527), 'pycpdflib.loadDLL', 'pycpdflib.loadDLL', (['"""libpycpdf.dll"""'], {}), "('libpycpdf.dll')\n", (510, 527), False, 'import pycpdflib\n')] |
import imaplib
import email
from create_orders_from_email import get_email_contents
import time
import sys
with imaplib.IMAP4_SSL(host="imap.gmail.com", port=imaplib.IMAP4_SSL_PORT) as imap_ssl:
resp_code, response = imap_ssl.login(sys.argv[1], sys.argv[2])
while True:
resp_code, mail_count = imap_ssl.select(mailbox="INBOX", readonly=True)
        resp_code, mails = imap_ssl.search(None, "UnSeen (SUBJECT 'OCRE')")  # unseen messages whose subject contains 'OCRE'
for mail_id in mails[0].decode().split()[-10:]:
resp_code, mail_data = imap_ssl.fetch(mail_id, '(RFC822)')
message = email.message_from_bytes(mail_data[0][1])
for part in message.walk():
if part.get_content_type() == "text/plain":
get_email_contents(email=part.get_payload(), token=sys.argv[3], secret_token=sys.argv[4])
        time.sleep(30)  # poll the mailbox every 30 seconds
"email.message_from_bytes",
"time.sleep",
"imaplib.IMAP4_SSL"
] | [((113, 182), 'imaplib.IMAP4_SSL', 'imaplib.IMAP4_SSL', ([], {'host': '"""imap.gmail.com"""', 'port': 'imaplib.IMAP4_SSL_PORT'}), "(host='imap.gmail.com', port=imaplib.IMAP4_SSL_PORT)\n", (130, 182), False, 'import imaplib\n'), ((844, 858), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (854, 858), False, 'import time\n'), ((584, 625), 'email.message_from_bytes', 'email.message_from_bytes', (['mail_data[0][1]'], {}), '(mail_data[0][1])\n', (608, 625), False, 'import email\n')] |
import socket
import struct
class Patlite(object):
auto_update = True
OFF = 0
BLINK = 0x20
ON = 0x01
SHORT = 0x08
LONG = 0x10
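    # Status-byte layout, as implied by send_status()/get_status() below:
    # bits 0-2 = LED steady on (red/yellow/green), bit 3 = short buzzer,
    # bit 4 = long buzzer, bits 5-7 = LED blink (red/yellow/green).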
STATUS_STRING = {
OFF:"Off",BLINK:"Blink",ON:"On",
SHORT:"Short",LONG:"Long"
}
RED = 0
YELLOW = 1
GREEN = 2
LED_STRING = ["Red","Yellow","Green"]
_led = [0, 0, 0]
_buzzer = 0
send = None
class NAKError(Exception):
pass
def __init__(self, host, port=10000, proto="TCP", timeout=2):
"""Connect to Patlite Signal Tower"""
self.host = host
self.port = port
self.timeout = timeout
if proto.upper() == "TCP":
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.send = self._send_tcp
elif proto.upper() == "UDP":
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.send = self._send_udp
else:
            raise NotImplementedError("Protocol '%s' is not supported." % proto)
self.sock.settimeout(timeout)
# Get curernt status
self.get_status()
# Implementation of Send
def _send_tcp(self, data):
"""Send implementation for TCP"""
self.sock.sendall(data)
def _send_udp(self, data):
"""Send implementation for UDP"""
self.sock.sendto(data, (self.host, self.port))
def close(self):
"""Close Socket"""
self.sock.close()
def send_status(self):
"""Send change state command."""
data = 0
for i, status in enumerate(self._led):
data |= (status << i)
data |= self._buzzer
self.send(struct.pack("2B", 0x57, data))
# Recv ACK
data, addr = self.sock.recvfrom(10)
if not data[:3] == "ACK":
raise self.NAKError()
def get_status(self):
"""Get current status from Patlite"""
self.send("\x52")
data, addr = self.sock.recvfrom(10)
if not data[0] == "R":
raise self.NAKError()
data = struct.unpack("B", data[1])[0]
# Parse LED statuses.
        for i in range(3):
led = self.OFF
if (data & (self.ON << i)):
led = self.ON
elif (data & (self.BLINK << i)):
led = self.BLINK
self._led[i] = led
# Parse the buzzer status.
buzzer = self.OFF
        if (data & (self.LONG)):
            buzzer = self.LONG
        elif (data & (self.SHORT)):
            buzzer = self.SHORT
self._buzzer = buzzer
def print_status(self):
"""Print current status."""
for i, status in enumerate(self._led):
print ("%7s : %s" % (self.LED_STRING[i], self.STATUS_STRING[status]))
print ("%7s : %s" % ("Buzzer", self.STATUS_STRING[self.buzzer]))
def set_led(self, led, value):
"""Change a LED state."""
self._led[led] = value
if self.auto_update:
self.send_status()
# LED propertiess
red = property(lambda self:self._led[self.RED],
lambda self, value:self.set_led(self.RED, value))
green = property(lambda self:self._led[self.GREEN],
lambda self, value:self.set_led(self.GREEN, value))
yellow = property(lambda self:self._led[self.YELLOW],
lambda self, value:self.set_led(self.YELLOW, value))
def set_buzzer(self, value):
"""Change the buzzer state."""
self._buzzer = value
if self.auto_update:
self.send_status()
# Buzzer property
buzzer = property(lambda self:self._buzzer,
lambda self, value:self.set_buzzer(value))
if __name__ == "__main__":
# For testing
import sys
host = sys.argv[1]
if len(sys.argv) >= 3:
port = int(sys.argv[2])
else:
port = 10000
if len(sys.argv) >= 4:
proto = sys.argv[3].upper()
else:
proto = "TCP"
p = Patlite(host, port, proto)
print ("""For examples.
p.red = p.ON
p.yellow = p.BLINK
p.green = p.OFF
p.buzzer = p.SHORT
""")
import code
code.InteractiveConsole(globals()).interact()
| [
"struct.unpack",
"struct.pack",
"socket.socket"
] | [((748, 797), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (761, 797), False, 'import socket\n'), ((1770, 1797), 'struct.pack', 'struct.pack', (['"""2B"""', '(87)', 'data'], {}), "('2B', 87, data)\n", (1781, 1797), False, 'import struct\n'), ((2159, 2186), 'struct.unpack', 'struct.unpack', (['"""B"""', 'data[1]'], {}), "('B', data[1])\n", (2172, 2186), False, 'import struct\n'), ((942, 990), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (955, 990), False, 'import socket\n')] |
"""Fake server"""
import asyncio
import logging
from typing import Callable, Dict, List, Optional, Tuple, Union
from . import (
AmxDuetRequest,
AmxDuetResponse,
AnswerCodes,
CommandNotRecognised,
CommandPacket,
ResponseException,
ResponsePacket,
read_command,
write_packet
)
_LOGGER = logging.getLogger(__name__)
class Server():
def __init__(self, host: str, port: int, model: str) -> None:
self._server: Optional[asyncio.AbstractServer] = None
self._host = host
self._port = port
self._handlers: Dict[Union[Tuple[int, int], Tuple[int, int, bytes]], Callable] = dict()
self._tasks: List[asyncio.Task] = list()
self._amxduet = AmxDuetResponse({
"Device-SDKClass": "Receiver",
"Device-Make": "ARCAM",
"Device-Model": model,
"Device-Revision": "x.y.z"
})
async def process(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
_LOGGER.debug("Client connected")
task = asyncio.current_task()
assert task
self._tasks.append(task)
try:
await self.process_runner(reader, writer)
finally:
_LOGGER.debug("Client disconnected")
self._tasks.remove(task)
async def process_runner(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
while True:
request = await read_command(reader)
if request is None:
_LOGGER.debug("Client disconnected")
return
responses = await self.process_request(request)
_LOGGER.debug("Client command %s -> %s", request, responses)
for response in responses:
await write_packet(writer, response)
async def process_request(self, request: Union[CommandPacket, AmxDuetRequest]):
if isinstance(request, AmxDuetRequest):
return [self._amxduet]
handler = self._handlers.get((request.zn, request.cc, request.data))
if handler is None:
handler = self._handlers.get((request.zn, request.cc))
try:
if handler:
data = handler(
zn=request.zn,
cc=request.cc,
data=request.data)
if isinstance(data, bytes):
response = [
ResponsePacket(
request.zn,
request.cc,
AnswerCodes.STATUS_UPDATE,
data)
]
else:
response = data
else:
raise CommandNotRecognised()
except ResponseException as e:
response = [
ResponsePacket(
request.zn,
request.cc,
e.ac,
e.data or bytes()
)
]
return response
def register_handler(self, zn, cc, data, fun):
if data:
self._handlers[(zn, cc, data)] = fun
else:
self._handlers[(zn, cc)] = fun
async def start(self):
_LOGGER.debug("Starting server")
self._server = await asyncio.start_server(
self.process,
self._host,
self._port)
return self
async def stop(self):
if self._server:
_LOGGER.debug("Stopping server")
self._server.close()
await self._server.wait_closed()
self._server = None
if self._tasks:
_LOGGER.debug("Cancelling clients %s", self._tasks)
for task in self._tasks:
task.cancel()
await asyncio.wait(self._tasks)
class ServerContext():
def __init__(self, server: Server):
self._server = server
async def __aenter__(self):
await self._server.start()
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._server.stop()
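# Illustrative test-style usage (a sketch; the model name and handler are
# assumptions, not part of this module):
#   server = Server("127.0.0.1", 50000, model="AVR390")
#   server.register_handler(1, 0x01, None,
#                           lambda zn, cc, data: bytes([0x01]))
#   async def scenario():
#       async with ServerContext(server):
#           ...  # connect a client to 127.0.0.1:50000 and exchange packets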
| [
"logging.getLogger",
"asyncio.start_server",
"asyncio.wait",
"asyncio.current_task"
] | [((323, 350), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (340, 350), False, 'import logging\n'), ((1046, 1068), 'asyncio.current_task', 'asyncio.current_task', ([], {}), '()\n', (1066, 1068), False, 'import asyncio\n'), ((3296, 3354), 'asyncio.start_server', 'asyncio.start_server', (['self.process', 'self._host', 'self._port'], {}), '(self.process, self._host, self._port)\n', (3316, 3354), False, 'import asyncio\n'), ((3793, 3818), 'asyncio.wait', 'asyncio.wait', (['self._tasks'], {}), '(self._tasks)\n', (3805, 3818), False, 'import asyncio\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Converter to convert a problem with inequality constraints to unconstrained with penalty terms."""
import logging
from typing import Optional, Union, Tuple, List, Dict
import numpy as np
from .quadratic_program_converter import QuadraticProgramConverter
from ..exceptions import QiskitOptimizationError
from ..problems.constraint import Constraint, ConstraintSense
from ..problems.quadratic_objective import QuadraticObjective
from ..problems.quadratic_program import QuadraticProgram
from ..problems.variable import Variable
logger = logging.getLogger(__name__)
class LinearInequalityToPenalty(QuadraticProgramConverter):
r"""Convert linear inequality constraints to penalty terms of the objective function.
There are some linear constraints which do not require slack variables to
construct penalty terms [1]. This class supports the following inequality constraints.
.. math::
\begin{array}{}
\text { Inequality constraint } & & \text { Penalty term } \\
x \leq y & \rightarrow & P(x-x y) \\
x \geq y & \rightarrow & P(y-x y) \\
\sum_{i=1}^n x_i \leq 1, n \geq 2 & \rightarrow & P \sum_{i, j : i < j} x_i x_j\\
\sum_{i=1}^n x_i \geq n-1, n \geq 2 & \rightarrow & P \sum_{i, j : i < j} (1 - x_i) (1 - x_j)
\end{array}
Note that x, y, z and :math:`x_i` are binary variables, and P is a penalty factor,
where the value of P is automatically determined or supplied by users.
If constraints match with any of the patterns, they are converted into penalty terms and added
to the objective function. Otherwise, constraints are kept as is.
References:
[1]: <NAME>, et al. (2019),
A Tutorial on Formulating and Using QUBO Models,
`arXiv:1811.11538 <https://arxiv.org/abs/1811.11538>`_.
"""
def __init__(self, penalty: Optional[float] = None) -> None:
"""
Args:
penalty: Penalty factor to scale equality constraints that are added to objective.
If None is passed, a penalty factor will be automatically calculated on
every conversion.
"""
self._src_num_vars: Optional[int] = None
self._dst: Optional[QuadraticProgram] = None
self._penalty: Optional[float] = penalty
self._should_define_penalty: bool = penalty is None
def convert(self, problem: QuadraticProgram) -> QuadraticProgram:
r"""Convert inequality constraints into penalty terms of the objective function.
This methods converts the following patterns where x, y, and :math:`x_i` are binary variables
and P is a penalty factor.
.. math::
\begin{array}{}
\text { Inequality constraint } & & \text { Penalty term } \\
x \leq y & \rightarrow & P(x-x y) \\
x \geq y & \rightarrow & P(y-x y) \\
\sum_{i=1}^n x_i \leq 1, n \geq 2 & \rightarrow & P \sum_{i, j : i < j} x_i x_j\\
\sum_{i=1}^n x_i \geq n-1, n \geq 2 & \rightarrow & P \sum_{i, j : i < j} (1 - x_i) (1 - x_j)
\end{array}
Args:
problem: The problem to be solved.
Returns:
The converted problem
Raises:
QiskitOptimizationError: If an unsupported-type variable exists.
"""
# create empty QuadraticProgram model
self._src_num_vars = problem.get_num_vars()
self._dst = QuadraticProgram(name=problem.name)
# If no penalty was given, set the penalty coefficient by _auto_define_penalty()
if self._should_define_penalty:
penalty = self._auto_define_penalty(problem)
else:
penalty = self._penalty
# Set variables
for x in problem.variables:
if x.vartype == Variable.Type.CONTINUOUS:
self._dst.continuous_var(x.lowerbound, x.upperbound, x.name)
elif x.vartype == Variable.Type.BINARY:
self._dst.binary_var(x.name)
elif x.vartype == Variable.Type.INTEGER:
self._dst.integer_var(x.lowerbound, x.upperbound, x.name)
else:
raise QiskitOptimizationError(f"Unsupported vartype: {x.vartype}")
# get original objective terms
offset = problem.objective.constant
linear = problem.objective.linear.to_dict()
quadratic = problem.objective.quadratic.to_dict()
sense = problem.objective.sense.value
# convert linear constraints into penalty terms
for constraint in problem.linear_constraints:
# special constraint check function here
if not self._is_matched_constraint(problem, constraint):
self._dst.linear_constraint(
constraint.linear.coefficients,
constraint.sense,
constraint.rhs,
constraint.name,
)
continue
conv_offset, conv_linear, conv_quadratic, varmap = self._conversion_table(constraint)
# constant part
offset += sense * penalty * conv_offset
# linear parts of penalty
for j, j_2 in varmap.items():
                # if j already exists in the linear terms dict, add the penalty term
                # to the existing value; else create a new key in the linear terms dict
if conv_linear[j] != 0:
linear[j_2] = linear.get(j_2, 0.0) + sense * penalty * conv_linear[j]
# quadratic parts of penalty
for j, j_2 in varmap.items():
for k in range(j, len(varmap)):
# if j and k already exist in the quadratic terms dict,
                # add the penalty term to the existing value,
                # else create a new key and value in the quadratic terms dict
if conv_quadratic[j][k] != 0:
tup = (j_2, varmap[k])
quadratic[tup] = (
quadratic.get(tup, 0.0) + sense * penalty * conv_quadratic[j][k]
)
# Copy quadratic_constraints
for quadratic_constraint in problem.quadratic_constraints:
self._dst.quadratic_constraint(
quadratic_constraint.linear.coefficients,
quadratic_constraint.quadratic.coefficients,
quadratic_constraint.sense,
quadratic_constraint.rhs,
quadratic_constraint.name,
)
if problem.objective.sense == QuadraticObjective.Sense.MINIMIZE:
self._dst.minimize(offset, linear, quadratic)
else:
self._dst.maximize(offset, linear, quadratic)
# Update the penalty to the one just used
self._penalty = penalty
return self._dst
@staticmethod
def _conversion_table(
constraint,
) -> Tuple[int, np.ndarray, np.ndarray, Dict[int, int]]:
"""Construct conversion matrix for special constraint.
Returns:
Return conversion table which is used to construct
penalty term in main function.
Raises:
QiskitOptimizationError: if the constraint is invalid.
"""
vars_dict = constraint.linear.to_dict()
coeffs = list(vars_dict.values())
varmap = dict(enumerate(vars_dict.keys()))
rhs = constraint.rhs
sense = constraint.sense
num_vars = len(vars_dict)
# initialize return values, these are used for converted offset, linear
# and quadratic terms
offset = 0
linear = np.zeros(num_vars, dtype=int)
quadratic = np.zeros((num_vars, num_vars), dtype=int)
# rhs = num_vars - 1 correspond to multiple variable with >= n - 1 case.
if sense == ConstraintSense.GE and rhs == num_vars - 1:
# x_1 + ... + x_n >= n - 1
# The number of offset is combination ( nC2 )
offset = num_vars * (num_vars - 1) // 2
linear = np.full(num_vars, 1 - num_vars, dtype=int)
quadratic = np.triu(np.ones((num_vars, num_vars), dtype=int), k=1)
elif sense == ConstraintSense.LE and rhs == 1:
# x_1 + ... + x_n <= 1
quadratic = np.triu(np.ones((num_vars, num_vars), dtype=int), k=1)
elif rhs == 0:
if num_vars != 2:
raise QiskitOptimizationError(
f"Internal error: invalid number of variables {num_vars} {constraint.name}"
)
quadratic = np.array([[0, -1], [0, 0]])
if sense == ConstraintSense.GE:
# x >= y case
if coeffs[0] < 0.0:
linear[0] = 1
else:
linear[1] = 1
elif sense == ConstraintSense.LE:
# x <= y case
if coeffs[0] > 0.0:
linear[0] = 1
else:
linear[1] = 1
else:
raise QiskitOptimizationError(f"Internal error: invalid constraint {constraint.name}")
return offset, linear, quadratic, varmap
@staticmethod
def _is_matched_constraint(problem, constraint) -> bool:
"""Determine if constraint is special or not.
Returns:
True: when constraint is special
False: when constraint is not special
"""
params = constraint.linear.to_dict()
num_vars = len(params)
rhs = constraint.rhs
sense = constraint.sense
coeff_array = np.array(list(params.values()))
# Binary parameter?
if any(problem.variables[i].vartype != Variable.Type.BINARY for i in params.keys()):
return False
if num_vars == 2 and rhs == 0:
if sense in (Constraint.Sense.LE, Constraint.Sense.GE):
# x-y<=0
# x-y>=0
return coeff_array.min() == -1.0 and coeff_array.max() == 1.0
elif num_vars >= 2:
if sense == Constraint.Sense.LE and rhs == 1:
if all(i == 1 for i in params.values()):
# x1+x2+...<=1
return True
elif sense == Constraint.Sense.GE and rhs == num_vars - 1:
if all(i == 1 for i in params.values()):
# x1+x2+...>=n-1
return True
return False
@staticmethod
def _auto_define_penalty(problem) -> float:
"""Automatically define the penalty coefficient.
Returns:
Return the minimum valid penalty factor calculated
from the upper bound and the lower bound of the objective function.
If a constraint has a float coefficient,
return the default value for the penalty factor.
"""
default_penalty = 1e5
# Check coefficients of constraints.
# If a constraint has a float coefficient, return the default value for the penalty factor.
terms = []
for constraint in problem.linear_constraints:
terms.append(constraint.rhs)
terms.extend(constraint.linear.to_array().tolist())
if any(isinstance(term, float) and not term.is_integer() for term in terms):
logger.warning(
"Warning: Using %f for the penalty coefficient because "
"a float coefficient exists in constraints. \n"
"The value could be too small. "
"If so, set the penalty coefficient manually.",
default_penalty,
)
return default_penalty
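        # otherwise use 1 + (total range of the objective), so that a single
        # violated constraint always costs more than any achievable objective gain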
lin_b = problem.objective.linear.bounds
quad_b = problem.objective.quadratic.bounds
return 1.0 + (lin_b.upperbound - lin_b.lowerbound) + (quad_b.upperbound - quad_b.lowerbound)
def interpret(self, x: Union[np.ndarray, List[float]]) -> np.ndarray:
"""Convert the result of the converted problem back to that of the original problem
Args:
x: The result of the converted problem or the given result in case of FAILURE.
Returns:
The result of the original problem.
Raises:
QiskitOptimizationError: if the number of variables in the result differs from
that of the original problem.
"""
if len(x) != self._src_num_vars:
raise QiskitOptimizationError(
f"The number of variables in the passed result ({len(x)}) differs from "
f"that of the original problem ({self._src_num_vars})."
)
return np.asarray(x)
@property
def penalty(self) -> Optional[float]:
"""Returns the penalty factor used in conversion.
Returns:
The penalty factor used in conversion.
"""
return self._penalty
@penalty.setter
def penalty(self, penalty: Optional[float]) -> None:
"""Set a new penalty factor.
Args:
penalty: The new penalty factor.
If None is passed, a penalty factor will be automatically calculated
on every conversion.
"""
self._penalty = penalty
self._should_define_penalty = penalty is None
| [
"logging.getLogger",
"numpy.ones",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"numpy.full"
] | [((1026, 1053), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1043, 1053), False, 'import logging\n'), ((8151, 8180), 'numpy.zeros', 'np.zeros', (['num_vars'], {'dtype': 'int'}), '(num_vars, dtype=int)\n', (8159, 8180), True, 'import numpy as np\n'), ((8201, 8242), 'numpy.zeros', 'np.zeros', (['(num_vars, num_vars)'], {'dtype': 'int'}), '((num_vars, num_vars), dtype=int)\n', (8209, 8242), True, 'import numpy as np\n'), ((13159, 13172), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (13169, 13172), True, 'import numpy as np\n'), ((8559, 8601), 'numpy.full', 'np.full', (['num_vars', '(1 - num_vars)'], {'dtype': 'int'}), '(num_vars, 1 - num_vars, dtype=int)\n', (8566, 8601), True, 'import numpy as np\n'), ((8634, 8674), 'numpy.ones', 'np.ones', (['(num_vars, num_vars)'], {'dtype': 'int'}), '((num_vars, num_vars), dtype=int)\n', (8641, 8674), True, 'import numpy as np\n'), ((8803, 8843), 'numpy.ones', 'np.ones', (['(num_vars, num_vars)'], {'dtype': 'int'}), '((num_vars, num_vars), dtype=int)\n', (8810, 8843), True, 'import numpy as np\n'), ((9088, 9115), 'numpy.array', 'np.array', (['[[0, -1], [0, 0]]'], {}), '([[0, -1], [0, 0]])\n', (9096, 9115), True, 'import numpy as np\n')] |
from flask import Blueprint, abort, request, jsonify
from prediction_utils import *
prediction_page = Blueprint('prediction_page', __name__)
@prediction_page.route('/map', methods=['GET'])
def get_map_data():
offset = int(request.args['offset'])
country = request.args['country'] if 'country' in request.args else ''
disease = request.args['disease'] if 'disease' in request.args else ''
return jsonify(get_outbreaks_by_country(offset, country, disease))
@prediction_page.route('/table', methods=['GET'])
def get_table_data():
return jsonify(get_outbreaks()) | [
"flask.Blueprint"
] | [((103, 141), 'flask.Blueprint', 'Blueprint', (['"""prediction_page"""', '__name__'], {}), "('prediction_page', __name__)\n", (112, 141), False, 'from flask import Blueprint, abort, request, jsonify\n')] |
from django.urls import path
from . import views
urlpatterns = [
path("draugiem/login/", views.login, name="draugiem_login"),
path("draugiem/callback/", views.callback, name="draugiem_callback"),
]
| [
"django.urls.path"
] | [((72, 131), 'django.urls.path', 'path', (['"""draugiem/login/"""', 'views.login'], {'name': '"""draugiem_login"""'}), "('draugiem/login/', views.login, name='draugiem_login')\n", (76, 131), False, 'from django.urls import path\n'), ((137, 205), 'django.urls.path', 'path', (['"""draugiem/callback/"""', 'views.callback'], {'name': '"""draugiem_callback"""'}), "('draugiem/callback/', views.callback, name='draugiem_callback')\n", (141, 205), False, 'from django.urls import path\n')] |
from typing import Optional
from ctypes import *
from vcx.common import do_call, create_cb
from vcx.api.connection import Connection
from vcx.api.vcx_stateful import VcxStateful
import json
class DisclosedProof(VcxStateful):
def __init__(self, source_id: str):
VcxStateful.__init__(self, source_id)
self._name = source_id
self._proof_req = None
def __del__(self):
self.release()
self.logger.debug("Deleted {} obj: {}".format(DisclosedProof, self.handle))
@property
def proof_request(self):
return self._proof_req
@proof_request.setter
def proof_request(self, x):
self._proof_req = x
@staticmethod
async def create(source_id: str, proof_request: str):
constructor_params = (source_id,)
c_source_id = c_char_p(source_id.encode('utf-8'))
c_proof_request = c_char_p(json.dumps(proof_request).encode('utf-8'))
c_params = (c_source_id, c_proof_request, )
return await DisclosedProof._create("vcx_disclosed_proof_create_with_request",
constructor_params,
c_params)
@staticmethod
async def create_with_msgid(source_id: str, connection: Connection, msg_id: str):
proof = DisclosedProof(source_id)
c_source_id = c_char_p(source_id.encode('utf-8'))
c_msg_id = c_char_p(json.dumps(msg_id).encode('utf-8'))
c_connection_handle = c_uint32(connection.handle)
if not hasattr(DisclosedProof.create_with_msgid, "cb"):
DisclosedProof.create_with_msgid.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_uint32, c_char_p))
proof.handle, proof_req = await do_call('vcx_disclosed_proof_create_with_msgid',
c_source_id,
c_connection_handle,
c_msg_id,
DisclosedProof.create_with_msgid.cb)
proof.proof_request = json.loads(proof_req.decode())
return proof
@staticmethod
async def deserialize(data: dict):
disclosed_proof = await DisclosedProof._deserialize("vcx_disclosed_proof_deserialize",
json.dumps(data),
data.get('source_id'))
return disclosed_proof
@staticmethod
async def get_requests(connection: Connection) -> dict:
if not hasattr(DisclosedProof.get_requests, "cb"):
DisclosedProof.get_requests.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_connection_handle = c_uint32(connection.handle)
data = await do_call('vcx_disclosed_proof_get_requests',
c_connection_handle,
DisclosedProof.get_requests.cb)
return json.loads(data.decode())
async def serialize(self) -> dict:
return await self._serialize(DisclosedProof, 'vcx_disclosed_proof_serialize')
async def update_state(self) -> int:
return await self._update_state(DisclosedProof, 'vcx_disclosed_proof_update_state')
async def get_state(self) -> int:
return await self._get_state(DisclosedProof, 'vcx_disclosed_proof_get_state')
def release(self) -> None:
self._release(DisclosedProof, 'vcx_disclosed_proof_release')
async def get_creds(self) -> dict:
        if not hasattr(DisclosedProof.get_creds, "cb"):
            self.logger.debug("vcx_disclosed_proof_retrieve_credentials: Creating callback")
            DisclosedProof.get_creds.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_disclosed_proof_handle = c_uint32(self.handle)
        data = await do_call('vcx_disclosed_proof_retrieve_credentials',
                             c_disclosed_proof_handle,
                             DisclosedProof.get_creds.cb)
return json.loads(data.decode())
async def send_proof(self, connection: Connection):
if not hasattr(DisclosedProof.send_proof, "cb"):
self.logger.debug("vcx_disclosed_proof_send_proof: Creating callback")
DisclosedProof.send_proof.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))
c_disclosed_proof_handle = c_uint32(self.handle)
c_connection_handle = c_uint32(connection.handle)
await do_call('vcx_disclosed_proof_send_proof',
c_disclosed_proof_handle,
c_connection_handle,
DisclosedProof.send_proof.cb)
async def generate_proof(self, selected_creds: dict, self_attested_attrs: dict):
if not hasattr(DisclosedProof.send_proof, "cb"):
self.logger.debug("vcx_disclosed_proof_generate_proof: Creating callback")
DisclosedProof.send_proof.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))
c_disclosed_proof_handle = c_uint32(self.handle)
c_selected_creds = c_char_p(json.dumps(selected_creds).encode('utf-8'))
c_self_attested_attrs = c_char_p(json.dumps(self_attested_attrs).encode('utf-8'))
await do_call('vcx_disclosed_proof_generate_proof',
c_disclosed_proof_handle,
c_selected_creds,
c_self_attested_attrs,
DisclosedProof.send_proof.cb)
| [
"vcx.common.do_call",
"json.dumps",
"vcx.api.vcx_stateful.VcxStateful.__init__"
] | [((277, 314), 'vcx.api.vcx_stateful.VcxStateful.__init__', 'VcxStateful.__init__', (['self', 'source_id'], {}), '(self, source_id)\n', (297, 314), False, 'from vcx.api.vcx_stateful import VcxStateful\n'), ((1717, 1850), 'vcx.common.do_call', 'do_call', (['"""vcx_disclosed_proof_create_with_msgid"""', 'c_source_id', 'c_connection_handle', 'c_msg_id', 'DisclosedProof.create_with_msgid.cb'], {}), "('vcx_disclosed_proof_create_with_msgid', c_source_id,\n c_connection_handle, c_msg_id, DisclosedProof.create_with_msgid.cb)\n", (1724, 1850), False, 'from vcx.common import do_call, create_cb\n'), ((2777, 2877), 'vcx.common.do_call', 'do_call', (['"""vcx_disclosed_proof_get_requests"""', 'c_connection_handle', 'DisclosedProof.get_requests.cb'], {}), "('vcx_disclosed_proof_get_requests', c_connection_handle,\n DisclosedProof.get_requests.cb)\n", (2784, 2877), False, 'from vcx.common import do_call, create_cb\n'), ((3815, 3926), 'vcx.common.do_call', 'do_call', (['"""vcx_disclosed_proof_retrieve_credentials"""', 'c_disclosed_proof_handle', 'DisclosedProof.send_proof.cb'], {}), "('vcx_disclosed_proof_retrieve_credentials',\n c_disclosed_proof_handle, DisclosedProof.send_proof.cb)\n", (3822, 3926), False, 'from vcx.common import do_call, create_cb\n'), ((4440, 4562), 'vcx.common.do_call', 'do_call', (['"""vcx_disclosed_proof_send_proof"""', 'c_disclosed_proof_handle', 'c_connection_handle', 'DisclosedProof.send_proof.cb'], {}), "('vcx_disclosed_proof_send_proof', c_disclosed_proof_handle,\n c_connection_handle, DisclosedProof.send_proof.cb)\n", (4447, 4562), False, 'from vcx.common import do_call, create_cb\n'), ((5188, 5334), 'vcx.common.do_call', 'do_call', (['"""vcx_disclosed_proof_generate_proof"""', 'c_disclosed_proof_handle', 'c_selected_creds', 'c_self_attested_attrs', 'DisclosedProof.send_proof.cb'], {}), "('vcx_disclosed_proof_generate_proof', c_disclosed_proof_handle,\n c_selected_creds, c_self_attested_attrs, DisclosedProof.send_proof.cb)\n", (5195, 5334), False, 'from vcx.common import do_call, create_cb\n'), ((2330, 2346), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2340, 2346), False, 'import json\n'), ((883, 908), 'json.dumps', 'json.dumps', (['proof_request'], {}), '(proof_request)\n', (893, 908), False, 'import json\n'), ((1400, 1418), 'json.dumps', 'json.dumps', (['msg_id'], {}), '(msg_id)\n', (1410, 1418), False, 'import json\n'), ((5039, 5065), 'json.dumps', 'json.dumps', (['selected_creds'], {}), '(selected_creds)\n', (5049, 5065), False, 'import json\n'), ((5124, 5155), 'json.dumps', 'json.dumps', (['self_attested_attrs'], {}), '(self_attested_attrs)\n', (5134, 5155), False, 'import json\n')] |
import sys
import requests
import json
import argparse
import time
parser = argparse.ArgumentParser(description='Collects monitoring data from Pingdom.')
parser.add_argument('-u', '--pingdom-user-name', help='The Pingdom User Name', required=True)
parser.add_argument('-p', '--pingdom-password', help='The Pingdom Password', required=True)
parser.add_argument('-a', '--pingdom-api-key', help='The Pingdom API-KEY', required=True)
class Pingdom:
def __init__(self, api_key, user_name, password):
        self.api_key = api_key
        self.user_name = user_name
        self.password = password
self.jsonData = []
def handle_error(self, error_message):
sys.stderr.write("ERROR:|Pingdom| " + error_message)
sys.exit(1)
def call_api(self, api):
        headers = {'App-Key': self.api_key}
base_api = 'https://api.pingdom.com/api/2.0/' + api
        response = requests.get(base_api, headers=headers, auth=requests.auth.HTTPBasicAuth(self.user_name, self.password))
if response.status_code == 200:
return response.json()
else:
self.handle_error("API [" + base_api + "] failed to execute with error code [" + str(response.status_code) + "].")
def get_checks(self):
response = self.call_api('checks')
data = response.get("checks")
counts = response.get("counts")
up_count = 0
down_count = 0
unconfirmed_down_count = 0
unknown_count = 0
paused_count = 0
for x in data:
status = x.get("status")
if status == "up":
up_count = up_count + 1
elif status == "down":
                down_count = down_count + 1
elif status == "unconfirmed_down":
unconfirmed_down_count = unconfirmed_down_count + 1
elif status == "unknown":
unknown_count = unknown_count + 1
elif status == "paused":
paused_count = paused_count + 1
counts["up"] = up_count
counts["down"] = down_count
counts["unconfirmed_down"] = unconfirmed_down_count
counts["unknown"] = unknown_count
counts["paused"] = paused_count
data.append(counts)
self.jsonData = data
def get_credits(self):
response = self.call_api('credits')
self.jsonData.append(response)
def get_maintenance(self):
response = self.call_api('maintenance')
if response.get('maintenance'):
for mw in response.get('maintenance'):
window = {}
window["description"] = mw.get("description")
window["recurrencetype"] = mw.get("recurrencetype")
window["repeatevery"] = mw.get("repeatevery")
window["from"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(mw.get("from")))
window["to"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(mw.get("to")))
window["window"] = 1
self.jsonData.append(window)
if __name__ == "__main__":
try:
args = parser.parse_args()
pingdom = Pingdom(args.pingdom_api_key, args.pingdom_user_name, args.pingdom_password)
pingdom.get_checks()
pingdom.get_credits()
pingdom.get_maintenance()
print(json.dumps(pingdom.jsonData))
    except Exception as e:
        pingdom.handle_error(str(e))
| [
"requests.auth.HTTPBasicAuth",
"argparse.ArgumentParser",
"json.dumps",
"sys.stderr.write",
"sys.exit"
] | [((77, 154), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Collects monitoring data from Pingdom."""'}), "(description='Collects monitoring data from Pingdom.')\n", (100, 154), False, 'import argparse\n'), ((682, 734), 'sys.stderr.write', 'sys.stderr.write', (["('ERROR:|Pingdom| ' + error_message)"], {}), "('ERROR:|Pingdom| ' + error_message)\n", (698, 734), False, 'import sys\n'), ((743, 754), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (751, 754), False, 'import sys\n'), ((3344, 3372), 'json.dumps', 'json.dumps', (['pingdom.jsonData'], {}), '(pingdom.jsonData)\n', (3354, 3372), False, 'import json\n'), ((956, 1020), 'requests.auth.HTTPBasicAuth', 'requests.auth.HTTPBasicAuth', (['self.user_name[0]', 'self.password[0]'], {}), '(self.user_name[0], self.password[0])\n', (983, 1020), False, 'import requests\n')] |
#!/usr/bin/env python
# -----------------------------------------------------
# Written by <NAME> on 2021/3/28.
# -----------------------------------------------------
import sys
from pycocotools.coco_analyze import COCOAnalyze, my_plot
annFile = sys.argv[1]
coco = COCOAnalyze(annFile)
# get all cats
catNms = 'all'
catIds = coco.getCatIds()
# get specific cats
# catNms = ['yin_hua_qing_xie']
# catIds = coco.getCatIds(catNms=catNms)
widths = coco.getBBoxWidths(catIds=catIds)
my_plot(data=widths, label=catNms, name='widths')
aspect_ratios = coco.getBBoxAspectRatios(catIds=catIds)
my_plot(data=aspect_ratios, label=catNms, name='aspect_ratios')
| [
"pycocotools.coco_analyze.COCOAnalyze",
"pycocotools.coco_analyze.my_plot"
] | [((269, 289), 'pycocotools.coco_analyze.COCOAnalyze', 'COCOAnalyze', (['annFile'], {}), '(annFile)\n', (280, 289), False, 'from pycocotools.coco_analyze import COCOAnalyze, my_plot\n'), ((485, 534), 'pycocotools.coco_analyze.my_plot', 'my_plot', ([], {'data': 'widths', 'label': 'catNms', 'name': '"""widths"""'}), "(data=widths, label=catNms, name='widths')\n", (492, 534), False, 'from pycocotools.coco_analyze import COCOAnalyze, my_plot\n'), ((592, 655), 'pycocotools.coco_analyze.my_plot', 'my_plot', ([], {'data': 'aspect_ratios', 'label': 'catNms', 'name': '"""aspect_ratios"""'}), "(data=aspect_ratios, label=catNms, name='aspect_ratios')\n", (599, 655), False, 'from pycocotools.coco_analyze import COCOAnalyze, my_plot\n')] |
import logging
from typing import Any, List, Optional
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_FAN_ONLY,
)
from gehomesdk import ErdAcFanSetting
from ..common import OptionsConverter
_LOGGER = logging.getLogger(__name__)
class AcFanModeOptionsConverter(OptionsConverter):
def __init__(self, default_option: ErdAcFanSetting = ErdAcFanSetting.AUTO):
self._default = default_option
@property
def options(self) -> List[str]:
return [i.stringify() for i in [ErdAcFanSetting.AUTO, ErdAcFanSetting.LOW, ErdAcFanSetting.MED, ErdAcFanSetting.HIGH]]
def from_option_string(self, value: str) -> Any:
try:
return ErdAcFanSetting[value.upper().replace(" ","_")]
except:
            _LOGGER.warning(f"Could not set fan mode to {value}")
return self._default
def to_option_string(self, value: Any) -> Optional[str]:
try:
return {
ErdAcFanSetting.AUTO: ErdAcFanSetting.AUTO,
ErdAcFanSetting.LOW: ErdAcFanSetting.LOW,
ErdAcFanSetting.LOW_AUTO: ErdAcFanSetting.AUTO,
ErdAcFanSetting.MED: ErdAcFanSetting.MED,
ErdAcFanSetting.MED_AUTO: ErdAcFanSetting.AUTO,
ErdAcFanSetting.HIGH: ErdAcFanSetting.HIGH,
ErdAcFanSetting.HIGH_AUTO: ErdAcFanSetting.HIGH
}.get(value).stringify()
except:
pass
return self._default.stringify()
class AcFanOnlyFanModeOptionsConverter(AcFanModeOptionsConverter):
def __init__(self):
super().__init__(ErdAcFanSetting.LOW)
@property
def options(self) -> List[str]:
return [i.stringify() for i in [ErdAcFanSetting.LOW, ErdAcFanSetting.MED, ErdAcFanSetting.HIGH]]
| [
"logging.getLogger"
] | [((261, 288), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (278, 288), False, 'import logging\n')] |
""" Decision Trees - Supervised learning: 1-Classification*, 2-Regression.
D.T.s are a non-parametric supervised learning method used for classification and regression. The goal is to create a
model that predicts the value of a target variable by learning simple decision rules inferred from the data features.
Some advantages of decision trees are:
1- Simple to understand and to interpret. Trees can be visualised.
2- Requires little data preparation. Other techniques often require data normalisation, dummy variables need to be
created and blank values to be removed. Note however that this module does not support missing values.
3- The cost of using the tree (i.e., predicting data) is logarithmic in the number of data points used to train the tree.
4- Able to handle both numerical and categorical data. Other techniques are usually specialised in analysing datasets
that have only one type of variable. See algorithms for more information.
5- Able to handle multi-output problems.
6- Uses a white box model. If a given situation is observable in a model, the explanation for the condition is easily
explained by boolean logic. By contrast, in a black box model (e.g., in an artificial neural network), results may be
more difficult to interpret.
7- Possible to validate a model using statistical tests. That makes it possible to account for the reliability
of the model.
8- Performs well even if its assumptions are somewhat violated by the true model from which the data were generated.
The disadvantages of decision trees include:
1- Decision-tree learners can create over-complex trees that do not generalise the data well.This is called overfitting.
Mechanisms such as pruning (not currently supported), setting the minimum number of samples required at a leaf node or
setting the maximum depth of the tree are necessary to avoid this problem.
2- Decision trees can be unstable because small variations in the data might result in a completely different tree
being generated. This problem is mitigated by using decision trees within an ensemble.
3- The problem of learning an optimal decision tree is known to be NP-complete under several aspects of optimality and
even for simple concepts. Consequently, practical decision-tree learning algorithms are based on heuristic algorithms
such as the greedy algorithm where locally optimal decisions are made at each node. Such algorithms cannot guarantee to
return the globally optimal decision tree. This can be mitigated by training multiple trees in an ensemble learner,
where the features and samples are randomly sampled with replacement.
4- There are concepts that are hard to learn because decision trees do not express them easily, such as XOR, parity or
multiplexer problems.
5- Decision tree learners create biased trees if some classes dominate. It is therefore recommended to balance the
dataset prior to fitting with the decision tree.
ID3 (Iterative Dichotomiser 3) was developed in 1986 by <NAME>. The algorithm creates a multiway tree, finding
for each node (i.e. in a greedy manner) the categorical feature that will yield the largest information gain for
categorical targets. Trees are grown to their maximum size and then a pruning step is usually applied to improve the
ability of the tree to generalise to unseen data.
C4.5 is the successor to ID3 and removed the restriction that features must be categorical by dynamically defining a
discrete attribute (based on numerical variables) that partitions the continuous attribute value into a discrete set
of intervals. C4.5 converts the trained trees (i.e. the output of the ID3 algorithm) into sets of if-then rules.
The accuracy of each rule is then evaluated to determine the order in which they should be applied.
Pruning is done by removing a rule's precondition if the accuracy of the rule improves without it.
"""
import numpy as np
from sklearn.externals import joblib
from sklearn import datasets, metrics, tree
from sklearn.cross_validation import train_test_split
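# Illustrative aside (a sketch, not part of the original script): the 'entropy'
# criterion used below scores node impurity as H(S) = -sum_k p_k * log2(p_k);
# ID3/C4.5-style learners choose the split with the largest drop in H
# (the information gain).
def _entropy(class_counts):
    p = np.asarray(class_counts, dtype='float64')
    p = p / p.sum()
    p = p[p > 0]  # by convention 0 * log2(0) contributes 0
    return -np.sum(p * np.log2(p))
# e.g. _entropy([5, 5]) == 1.0 (maximally impure), _entropy([10, 0]) == 0.0 (pure)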
cancer = datasets.load_breast_cancer()
data = cancer.data
labels = cancer.target
data = np.asarray(data, dtype='float32')
labels = np.asarray(labels, dtype='int32')
trainData, testData, trainLabels, testLabels = train_test_split(data, labels, train_size=0.8, test_size=0.2)
print('Tree Learning... Fitting... ')
tree_clf = tree.DecisionTreeClassifier(criterion='entropy', splitter='best', max_depth=None, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None,
random_state=None, max_leaf_nodes=None, class_weight=None, presort=False)
tree_clf.fit(X=trainData, y=trainLabels)
print('Tree Predicting... ')
predicted = tree_clf.predict(X=testData)
print("Results: \n %s" % metrics.classification_report(testLabels, predicted))
matrix = metrics.confusion_matrix(testLabels, predicted)
print("Confusion Matrix: \n %s" % matrix)
print("\nMean Accuracy: %.4f " % tree_clf.score(X=testData, y=testLabels))
print("Tree Saving in ... /Output/Tree_model.pkl")
joblib.dump(tree_clf, '/home/rainer85ah/PycharmProjects/DiagnosticCancerSolution/Output/Tree_model.pkl')
| [
"sklearn.metrics.classification_report",
"sklearn.tree.DecisionTreeClassifier",
"numpy.asarray",
"sklearn.datasets.load_breast_cancer",
"sklearn.cross_validation.train_test_split",
"sklearn.externals.joblib.dump",
"sklearn.metrics.confusion_matrix"
] | [((4019, 4048), 'sklearn.datasets.load_breast_cancer', 'datasets.load_breast_cancer', ([], {}), '()\n', (4046, 4048), False, 'from sklearn import datasets, metrics, tree\n'), ((4099, 4132), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': '"""float32"""'}), "(data, dtype='float32')\n", (4109, 4132), True, 'import numpy as np\n'), ((4142, 4175), 'numpy.asarray', 'np.asarray', (['labels'], {'dtype': '"""int32"""'}), "(labels, dtype='int32')\n", (4152, 4175), True, 'import numpy as np\n'), ((4223, 4284), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['data', 'labels'], {'train_size': '(0.8)', 'test_size': '(0.2)'}), '(data, labels, train_size=0.8, test_size=0.2)\n', (4239, 4284), False, 'from sklearn.cross_validation import train_test_split\n'), ((4335, 4593), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'criterion': '"""entropy"""', 'splitter': '"""best"""', 'max_depth': 'None', 'min_samples_split': '(2)', 'min_samples_leaf': '(1)', 'min_weight_fraction_leaf': '(0.0)', 'max_features': 'None', 'random_state': 'None', 'max_leaf_nodes': 'None', 'class_weight': 'None', 'presort': '(False)'}), "(criterion='entropy', splitter='best', max_depth\n =None, min_samples_split=2, min_samples_leaf=1,\n min_weight_fraction_leaf=0.0, max_features=None, random_state=None,\n max_leaf_nodes=None, class_weight=None, presort=False)\n", (4362, 4593), False, 'from sklearn import datasets, metrics, tree\n'), ((4860, 4907), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['testLabels', 'predicted'], {}), '(testLabels, predicted)\n', (4884, 4907), False, 'from sklearn import datasets, metrics, tree\n'), ((5077, 5190), 'sklearn.externals.joblib.dump', 'joblib.dump', (['tree_clf', '"""/home/rainer85ah/PycharmProjects/DiagnosticCancerSolution/Output/Tree_model.pkl"""'], {}), "(tree_clf,\n '/home/rainer85ah/PycharmProjects/DiagnosticCancerSolution/Output/Tree_model.pkl'\n )\n", (5088, 5190), False, 'from sklearn.externals import joblib\n'), ((4797, 4849), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['testLabels', 'predicted'], {}), '(testLabels, predicted)\n', (4826, 4849), False, 'from sklearn import datasets, metrics, tree\n')] |
__author__ = '<NAME>'
__version__ = '2.0'
from flask import Flask
from hackathon.functions import safe_get_config
from flask_restful import Api
from flask_cors import CORS
# flask
app = Flask(__name__)
app.config['SECRET_KEY'] = '*K&ep_me^se(ret_!@#$'
# flask restful
api = Api(app)
# CORS
app.config['CORS_HEADERS'] = 'Content-Type, token'
cors = CORS(app)
from . import views
### example of scheduler
# from scheduler import scheduler
# from datetime import datetime, timedelta
#
# def alarm(time):
# print('Alarm! This alarm was scheduled at %s.' % time)
# return {
# "key": "val"
# }
#
# alarm_time = datetime.now() + timedelta(seconds=10)
# scheduler.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()])
| [
"flask_restful.Api",
"flask_cors.CORS",
"flask.Flask"
] | [((188, 203), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (193, 203), False, 'from flask import Flask\n'), ((277, 285), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (280, 285), False, 'from flask_restful import Api\n'), ((352, 361), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (356, 361), False, 'from flask_cors import CORS\n')] |
from models import Action, Block, Detect, Discovery, Edge, Node
import renderer
if __name__ == "__main__":
root = Node(label="Reality")
goal = Node(label="Attacker gets data from bucket")
apiCache = Action(
label="Search API Caches",
chain="recon",
cost=0,
time=3,
objective="Discover bucket paths",
pSuccess=1.0
)
root.createEdge(apiCache,label="#Yolosec")
s3urls = Discovery(
label="S3 Urls",
description="The URL paths to various S3 buckets",
sensitivity=3,
value=0
)
apiCache.createEdge(s3urls, label="#Yolosec")
downloadFiles = Action(
chain="exfiltration",
label="Download files from all buckets",
cost=0,
time=1,
objective="Access confidential information stored in S3",
pSuccess=1.0,
detections=["CloudWatch","DLP"]
)
s3urls.createEdge(downloadFiles, label="#Yolosec")
downloadFiles.createEdge(goal, label="#Yolosec")
style = renderer.loadStyle('style.json')
renderer.render(
node=root,
renderUnimplemented=True,
style=style,
fname="example_simpleS3",
fout="png"
) | [
"models.Discovery",
"renderer.loadStyle",
"models.Node",
"renderer.render",
"models.Action"
] | [((119, 140), 'models.Node', 'Node', ([], {'label': '"""Reality"""'}), "(label='Reality')\n", (123, 140), False, 'from models import Action, Block, Detect, Discovery, Edge, Node\n'), ((152, 196), 'models.Node', 'Node', ([], {'label': '"""Attacker gets data from bucket"""'}), "(label='Attacker gets data from bucket')\n", (156, 196), False, 'from models import Action, Block, Detect, Discovery, Edge, Node\n'), ((213, 331), 'models.Action', 'Action', ([], {'label': '"""Search API Caches"""', 'chain': '"""recon"""', 'cost': '(0)', 'time': '(3)', 'objective': '"""Discover bucket paths"""', 'pSuccess': '(1.0)'}), "(label='Search API Caches', chain='recon', cost=0, time=3, objective=\n 'Discover bucket paths', pSuccess=1.0)\n", (219, 331), False, 'from models import Action, Block, Detect, Discovery, Edge, Node\n'), ((442, 548), 'models.Discovery', 'Discovery', ([], {'label': '"""S3 Urls"""', 'description': '"""The URL paths to various S3 buckets"""', 'sensitivity': '(3)', 'value': '(0)'}), "(label='S3 Urls', description=\n 'The URL paths to various S3 buckets', sensitivity=3, value=0)\n", (451, 548), False, 'from models import Action, Block, Detect, Discovery, Edge, Node\n'), ((653, 853), 'models.Action', 'Action', ([], {'chain': '"""exfiltration"""', 'label': '"""Download files from all buckets"""', 'cost': '(0)', 'time': '(1)', 'objective': '"""Access confidential information stored in S3"""', 'pSuccess': '(1.0)', 'detections': "['CloudWatch', 'DLP']"}), "(chain='exfiltration', label='Download files from all buckets', cost=\n 0, time=1, objective='Access confidential information stored in S3',\n pSuccess=1.0, detections=['CloudWatch', 'DLP'])\n", (659, 853), False, 'from models import Action, Block, Detect, Discovery, Edge, Node\n'), ((1027, 1059), 'renderer.loadStyle', 'renderer.loadStyle', (['"""style.json"""'], {}), "('style.json')\n", (1045, 1059), False, 'import renderer\n'), ((1064, 1172), 'renderer.render', 'renderer.render', ([], {'node': 'root', 'renderUnimplemented': '(True)', 'style': 'style', 'fname': '"""example_simpleS3"""', 'fout': '"""png"""'}), "(node=root, renderUnimplemented=True, style=style, fname=\n 'example_simpleS3', fout='png')\n", (1079, 1172), False, 'import renderer\n')] |
import math
from KratosMultiphysics import *
from KratosMultiphysics.BRepApplication import *
from KratosMultiphysics.IsogeometricApplication import *
###
### This module is a factory to generate typical geometries for isogeometric analysis, e.g. circle, l-shape, ...
###
nurbs_fespace_library = BSplinesFESpaceLibrary()
grid_lib = ControlGridLibrary()
multipatch_util = MultiPatchUtility()
multipatch_refine_util = MultiPatchRefinementUtility()
bsplines_patch_util = BSplinesPatchUtility()
### Compute cross product
def cross(c, a, b):
c[0] = a[1]*b[2] - a[2]*b[1]
c[1] = a[2]*b[0] - a[0]*b[2]
c[2] = a[0]*b[1] - a[1]*b[0]
return c
### Compute dot product
def dot(a, b):
return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]
### Normalize a vector
def normalize(a):
norma = math.sqrt(a[0]**2 + a[1]**2 + a[2]**2)
a[0] = a[0] / norma
a[1] = a[1] / norma
a[2] = a[2] / norma
return a
### Compute Gaussian function
def gaussian(mu, sigma, x):
return math.exp(-0.5*((x-mu)/sigma)**2)/sigma/math.sqrt(2.0*math.pi)
### Compute inverse Gaussian function
def inv_gaussian1(mu, sigma, g):
return -sigma*math.sqrt(-2.0*math.log(sigma*math.sqrt(2*math.pi)*g)) + mu
### Compute inverse Gaussian function
def inv_gaussian2(mu, sigma, g):
return sigma*math.sqrt(-2.0*math.log(sigma*math.sqrt(2*math.pi)*g)) + mu
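# Derivation of the two inverse branches (from gaussian() above):
#   g = exp(-0.5*((x-mu)/sigma)**2) / (sigma*sqrt(2*pi))
#   => ((x-mu)/sigma)**2 = -2*log(sigma*sqrt(2*pi)*g)
#   => x = mu -/+ sigma*sqrt(-2*log(sigma*sqrt(2*pi)*g))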
# ### Generate distributed Gaussian array in span (min, max). It is useful to generate a knot vector with Gaussian distribution for testing
# def GenerateGaussianArray(half_n, min_k, max_k, sigma):
# mu = 0.5*(min_k + max_k)
# max_g = gaussian(mu, sigma, mu)
# min_g = gaussian(mu, sigma, 0.0)
# print("min_g:", min_g)
# print("max_g:", max_g)
# print("mu:", inv_gaussian1(mu, sigma, max_g))
# print("mu:", inv_gaussian2(mu, sigma, max_g))
# k_list = []
# for i in range(0, half_n+1):
# t = float(i+1)/(half_n+1)
# g = t*(max_g-min_g) + min_g
# # print("g:", g)
# k = inv_gaussian1(mu, sigma, g)
# # print("k:", k)
# k_list.append(k)
# for i in range(0, half_n):
# t = float(half_n-i)/(half_n+1)
# g = t*(max_g-min_g) + min_g
# k = inv_gaussian2(mu, sigma, g)
# k_list.append(k)
# return k_list
# ### Generate distributed Gaussian array in span (min, max). It is useful to generate a knot vector with Gaussian distribution for testing
# def GenerateGaussianArray(n, min_k, max_k):
# mu = 0.0
# sigma = 1.0
# k_list = []
# g_list = []
# min_g = 0.0
# max_g = gaussian(mu, sigma, mu)
# sum_g = 0.0
# for i in range(0, n):
# t = 6.0*float(i)/(n-1) - 3.0;
# g = gaussian(mu, sigma, t)
# g_list.append(g)
# sum_g = sum_g + g
# t = 0.0
# for g in g_list:
# t = t + g
# k_list.append(t/sum_g)
# return k_list
### Generate distributed Gaussian array in span (min, max). It is useful to generate a knot vector with Gaussian distribution for testing
def GenerateGaussianArray(num_span, max_elem_in_span, sigma, min_k, max_k):
k_list = []
# make a span in [-3, 3]
for i in range(0, num_span):
min_t = -3.0 + float(i)/num_span*6.0
max_t = -3.0 + float(i+1)/num_span*6.0
# print("min_t:", min_t)
# print("max_t:", max_t)
        # get a sampling value in [min_t, max_t]
t = 0.5*(min_t + max_t)
# print("t:", t)
g = gaussian(0.0, sigma, t*sigma)
# print("g:", g)
n = int(max_elem_in_span*g)
# print("n:", n)
# generate n numbers from min_t to max_t
for j in range(0, n):
t = min_t + float(j+0.5)/n*(max_t-min_t)
            t_scale = (t+3.0)/6.0
k_list.append(t_scale*(max_k-min_k)+min_k)
return k_list
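# Example usage (hypothetical values): generate roughly Gaussian-spaced knots
# on [0.0, 1.0] using 10 spans with at most 8 knots per span:
#   knots = GenerateGaussianArray(10, 8, 1.0, 0.0, 1.0)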
### Create a line from start_point to end_point with knot vector [0 0 0 ... 1 1 1]
### On output the pointer to the patch will be returned
def CreateLine(start_point, end_point, order = 1):
Id = 0
fes = nurbs_fespace_library.CreatePrimitiveFESpace(order)
ctrl_grid = grid_lib.CreateLinearControlPointGrid(start_point[0], start_point[1], start_point[2], fes.Number(0), end_point[0], end_point[1], end_point[2])
patch_ptr = multipatch_util.CreatePatchPointer(Id, fes)
patch = patch_ptr.GetReference()
patch.CreateControlPointGridFunction(ctrl_grid)
return patch_ptr
### Create a curve from the control point list, given as [ [x0, y0, z0], ... ]
### All the weight is assumed 1
def CreateCurve(points, order):
Id = 0
number = len(points)
fes = nurbs_fespace_library.CreateUniformFESpace(number, order)
ctrl_grid = StructuredControlPointGrid1D(number)
for i in range(0, number):
ctrl_grid.SetValue(i, ControlPoint(points[i][0], points[i][1], points[i][2], 1.0))
curve_ptr = multipatch_util.CreatePatchPointer(Id, fes)
curve = curve_ptr.GetReference()
curve.CreateControlPointGridFunction(ctrl_grid)
return curve_ptr
### Create an arc at center on the surface perpendicular to the given axis. By default, the quadratic arc is generated. The knot vector will be [0 0 0 1 1 1]
### On output the pointer to the patch will be returned. Small arc means that the open angle is less than 90 degrees.
def CreateSmallArc(center, axis, radius, start_angle, end_angle):
## firstly create an arc in xy plane at (0, 0)
Id = 0
fes = nurbs_fespace_library.CreatePrimitiveFESpace(2)
ctrl_grid = grid_lib.CreateLinearControlPointGrid(0.0, 0.0, 0.0, fes.Number(0), radius, 0.0, 0.0)
sweep = end_angle - start_angle
dsweep = 0.5*sweep/180.0*math.pi
wm = math.cos(dsweep)
x = radius*wm
y = radius*math.sin(dsweep)
xm = x + y*math.tan(dsweep)
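    # Rational quadratic arc: (x, +/-y) are the arc end points, xm is where
    # the end tangents intersect, and the middle control point carries weight
    # cos(dsweep) -- the standard NURBS construction for a circular arc.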
if axis == 'z':
trans = RotationZ(start_angle + 0.5*sweep)
elif axis == 'y':
trans = RotationZ(start_angle + 0.5*sweep)
trans.AppendTransformation(RotationX(90.0))
    elif axis == 'x':
        trans = RotationZ(start_angle + 0.5*sweep + 90.0)
        trans.AppendTransformation(RotationY(90.0))
    else:
        raise ValueError("CreateSmallArc: axis must be 'x', 'y' or 'z'")
trans.AppendTransformation(Translation(center[0], center[1], center[2]))
pt1 = ctrl_grid[0]
pt1.WX = x
pt1.WY = -y
pt1.WZ = 0.0
pt1.W = 1.0
pt1.ApplyTransformation(trans)
ctrl_grid[0] = pt1
pt2 = ctrl_grid[1]
pt2.WX = wm*xm
pt2.WY = 0.0
pt2.WZ = 0.0
pt2.W = wm
pt2.ApplyTransformation(trans)
ctrl_grid[1] = pt2
pt3 = ctrl_grid[2]
pt3.WX = x
pt3.WY = y
pt3.WZ = 0.0
pt3.W = 1.0
pt3.ApplyTransformation(trans)
ctrl_grid[2] = pt3
patch_ptr = multipatch_util.CreatePatchPointer(Id, fes)
patch = patch_ptr.GetReference()
patch.CreateControlPointGridFunction(ctrl_grid)
return patch_ptr
### Create a 2D ring at center on the surface perpendicular to the axis. By default, the quadratic arc is generated. The knot vector will be [0 0 0 1 1 1]
### On output the pointer to the patch will be returned. Small ring means that the open angle is less than 90 degrees.
def CreateSmallRing(center, axis, rin, rout, start_angle, end_angle):
## create inner arc
iarc_ptr = CreateSmallArc(center, axis, rin, start_angle, end_angle)
iarc = iarc_ptr.GetReference()
## create outer arc
oarc_ptr = CreateSmallArc(center, axis, rout, start_angle, end_angle)
oarc = oarc_ptr.GetReference()
## create ring
ring_patch_ptr = bsplines_patch_util.CreateLoftPatch(iarc, oarc)
return ring_patch_ptr
### Create the 2D rectangle aligned with Cartesian axes
def CreateRectangle(start_point, end_point):
line1 = CreateLine(start_point, [end_point[0], start_point[1], start_point[2]])
line2 = CreateLine([start_point[0], end_point[1], start_point[2]], [end_point[0], end_point[1], start_point[2]])
face_ptr = bsplines_patch_util.CreateLoftPatch(line1, line2)
return face_ptr
### Create the 2D parallelogram
### P4---P3
### | |
### P1---P2
def CreateParallelogram(P1, P2, P3, P4):
line1 = CreateLine(P1, P2)
line2 = CreateLine(P4, P3)
face_ptr = bsplines_patch_util.CreateLoftPatch(line1, line2)
return face_ptr
### Create the 3D slab aligned with Cartesian axes
def CreateSlab(start_point, end_point):
line1 = CreateLine(start_point, [end_point[0], start_point[1], start_point[2]])
line2 = CreateLine([start_point[0], end_point[1], start_point[2]], [end_point[0], end_point[1], start_point[2]])
face1_ptr = bsplines_patch_util.CreateLoftPatch(line1, line2)
face1 = face1_ptr.GetReference()
line3 = CreateLine([start_point[0], start_point[1], end_point[2]], [end_point[0], start_point[1], end_point[2]])
line4 = CreateLine([start_point[0], end_point[1], end_point[2]], [end_point[0], end_point[1], end_point[2]])
face2_ptr = bsplines_patch_util.CreateLoftPatch(line3, line4)
face2 = face2_ptr.GetReference()
volume_patch_ptr = bsplines_patch_util.CreateLoftPatch(face1, face2)
return volume_patch_ptr
### Create a half circle with 4 patches configuration
def CreateHalfCircle4(center, axis, radius, rotation_angle, params={}):
if 'make_interface' in params:
make_interface = params['make_interface']
else:
make_interface = True
if 'square_control' in params:
square_control = params['square_control']
else:
square_control = 1.0/3
### create arcs
arc1_ptr = CreateSmallArc(center, axis, radius, 0.0, 45.0)
arc1 = arc1_ptr.GetReference()
arc2_ptr = CreateSmallArc(center, axis, radius, 45.0, 135.0)
arc2 = arc2_ptr.GetReference()
arc3_ptr = CreateSmallArc(center, axis, radius, 135.0, 180.0)
arc3 = arc3_ptr.GetReference()
square_size = square_control*radius
### create lines
if axis == 'x':
p1 = [center[0], center[1] + square_size, center[2]]
p2 = [center[0], center[1] + square_size, center[2] + square_size]
p3 = [center[0], center[1] - square_size, center[2]]
p4 = [center[0], center[1] - square_size, center[2] + square_size]
elif axis == 'y':
p1 = [center[0], center[1], center[2] + square_size]
p2 = [center[0] + square_size, center[1], center[2] + square_size]
p3 = [center[0], center[1], center[2] - square_size]
p4 = [center[0] + square_size, center[1], center[2] - square_size]
    elif axis == 'z':
        p1 = [center[0] + square_size, center[1], center[2]]
        p2 = [center[0] + square_size, center[1] + square_size, center[2]]
        p3 = [center[0] - square_size, center[1], center[2]]
        p4 = [center[0] - square_size, center[1] + square_size, center[2]]
    else:
        raise ValueError("CreateHalfCircle4: axis must be 'x', 'y' or 'z'")
u_order = arc1.Order(0)
line1_ptr = CreateLine(p1, p2, u_order)
line1 = line1_ptr.GetReference()
line2_ptr = CreateLine(p2, p4, u_order)
line2 = line2_ptr.GetReference()
line3_ptr = CreateLine(p4, p3, u_order)
line3 = line3_ptr.GetReference()
line4_ptr = CreateLine(p1, p3, u_order)
line4 = line4_ptr.GetReference()
patch1_ptr = bsplines_patch_util.CreateLoftPatch(arc1, line1)
patch2_ptr = bsplines_patch_util.CreateLoftPatch(arc2, line2)
patch3_ptr = bsplines_patch_util.CreateLoftPatch(arc3, line3)
patch4_ptr = bsplines_patch_util.CreateLoftPatchFromList2D([line2, line4], 1)
multipatch_refine_util.DegreeElevate(patch4_ptr, [0, u_order-1])
patch1 = patch1_ptr.GetReference()
patch1.Id = 1
patch2 = patch2_ptr.GetReference()
patch2.Id = 2
patch3 = patch3_ptr.GetReference()
patch3.Id = 3
patch4 = patch4_ptr.GetReference()
patch4.Id = 4
print("patch1:" + str(patch1))
print("patch4:" + str(patch4))
if rotation_angle != 0.0:
trans = Transformation()
trans.AppendTransformation(Translation(-center[0], -center[1], -center[2]))
if axis == 'z':
trans.AppendTransformation(RotationZ(rotation_angle))
elif axis == 'y':
trans.AppendTransformation(RotationY(rotation_angle))
elif axis == 'x':
trans.AppendTransformation(RotationX(rotation_angle))
trans.AppendTransformation(Translation(center[0], center[1], center[2]))
patch1.ApplyTransformation(trans)
patch2.ApplyTransformation(trans)
patch3.ApplyTransformation(trans)
patch4.ApplyTransformation(trans)
if make_interface:
bsplines_patch_util.MakeInterface(patch1, BoundarySide2D.U1, patch2, BoundarySide2D.U0, BoundaryDirection.Forward)
bsplines_patch_util.MakeInterface(patch2, BoundarySide2D.U1, patch3, BoundarySide2D.U0, BoundaryDirection.Forward)
bsplines_patch_util.MakeInterface(patch1, BoundarySide2D.V1, patch4, BoundarySide2D.U0, BoundaryDirection.Reversed)
bsplines_patch_util.MakeInterface(patch2, BoundarySide2D.V1, patch4, BoundarySide2D.V0, BoundaryDirection.Forward)
bsplines_patch_util.MakeInterface(patch3, BoundarySide2D.V1, patch4, BoundarySide2D.U1, BoundaryDirection.Forward)
return [patch1_ptr, patch2_ptr, patch3_ptr, patch4_ptr]
### Create a list of Frenet frame along a curve. The Frenet frame is stored as a transformation matrix.
### zvec is a reference vector to compute B at the first sampling point. It shall not be parallel with the tangent vector of the first sampling point.
def GenerateLocalFrenetFrame(curve, num_sampling_points, zvec = [1.0, 0.0, 0.0]):
trans_list = []
B = Array3()
ctrl_pnt_grid_func = curve.GridFunction(CONTROL_POINT_COORDINATES)
print(ctrl_pnt_grid_func)
for i in range(0, num_sampling_points):
xi = float(i) / (num_sampling_points-1)
pnt = [xi, 0.0, 0.0]
P = ctrl_pnt_grid_func.GetValue(pnt)
T = ctrl_pnt_grid_func.GetDerivative(pnt)
T = normalize(T[0])
if i == 0:
cross(B, zvec, T)
B = normalize(B)
else:
B = B - dot(B, T)*T
B = normalize(B)
trans = Transformation(B, T, P)
trans_list.append(trans)
return trans_list
def ExportLocalFrenetFrameToMatlab(trans_list, fn, s = 1.0):
ifile = open(fn, "w")
cnt = 1
ifile.write("s = " + str(s) + ";\n")
ifile.write("C = {}; B = {}; T = {}; N = {};\n")
for trans in trans_list:
P = trans.P()
B = trans.V1()
N = trans.V2()
T = trans.V3()
ifile.write("C{" + str(cnt) + "} = [" + str(P[0]) + " " + str(P[1]) + " " + str(P[2]) + "];\n")
ifile.write("T{" + str(cnt) + "} = [" + str(T[0]) + " " + str(T[1]) + " " + str(T[2]) + "];\n")
ifile.write("B{" + str(cnt) + "} = [" + str(B[0]) + " " + str(B[1]) + " " + str(B[2]) + "];\n")
ifile.write("N{" + str(cnt) + "} = [" + str(N[0]) + " " + str(N[1]) + " " + str(N[2]) + "];\n")
ifile.write("hold on; plot_frame(C{" + str(cnt) + "}, B{" + str(cnt) + "}, N{" + str(cnt) + "}, T{" +str(cnt) + "}, s);\n")
cnt = cnt + 1
ifile.close()
| [
"math.tan",
"math.sqrt",
"math.cos",
"math.exp",
"math.sin"
] | [((791, 835), 'math.sqrt', 'math.sqrt', (['(a[0] ** 2 + a[1] ** 2 + a[2] ** 2)'], {}), '(a[0] ** 2 + a[1] ** 2 + a[2] ** 2)\n', (800, 835), False, 'import math\n'), ((5630, 5646), 'math.cos', 'math.cos', (['dsweep'], {}), '(dsweep)\n', (5638, 5646), False, 'import math\n'), ((1024, 1048), 'math.sqrt', 'math.sqrt', (['(2.0 * math.pi)'], {}), '(2.0 * math.pi)\n', (1033, 1048), False, 'import math\n'), ((5680, 5696), 'math.sin', 'math.sin', (['dsweep'], {}), '(dsweep)\n', (5688, 5696), False, 'import math\n'), ((985, 1025), 'math.exp', 'math.exp', (['(-0.5 * ((x - mu) / sigma) ** 2)'], {}), '(-0.5 * ((x - mu) / sigma) ** 2)\n', (993, 1025), False, 'import math\n'), ((5712, 5728), 'math.tan', 'math.tan', (['dsweep'], {}), '(dsweep)\n', (5720, 5728), False, 'import math\n'), ((1167, 1189), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (1176, 1189), False, 'import math\n'), ((1316, 1338), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (1325, 1338), False, 'import math\n')] |
#!/usr/bin/env python
"""
Analyze Imaging Mass Cytometry data
===================================
This tutorial shows how to apply Squidpy to Imaging Mass Cytometry data.
The data used here comes from a recent paper from :cite:`jackson2020single`.
We provide a pre-processed subset of the data, in :class:`anndata.AnnData` format.
For details on how it was pre-processed, please refer to the original paper.
.. seealso::
See :ref:`sphx_glr_auto_tutorials_tutorial_seqfish.py` for additional analysis examples.
Import packages & data
----------------------
To run the notebook locally, create a conda environment with *conda env create -f environment.yml* using this
`environment.yml <https://github.com/theislab/squidpy_notebooks/blob/master/environment.yml>`_
"""
import scanpy as sc
import squidpy as sq
sc.logging.print_header()
print(f"squidpy=={sq.__version__}")
# load the pre-processed dataset
adata = sq.datasets.imc()
###############################################################################
# First, let's visualize the cluster annotation in spatial context
# with :func:`scanpy.pl.spatial`.
sc.pl.spatial(adata, color="cell type", spot_size=10)
###############################################################################
# We can appreciate how the majority of the tissue seems
# to consist of *apoptotic tumor cells*. There also seem to be other
# cell types scattered across the tissue, annotated as *T cells*,
# *Macrophages* and different types of *Stromal cells*. We can also
# appreciate how a subset of tumor cells, the *basal CK tumor cells*, seems
# to be located in the lower part of the tissue.
###############################################################################
# Co-occurrence across spatial dimensions
# +++++++++++++++++++++++++++++++++++++++
#
# We can visualize cluster co-occurrence in spatial dimensions using the original
# spatial coordinates.
# The co-occurrence score is defined as:
#
# .. math::
#
# \frac{p(exp|cond)}{p(exp)}
#
# where :math:`p(exp|cond)` is the conditional probability of observing a
# cluster :math:`exp` conditioned on the presence of a cluster :math:`cond`, whereas
# :math:`p(exp)` is the probability of observing :math:`exp` in the radius size
# of interest. The score is computed across increasing radii size
# around each cell in the tissue.
#
# We can compute this score with :func:`squidpy.gr.co_occurrence`
# and set the cluster annotation for the conditional probability with
# the argument ``clusters``. Then, we visualize the results with
# :func:`squidpy.pl.co_occurrence`.
# We visualize the result for two conditional groups, namely
# *basal CK tumor cell* and *T cells*.
sq.gr.co_occurrence(adata, cluster_key="cell type")
sq.pl.co_occurrence(
adata,
cluster_key="cell type",
clusters=["basal CK tumor cell", "T cells"],
figsize=(15, 4),
)
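###############################################################################
# To make the definition concrete, the score for a single radius can be
# sketched in plain numpy. This is only an illustration of the formula above,
# not squidpy's implementation; ``coords``, ``labels``, ``exp``, ``cond`` and
# ``radius`` are hypothetical inputs.
#
# .. code-block:: python
#
#     import numpy as np
#
#     def co_occurrence_ratio(coords, labels, exp, cond, radius):
#         # pairwise distances between all cells
#         d = np.linalg.norm(coords[:, None, :] - coords[None, :, :], axis=-1)
#         cond_probs = []
#         for i in np.where(labels == cond)[0]:
#             neigh = (d[i] <= radius) & (np.arange(len(labels)) != i)
#             if neigh.any():
#                 # p(exp | cond) inside the radius around this cell
#                 cond_probs.append(np.mean(labels[neigh] == exp))
#         # ratio of conditional to unconditional probability of `exp`
#         return np.mean(cond_probs) / np.mean(labels == exp)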
###############################################################################
# We can observe that *T cells* seem to co-occur
# with *endothelial* and *vimentin hi stromal cells*,
# whereas *basal CK tumor cells* largely cluster
# together, except for one type of stromal cell
# (*small elongated stromal cell*) present at close distance.
###############################################################################
# Neighborhood enrichment
# +++++++++++++++++++++++
# A similar analysis that can inform on the neighbor structure of
# the tissue is the *neighborhood enrichment test*.
# You can compute such score with the following function: :func:`squidpy.gr.nhood_enrichment`.
# In short, it's an enrichment score on spatial proximity of clusters:
# if spots belonging to two different clusters are often close to each other,
# then they will have a high score and can be defined as being *enriched*.
# On the other hand, if they are far apart, the score will be low
# and they can be defined as *depleted*.
# This score is based on a permutation-based test, and you can set
# the number of permutations with the ``n_perms`` argument (default is 1000).
#
# Since the function works on a connectivity matrix, we need to compute that as well.
# This can be done with :func:`squidpy.gr.spatial_neighbors`.
# Please see :ref:`sphx_glr_auto_examples_graph_compute_spatial_neighbors.py` for more details
# of how this function works.
#
# Finally, we visualize the results with :func:`squidpy.pl.nhood_enrichment`.
sq.gr.spatial_neighbors(adata)
sq.gr.nhood_enrichment(adata, cluster_key="cell type")
sq.pl.nhood_enrichment(adata, cluster_key="cell type")
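###############################################################################
# Conceptually, the enrichment score for one cluster pair is a z-score of the
# observed edge count against a null distribution obtained by shuffling the
# labels on the fixed spatial graph. The sketch below is an illustration
# only; ``count_edges`` is a hypothetical helper, not a squidpy function.
#
# .. code-block:: python
#
#     import numpy as np
#
#     def enrichment_z(labels, count_edges, n_perms=1000, seed=0):
#         rng = np.random.default_rng(seed)
#         obs = count_edges(labels)
#         perm = [count_edges(rng.permutation(labels)) for _ in range(n_perms)]
#         return (obs - np.mean(perm)) / np.std(perm)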
###############################################################################
# Interestingly, *T cells* shows an enrichment with *stromal* and
# *endothelial cells*, as well as *macrophages*. Another interesting
# result is that *apoptotic tumor cells*, being uniformly spread across
# the tissue area, show a neighbor depletion against any other cluster
# (but a strong enrichment for itself). This is the expected outcome of a
# permutation-based approach: because the annotation is spread uniformly
# across the tissue and present in high numbers, it is more likely to be
# enriched with cells of its own class than with any other.
###############################################################################
# Interaction matrix and network centralities
# +++++++++++++++++++++++++++++++++++++++++++
# Squidpy provides other descriptive statistics of the spatial graph.
# For instance, the interaction matrix, which counts the number of edges
# that each cluster share with all the others.
# This score can be computed with the function :func:`squidpy.gr.interaction_matrix`.
# We can visualize the results with :func:`squidpy.pl.interaction_matrix`.
sq.gr.interaction_matrix(adata, cluster_key="cell type")
sq.pl.interaction_matrix(adata, cluster_key="cell type")
###############################################################################
# Finally, similar to the previous analysis,
# we can investigate properties of the spatial graph by
# computing different network centralities:
#
# - degree_centrality
# - average_clustering
# - closeness_centrality
#
# Squidpy provides a convenient function for all of them:
# :func:`squidpy.gr.centrality_scores` and
# :func:`squidpy.pl.centrality_scores` for visualization.
sq.gr.centrality_scores(
adata,
cluster_key="cell type",
)
sq.pl.centrality_scores(adata, cluster_key="cell type", figsize=(20, 5), s=500)
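###############################################################################
# As a quick reference: degree centrality of a node is its degree divided by
# ``n - 1`` (the maximum possible degree in a graph with ``n`` nodes), so
# values close to 1 indicate densely connected clusters.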
###############################################################################
# You can familiarize yourself with network centralities from the
# excellent networkx
# `documentation <https://networkx.org/documentation/stable/reference/algorithms/centrality>`_ .
# For the purpose of this analysis, we can appreciate that the *apoptotic tumor cell*
# clusters shows high closeness centrality, indicating that nodes belonging to that group
# are often close to each other in the spatial graph.
| [
"squidpy.gr.spatial_neighbors",
"squidpy.pl.interaction_matrix",
"squidpy.gr.centrality_scores",
"squidpy.gr.nhood_enrichment",
"squidpy.pl.centrality_scores",
"scanpy.pl.spatial",
"squidpy.pl.nhood_enrichment",
"scanpy.logging.print_header",
"squidpy.gr.interaction_matrix",
"squidpy.gr.co_occurre... | [((811, 836), 'scanpy.logging.print_header', 'sc.logging.print_header', ([], {}), '()\n', (834, 836), True, 'import scanpy as sc\n'), ((915, 932), 'squidpy.datasets.imc', 'sq.datasets.imc', ([], {}), '()\n', (930, 932), True, 'import squidpy as sq\n'), ((1116, 1169), 'scanpy.pl.spatial', 'sc.pl.spatial', (['adata'], {'color': '"""cell type"""', 'spot_size': '(10)'}), "(adata, color='cell type', spot_size=10)\n", (1129, 1169), True, 'import scanpy as sc\n'), ((2673, 2724), 'squidpy.gr.co_occurrence', 'sq.gr.co_occurrence', (['adata'], {'cluster_key': '"""cell type"""'}), "(adata, cluster_key='cell type')\n", (2692, 2724), True, 'import squidpy as sq\n'), ((2725, 2843), 'squidpy.pl.co_occurrence', 'sq.pl.co_occurrence', (['adata'], {'cluster_key': '"""cell type"""', 'clusters': "['basal CK tumor cell', 'T cells']", 'figsize': '(15, 4)'}), "(adata, cluster_key='cell type', clusters=[\n 'basal CK tumor cell', 'T cells'], figsize=(15, 4))\n", (2744, 2843), True, 'import squidpy as sq\n'), ((4394, 4424), 'squidpy.gr.spatial_neighbors', 'sq.gr.spatial_neighbors', (['adata'], {}), '(adata)\n', (4417, 4424), True, 'import squidpy as sq\n'), ((4425, 4479), 'squidpy.gr.nhood_enrichment', 'sq.gr.nhood_enrichment', (['adata'], {'cluster_key': '"""cell type"""'}), "(adata, cluster_key='cell type')\n", (4447, 4479), True, 'import squidpy as sq\n'), ((4480, 4534), 'squidpy.pl.nhood_enrichment', 'sq.pl.nhood_enrichment', (['adata'], {'cluster_key': '"""cell type"""'}), "(adata, cluster_key='cell type')\n", (4502, 4534), True, 'import squidpy as sq\n'), ((5727, 5783), 'squidpy.gr.interaction_matrix', 'sq.gr.interaction_matrix', (['adata'], {'cluster_key': '"""cell type"""'}), "(adata, cluster_key='cell type')\n", (5751, 5783), True, 'import squidpy as sq\n'), ((5784, 5840), 'squidpy.pl.interaction_matrix', 'sq.pl.interaction_matrix', (['adata'], {'cluster_key': '"""cell type"""'}), "(adata, cluster_key='cell type')\n", (5808, 5840), True, 'import squidpy as sq\n'), ((6302, 6357), 'squidpy.gr.centrality_scores', 'sq.gr.centrality_scores', (['adata'], {'cluster_key': '"""cell type"""'}), "(adata, cluster_key='cell type')\n", (6325, 6357), True, 'import squidpy as sq\n'), ((6369, 6448), 'squidpy.pl.centrality_scores', 'sq.pl.centrality_scores', (['adata'], {'cluster_key': '"""cell type"""', 'figsize': '(20, 5)', 's': '(500)'}), "(adata, cluster_key='cell type', figsize=(20, 5), s=500)\n", (6392, 6448), True, 'import squidpy as sq\n')] |
from pymongo import MongoClient
client = MongoClient()
db = client.music_space
def print_collection(collection):
print("/" * 75)
for x in db[collection].find():
print(x)
print("/" * 75)
def save_sales_on_mongo(collection, data):
for i in range(len(data['sales'])):
data['sales'][i]['total'] = float(data['sales'][i]['total'])
# db[collection].insert_one({ 'sales': data['sales'] })
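    # guard: pymongo's insert_many() rejects an empty document list, so only
    # insert when there is at least one sale (assumption about the data shape)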
    if data['sales']:
        db[collection].insert_many(data['sales'])
def save_recommendations_on_mongo(collection, data):
db[collection].insert_one({ 'rec': data })
| [
"pymongo.MongoClient"
] | [((42, 55), 'pymongo.MongoClient', 'MongoClient', ([], {}), '()\n', (53, 55), False, 'from pymongo import MongoClient\n')] |
"""
.. todo::
WRITEME
"""
import logging
from theano import function, shared
from pylearn2.optimization import linear_cg as cg
from pylearn2.optimization.feature_sign import feature_sign_search
import numpy as N
import theano.tensor as T
from pylearn2.utils.rng import make_np_rng
logger = logging.getLogger(__name__)
class LocalCoordinateCoding(object):
"""
.. todo::
WRITEME
Parameters
----------
nvis : WRITEME
nhid : WRITEME
coeff : WRITEME
"""
def __init__(self, nvis, nhid, coeff):
self.nvis = nvis
self.nhid = nhid
self.coeff = float(coeff)
self.rng = make_np_rng(None, [1, 2, 3], which_method="randn")
self.redo_everything()
def get_output_channels(self):
"""
.. todo::
WRITEME
"""
return self.nhid
def redo_everything(self):
"""
.. todo::
WRITEME
"""
self.W = shared(self.rng.randn(self.nhid, self.nvis), name='W')
self.W.T.name = 'W.T'
def weights_format(self):
"""
.. todo::
WRITEME
"""
return ['h', 'v']
def optimize_gamma(self, example):
"""
.. todo::
WRITEME
"""
#variable names chosen to follow the arguments to l1ls_featuresign
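        # Local coordinate coding reading of the code below: c holds the
        # squared distances from each dictionary atom to the example, so
        # scaling the dictionary by 1/c and unscaling the solution (g = x/c)
        # makes the plain L1 solver prefer atoms close to the example.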
Y = N.zeros((self.nvis,))
Y[:] = example
c = (1e-10 + N.square(self.W.get_value(borrow=True) -
example).sum(axis=1))
A = self.W.get_value(borrow=True).T / c
x = feature_sign_search(A, Y, self.coeff)
g = x / c
return g
def train_batch(self, dataset, batch_size):
"""
.. todo::
WRITEME
"""
#TODO-- this results in compilation happening every time learn is
# called should cache the compilation results, including those
# inside cg
X = dataset.get_design_matrix()
m = X.shape[0]
assert X.shape[1] == self.nvis
gamma = N.zeros((batch_size, self.nhid))
cur_gamma = T.vector(name='cur_gamma')
cur_v = T.vector(name='cur_v')
recons = T.dot(cur_gamma, self.W)
recons.name = 'recons'
recons_diffs = cur_v - recons
recons_diffs.name = 'recons_diffs'
recons_diff_sq = T.sqr(recons_diffs)
recons_diff_sq.name = 'recons_diff'
recons_error = T.sum(recons_diff_sq)
recons_error.name = 'recons_error'
dict_dists = T.sum(T.sqr(self.W - cur_v), axis=1)
dict_dists.name = 'dict_dists'
abs_gamma = abs(cur_gamma)
abs_gamma.name = 'abs_gamma'
weighted_dists = T.dot(abs_gamma, dict_dists)
weighted_dists.name = 'weighted_dists'
penalty = self.coeff * weighted_dists
penalty.name = 'penalty'
#prevent directions of absolute flatness in the hessian
#W_sq = T.sqr(self.W)
#W_sq.name = 'W_sq'
#debug = T.sum(W_sq)
debug = 1e-10 * T.sum(dict_dists)
debug.name = 'debug'
#J = debug
J = recons_error + penalty + debug
J.name = 'J'
Jf = function([cur_v, cur_gamma], J)
start = self.rng.randint(m - batch_size + 1)
batch_X = X[start:start + batch_size, :]
#TODO-- optimize gamma
logger.info('optimizing gamma')
for i in xrange(batch_size):
#print str(i+1)+'/'+str(batch_size)
gamma[i, :] = self.optimize_gamma(batch_X[i, :])
logger.info('max min')
logger.info(N.abs(gamma).min(axis=0).max())
logger.info('min max')
logger.info(N.abs(gamma).max(axis=0).max())
#Optimize W
logger.info('optimizing W')
logger.warning("not tested since switching to Razvan's all-theano "
"implementation of linear cg")
cg.linear_cg(J, [self.W], max_iters=3)
err = 0.
for i in xrange(batch_size):
err += Jf(batch_X[i, :], gamma[i, :])
assert not N.isnan(err)
assert not N.isinf(err)
logger.info('err: {0}'.format(err))
return True
| [
"logging.getLogger",
"pylearn2.optimization.feature_sign.feature_sign_search",
"pylearn2.utils.rng.make_np_rng",
"pylearn2.optimization.linear_cg.linear_cg",
"numpy.abs",
"theano.function",
"theano.tensor.sum",
"theano.tensor.vector",
"theano.tensor.sqr",
"numpy.zeros",
"numpy.isnan",
"numpy.i... | [((297, 324), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (314, 324), False, 'import logging\n'), ((647, 697), 'pylearn2.utils.rng.make_np_rng', 'make_np_rng', (['None', '[1, 2, 3]'], {'which_method': '"""randn"""'}), "(None, [1, 2, 3], which_method='randn')\n", (658, 697), False, 'from pylearn2.utils.rng import make_np_rng\n'), ((1363, 1384), 'numpy.zeros', 'N.zeros', (['(self.nvis,)'], {}), '((self.nvis,))\n', (1370, 1384), True, 'import numpy as N\n'), ((1582, 1619), 'pylearn2.optimization.feature_sign.feature_sign_search', 'feature_sign_search', (['A', 'Y', 'self.coeff'], {}), '(A, Y, self.coeff)\n', (1601, 1619), False, 'from pylearn2.optimization.feature_sign import feature_sign_search\n'), ((2051, 2083), 'numpy.zeros', 'N.zeros', (['(batch_size, self.nhid)'], {}), '((batch_size, self.nhid))\n', (2058, 2083), True, 'import numpy as N\n'), ((2104, 2130), 'theano.tensor.vector', 'T.vector', ([], {'name': '"""cur_gamma"""'}), "(name='cur_gamma')\n", (2112, 2130), True, 'import theano.tensor as T\n'), ((2147, 2169), 'theano.tensor.vector', 'T.vector', ([], {'name': '"""cur_v"""'}), "(name='cur_v')\n", (2155, 2169), True, 'import theano.tensor as T\n'), ((2187, 2211), 'theano.tensor.dot', 'T.dot', (['cur_gamma', 'self.W'], {}), '(cur_gamma, self.W)\n', (2192, 2211), True, 'import theano.tensor as T\n'), ((2351, 2370), 'theano.tensor.sqr', 'T.sqr', (['recons_diffs'], {}), '(recons_diffs)\n', (2356, 2370), True, 'import theano.tensor as T\n'), ((2439, 2460), 'theano.tensor.sum', 'T.sum', (['recons_diff_sq'], {}), '(recons_diff_sq)\n', (2444, 2460), True, 'import theano.tensor as T\n'), ((2701, 2729), 'theano.tensor.dot', 'T.dot', (['abs_gamma', 'dict_dists'], {}), '(abs_gamma, dict_dists)\n', (2706, 2729), True, 'import theano.tensor as T\n'), ((3179, 3210), 'theano.function', 'function', (['[cur_v, cur_gamma]', 'J'], {}), '([cur_v, cur_gamma], J)\n', (3187, 3210), False, 'from theano import function, shared\n'), ((3894, 3932), 'pylearn2.optimization.linear_cg.linear_cg', 'cg.linear_cg', (['J', '[self.W]'], {'max_iters': '(3)'}), '(J, [self.W], max_iters=3)\n', (3906, 3932), True, 'from pylearn2.optimization import linear_cg as cg\n'), ((2532, 2553), 'theano.tensor.sqr', 'T.sqr', (['(self.W - cur_v)'], {}), '(self.W - cur_v)\n', (2537, 2553), True, 'import theano.tensor as T\n'), ((3034, 3051), 'theano.tensor.sum', 'T.sum', (['dict_dists'], {}), '(dict_dists)\n', (3039, 3051), True, 'import theano.tensor as T\n'), ((4058, 4070), 'numpy.isnan', 'N.isnan', (['err'], {}), '(err)\n', (4065, 4070), True, 'import numpy as N\n'), ((4090, 4102), 'numpy.isinf', 'N.isinf', (['err'], {}), '(err)\n', (4097, 4102), True, 'import numpy as N\n'), ((3584, 3596), 'numpy.abs', 'N.abs', (['gamma'], {}), '(gamma)\n', (3589, 3596), True, 'import numpy as N\n'), ((3667, 3679), 'numpy.abs', 'N.abs', (['gamma'], {}), '(gamma)\n', (3672, 3679), True, 'import numpy as N\n')] |
import re
import cv2
import json
import base64
import logging
import requests
import numpy as np
from datetime import datetime
import pytz
import gridfs
import pymongo
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError as MongoServerSelectionTimeoutError
import imagehash
from PIL import Image
from skimage.metrics import structural_similarity
class AnalyzeConditionsNotMetException(Exception):
"""
Raised when an error is encountered during execution of the run() function
"""
pass
class MediaAnalyzer(object):
"""
This class is used to analyze data generated by a MediaScraper object:
https://github.com/jesseVDwolf/ForumMediaScraper
It will retrieve data in batches using the MediaScraper's REST interface:
https://github.com/jesseVDwolf/ForumMediaScraperREST
"""
# taken from https://github.com/django/django/blob/stable/1.3.x/django/core/validators.py#L45
URL_VALIDATION_REGEX = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
MONGO_DEFAULT_URI = "mongodb://localhost:27017"
def __init__(self, scraper_rest_host: str="http://localhost:5000", log_level: int=logging.DEBUG,
document_retrieval_batch_size: int=5, mongo_uri: str=MONGO_DEFAULT_URI):
if re.match(MediaAnalyzer.URL_VALIDATION_REGEX, scraper_rest_host) is None:
raise ValueError('Invalid scraper_rest_host url: %s' % scraper_rest_host)
self.scraper_rest_host = scraper_rest_host
self.document_retrieval_batch_size = document_retrieval_batch_size
self.timezone = pytz.timezone('Europe/Berlin')
# create database related objects
self._mongo_client = MongoClient(mongo_uri)
self._mongo_database = self._mongo_client['9GagMedia']
self.gridfs = gridfs.GridFS(self._mongo_database)
self.logger = logging.getLogger(__name__)
self.logger.setLevel(log_level)
logging_args = {
"format": '%(asctime)s %(levelname)-8s %(message)s',
"level": logging.INFO,
"datefmt": '%Y-%m-%d %H:%M:%S'
}
logging.basicConfig(**logging_args)
if not self._mongo_database['Counter'].find_one():
self._mongo_database['Counter'].insert_one({'_id': 'OrderNum', 'val': 1})
def _get_tz_date(self):
return datetime.utcnow().replace(tzinfo=pytz.utc).astimezone(self.timezone)
@staticmethod
def _scale_images(image_one: np.ndarray, image_two: np.ndarray, scale_percent_dif: float=0.02):
# Scale the images so that they have the same
# dimensions. The bigger image will always be scaled down;
# It is considered bigger if contains more pixels i.e width x height
if image_one.shape == image_two.shape:
return image_one, image_two
# use aspect ratio to determine if images can be rescaled
*_, w1, h1 = cv2.boundingRect(image_one)
*_, w2, h2 = cv2.boundingRect(image_two)
if abs((float(w1) / h1) - (float(w2) / h2)) >= scale_percent_dif:
return None, None
if sum(image_one.shape[:2]) > sum(image_two.shape[:2]):
image_one = cv2.resize(
src=image_one,
dsize=(image_two.shape[1], image_two.shape[0]),
interpolation=cv2.INTER_CUBIC
)
else:
image_two = cv2.resize(
src=image_two,
dsize=(image_one.shape[1], image_one.shape[0]),
interpolation=cv2.INTER_CUBIC
)
return image_one, image_two
@staticmethod
def _mse(image_one: np.ndarray, image_two: np.ndarray):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((image_one.astype("float") - image_two.astype("float")) ** 2)
err /= float(image_one.shape[0] * image_one.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
@staticmethod
def _img_hash(image_one: np.ndarray, image_two: np.ndarray, func=imagehash.average_hash, cutoff: int=10):
# Use an image hashing algorithm to check for similarity between images
# Calculate the hashes of both images using one of the functions from
# the https://github.com/JohannesBuchner/imagehash project and subtract
# them from each other. A cutoff can be specified to account for
# little discrepancies
h1 = func(Image.fromarray(image_one))
h2 = func(Image.fromarray(image_two))
s = (h1 - h2) - cutoff
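        # note: (abs(s) + s) / 2 == max(s, 0), so hash differences within the
        # cutoff are reported as 0 (i.e. treated as identical)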
# return the similarity between images where the closer to 0 the better.
# taking into account the specified cutoff where s can not be a negative number
return int((abs(s)+s)/2)
def run(self):
try:
"""
Pre-run validation of resources on scraper rest interface and
the locally configured mongodb server
"""
r = requests.get(
url="%s/query" % self.scraper_rest_host,
params={'limit': 1, 'offset': 0}
)
r.raise_for_status()
self._mongo_client.server_info()
"""
Start processing. If posts have already been processed, use the ArticleId of the
last processed article to determine when to stop retrieving more data. Then use
different methods to determine similairity between images:
- image hashes
- mean squared error
- structural similarity measure
"""
last_article = self._mongo_database['Posts'].find_one(sort=[("OrderNum", pymongo.ASCENDING)])
run = self._mongo_database['Runs'].insert_one({
'StartProcessTime': self._get_tz_date(),
'EndProcessTime': None,
'PostsProcessed': 0,
'BatchesProcessed': 0
})
request_offset = 0
final_batch = False
last_article_found = False
posts_processed = 0
batches_processed = 0
while True:
resp = requests.get(url="%s/query" % self.scraper_rest_host, params={
'limit': self.document_retrieval_batch_size,
'offset': request_offset
})
resp.raise_for_status()
data = resp.json()
self.logger.debug('%s: Received new batch of data at %s using offset %d and limit %d' % (
str(run.inserted_id), self._get_tz_date().strftime("%Y-%m-%d %H:%M:%S"), request_offset, self.document_retrieval_batch_size))
if len(data['documents']) == 0:
self.logger.debug('%s: No more documents returned by %s using offset %d and limit %d' % (
str(run.inserted_id), self.scraper_rest_host, request_offset, self.document_retrieval_batch_size))
self.logger.info('%s: No more documents found. Finished %d batches' % (str(run.inserted_id), batches_processed))
break
if len(data['documents']) < self.document_retrieval_batch_size:
self.logger.debug('%s: No more data available from %s. Setting final batch' % (
str(run.inserted_id), self.scraper_rest_host))
final_batch = True
if len([doc for doc in data['documents'] if len(doc['Posts']) == 0]) == len(data['documents']):
self.logger.debug('%s: No posts found in documents at offset %d with limit %d' % (
str(run.inserted_id), request_offset, self.document_retrieval_batch_size))
self.logger.info('%s: No posts found in batch. Retrieving next batch' % str(run.inserted_id))
request_offset += self.document_retrieval_batch_size
batches_processed += 1
continue
for doc in [doc for doc in data['documents'] if len(doc['Posts']) != 0]:
if last_article:
if last_article['ArticleId'] == doc['StartPostId'] or last_article_found:
self.logger.debug('%s: Last article %s found at offset %d with limit %d' % (
str(run.inserted_id), str(last_article['ArticleId']), request_offset, self.document_retrieval_batch_size))
final_batch = True
break
self.logger.info('%s: %d posts found for processing in document %s' % (
str(run.inserted_id), len(doc['Posts']), doc['_id']))
processed_posts = self._mongo_database['Posts'].find({})
for post in doc['Posts']:
if last_article:
if last_article['ArticleId'] == post['ArticleId']:
self.logger.debug('%s: Last article %s found at offset %d with limit %d' % (
str(run.inserted_id), str(last_article['ArticleId']), request_offset, self.document_retrieval_batch_size))
last_article_found = True
break
im_s = str(post['MediaData'])
im_b = base64.b64decode(im_s.encode('utf-8'))
im_buff = np.asarray(bytearray(im_b), dtype=np.uint8)
im = cv2.imdecode(im_buff, cv2.IMREAD_GRAYSCALE)
media_id = self.gridfs.put(im_b)
md = {
"ArticleId": str(post['ArticleId']),
"RunId": run.inserted_id,
"PostProcessedTime": self._get_tz_date(),
"Dim": im.shape,
"MediaId": media_id,
"IsOriginal": True,
"RepostOff": None,
"Reposts": []
}
for pp in processed_posts:
if post['ArticleId'] == pp['ArticleId']:
                            # skip exact duplicates of the same article id;
                            # works around a bug in the MediaScraper
continue
f = self.gridfs.get(pp['MediaId'])
im1_buff = np.asarray(bytearray(f.read(size=-1)), dtype=np.uint8)
im1 = cv2.imdecode(im1_buff, cv2.IMREAD_GRAYSCALE)
im0, im1 = self._scale_images(im, im1)
if not hasattr(im0, "shape"):
# images could not be scaled since difference in dimensions
# is too big. Must be unique based on this
continue
mse = self._mse(im0, im1)
ss = structural_similarity(im0, im1)
hs = self._img_hash(im0, im1)
# The hash similarity will determine if an image is even close to being
# similar to the processed image. The structural similarity measure will
# then decide if this is actually correct. A last check is done to make
# sure that its not a meme that is posted with the same background but
# with different text using the very sensitive mse measure
if hs == 0:
if ss >= 0.65:
                                if mse < 2000.00 and pp['IsOriginal']:
# db image seems to be very similar to the processed image
md.update({"IsOriginal": False, "RepostOff": pp['_id'], "Reposts": None})
pp['Reposts'].append({
"ArticleId": md['ArticleId'],
"mse": mse,
"ssim": ss,
"hs": hs,
"certainty": 1
})
self._mongo_database['Posts'].replace_one({"_id": pp['_id']}, pp)
else:
# image background might be the same with different text
continue
else:
# structural similarity is too far off must be unique
continue
else:
# images are not similar at all
continue
self._mongo_database['Posts'].insert_one(md)
posts_processed += 1
if final_batch:
break
request_offset += self.document_retrieval_batch_size
batches_processed += 1
self.logger.info('%s: Finished final batch. %d posts processed' % (str(run.inserted_id), posts_processed))
self._mongo_database['Runs'].update_one({'_id': run.inserted_id}, {
"$set": {'PostsProcessed': posts_processed, 'EndProcessTime': self._get_tz_date()}
})
except requests.exceptions.RequestException as ree:
raise AnalyzeConditionsNotMetException({'message': ree})
except MongoServerSelectionTimeoutError as msste:
raise AnalyzeConditionsNotMetException({'message': msste})
except json.JSONDecodeError as je:
raise AnalyzeConditionsNotMetException({'message': je})
| [
"gridfs.GridFS",
"pytz.timezone",
"logging.getLogger",
"logging.basicConfig",
"PIL.Image.fromarray",
"skimage.metrics.structural_similarity",
"re.compile",
"datetime.datetime.utcnow",
"re.match",
"requests.get",
"cv2.imdecode",
"pymongo.MongoClient",
"cv2.resize",
"cv2.boundingRect"
] | [((1013, 1232), 're.compile', 're.compile', (['"""^(?:http|ftp)s?://(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\\\.)+(?:[A-Z]{2,6}\\\\.?|[A-Z0-9-]{2,}\\\\.?)|localhost|\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3})(?::\\\\d+)?(?:/?|[/?]\\\\S+)$"""', 're.IGNORECASE'], {}), "(\n '^(?:http|ftp)s?://(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\\\.)+(?:[A-Z]{2,6}\\\\.?|[A-Z0-9-]{2,}\\\\.?)|localhost|\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3})(?::\\\\d+)?(?:/?|[/?]\\\\S+)$'\n , re.IGNORECASE)\n", (1023, 1232), False, 'import re\n'), ((1938, 1968), 'pytz.timezone', 'pytz.timezone', (['"""Europe/Berlin"""'], {}), "('Europe/Berlin')\n", (1951, 1968), False, 'import pytz\n'), ((2044, 2066), 'pymongo.MongoClient', 'MongoClient', (['mongo_uri'], {}), '(mongo_uri)\n', (2055, 2066), False, 'from pymongo import MongoClient\n'), ((2154, 2189), 'gridfs.GridFS', 'gridfs.GridFS', (['self._mongo_database'], {}), '(self._mongo_database)\n', (2167, 2189), False, 'import gridfs\n'), ((2215, 2242), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2232, 2242), False, 'import logging\n'), ((2476, 2511), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '(**logging_args)\n', (2495, 2511), False, 'import logging\n'), ((3280, 3307), 'cv2.boundingRect', 'cv2.boundingRect', (['image_one'], {}), '(image_one)\n', (3296, 3307), False, 'import cv2\n'), ((3330, 3357), 'cv2.boundingRect', 'cv2.boundingRect', (['image_two'], {}), '(image_two)\n', (3346, 3357), False, 'import cv2\n'), ((1623, 1686), 're.match', 're.match', (['MediaAnalyzer.URL_VALIDATION_REGEX', 'scraper_rest_host'], {}), '(MediaAnalyzer.URL_VALIDATION_REGEX, scraper_rest_host)\n', (1631, 1686), False, 'import re\n'), ((3556, 3664), 'cv2.resize', 'cv2.resize', ([], {'src': 'image_one', 'dsize': '(image_two.shape[1], image_two.shape[0])', 'interpolation': 'cv2.INTER_CUBIC'}), '(src=image_one, dsize=(image_two.shape[1], image_two.shape[0]),\n interpolation=cv2.INTER_CUBIC)\n', (3566, 3664), False, 'import cv2\n'), ((3767, 3875), 'cv2.resize', 'cv2.resize', ([], {'src': 'image_two', 'dsize': '(image_one.shape[1], image_one.shape[0])', 'interpolation': 'cv2.INTER_CUBIC'}), '(src=image_two, dsize=(image_one.shape[1], image_one.shape[0]),\n interpolation=cv2.INTER_CUBIC)\n', (3777, 3875), False, 'import cv2\n'), ((5013, 5039), 'PIL.Image.fromarray', 'Image.fromarray', (['image_one'], {}), '(image_one)\n', (5028, 5039), False, 'from PIL import Image\n'), ((5060, 5086), 'PIL.Image.fromarray', 'Image.fromarray', (['image_two'], {}), '(image_two)\n', (5075, 5086), False, 'from PIL import Image\n'), ((5541, 5632), 'requests.get', 'requests.get', ([], {'url': "('%s/query' % self.scraper_rest_host)", 'params': "{'limit': 1, 'offset': 0}"}), "(url='%s/query' % self.scraper_rest_host, params={'limit': 1,\n 'offset': 0})\n", (5553, 5632), False, 'import requests\n'), ((6746, 6884), 'requests.get', 'requests.get', ([], {'url': "('%s/query' % self.scraper_rest_host)", 'params': "{'limit': self.document_retrieval_batch_size, 'offset': request_offset}"}), "(url='%s/query' % self.scraper_rest_host, params={'limit': self\n .document_retrieval_batch_size, 'offset': request_offset})\n", (6758, 6884), False, 'import requests\n'), ((2708, 2725), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2723, 2725), False, 'from datetime import datetime\n'), ((10167, 10210), 'cv2.imdecode', 'cv2.imdecode', (['im_buff', 'cv2.IMREAD_GRAYSCALE'], {}), '(im_buff, cv2.IMREAD_GRAYSCALE)\n', (10179, 10210), False, 'import 
cv2\n'), ((11272, 11316), 'cv2.imdecode', 'cv2.imdecode', (['im1_buff', 'cv2.IMREAD_GRAYSCALE'], {}), '(im1_buff, cv2.IMREAD_GRAYSCALE)\n', (11284, 11316), False, 'import cv2\n'), ((11746, 11777), 'skimage.metrics.structural_similarity', 'structural_similarity', (['im0', 'im1'], {}), '(im0, im1)\n', (11767, 11777), False, 'from skimage.metrics import structural_similarity\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import time
import sys
import functools
import math
import paddle
import paddle.fluid as fluid
import paddle.dataset.flowers as flowers
import models
import reader
import argparse
from models.learning_rate import cosine_decay
from utility import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 256, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('total_images', int, 1281167, "Training image number.")
add_arg('num_epochs', int, 120, "number of epochs.")
add_arg('class_dim', int, 1000, "Class number.")
add_arg('image_shape', str, "3,224,224", "input image size")
add_arg('model_save_dir', str, "output", "model save directory")
add_arg('with_mem_opt', bool, True, "Whether to use memory optimization or not.")
add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
add_arg('checkpoint', str, None, "Whether to resume checkpoint.")
add_arg('lr', float, 0.1, "set learning rate.")
add_arg('lr_strategy', str, "piecewise_decay", "Set the learning rate decay strategy.")
add_arg('model', str, "SE_ResNeXt50_32x4d", "Set the network to use.")
add_arg('enable_ce', bool, False, "If set True, enable continuous evaluation job.")
# yapf: enable
model_list = [m for m in dir(models) if "__" not in m]
def optimizer_setting(params):
ls = params["learning_strategy"]
if ls["name"] == "piecewise_decay":
if "total_images" not in params:
total_images = 1281167
else:
total_images = params["total_images"]
batch_size = ls["batch_size"]
step = int(total_images / batch_size + 1)
bd = [step * e for e in ls["epochs"]]
base_lr = params["lr"]
        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
optimizer = fluid.optimizer.Momentum(
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr),
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
elif ls["name"] == "cosine_decay":
if "total_images" not in params:
total_images = 1281167
else:
total_images = params["total_images"]
batch_size = ls["batch_size"]
step = int(total_images / batch_size + 1)
lr = params["lr"]
num_epochs = params["num_epochs"]
optimizer = fluid.optimizer.Momentum(
learning_rate=cosine_decay(
learning_rate=lr, step_each_epoch=step, epochs=num_epochs),
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
else:
lr = params["lr"]
optimizer = fluid.optimizer.Momentum(
learning_rate=lr,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
return optimizer
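# Example (hypothetical settings): with total_images=1281167, batch_size=256
# and ls["epochs"] = [30, 60, 90], piecewise_decay keeps the base LR for the
# first ~150k steps and multiplies it by 0.1 at each epoch boundary.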
def train(args):
# parameters from arguments
class_dim = args.class_dim
model_name = args.model
checkpoint = args.checkpoint
pretrained_model = args.pretrained_model
with_memory_optimization = args.with_mem_opt
model_save_dir = args.model_save_dir
image_shape = [int(m) for m in args.image_shape.split(",")]
assert model_name in model_list, "{} is not in lists: {}".format(args.model,
model_list)
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# model definition
model = models.__dict__[model_name]()
if args.enable_ce:
assert model_name == "SE_ResNeXt50_32x4d"
fluid.default_startup_program().random_seed = 1000
model.params["enable_ce"] = True
class_dim = 102
if model_name == "GoogleNet":
out0, out1, out2 = model.net(input=image, class_dim=class_dim)
cost0 = fluid.layers.cross_entropy(input=out0, label=label)
cost1 = fluid.layers.cross_entropy(input=out1, label=label)
cost2 = fluid.layers.cross_entropy(input=out2, label=label)
avg_cost0 = fluid.layers.mean(x=cost0)
avg_cost1 = fluid.layers.mean(x=cost1)
avg_cost2 = fluid.layers.mean(x=cost2)
avg_cost = avg_cost0 + 0.3 * avg_cost1 + 0.3 * avg_cost2
acc_top1 = fluid.layers.accuracy(input=out0, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out0, label=label, k=5)
else:
out = model.net(input=image, class_dim=class_dim)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
test_program = fluid.default_main_program().clone(for_test=True)
# parameters from model and arguments
params = model.params
params["total_images"] = args.total_images
params["lr"] = args.lr
params["num_epochs"] = args.num_epochs
params["learning_strategy"]["batch_size"] = args.batch_size
params["learning_strategy"]["name"] = args.lr_strategy
# initialize optimizer
optimizer = optimizer_setting(params)
opts = optimizer.minimize(avg_cost)
if with_memory_optimization:
fluid.memory_optimize(fluid.default_main_program())
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
if checkpoint is not None:
fluid.io.load_persistables(exe, checkpoint)
if pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(pretrained_model, var.name))
fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
train_batch_size = args.batch_size
test_batch_size = 16
if not args.enable_ce:
train_reader = paddle.batch(reader.train(), batch_size=train_batch_size)
test_reader = paddle.batch(reader.val(), batch_size=test_batch_size)
else:
        # use the flowers dataset for CE and set use_xmap=False to avoid
        # out-of-order data; this is slow, so prefer another dataset when
        # speed matters.
import random
random.seed(0)
np.random.seed(0)
train_reader = paddle.batch(
flowers.train(use_xmap=False), batch_size=train_batch_size)
test_reader = paddle.batch(
flowers.test(use_xmap=False), batch_size=test_batch_size)
feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
train_exe = fluid.ParallelExecutor(
use_cuda=True if args.use_gpu else False, loss_name=avg_cost.name)
fetch_list = [avg_cost.name, acc_top1.name, acc_top5.name]
gpu = os.getenv("CUDA_VISIBLE_DEVICES") or ""
gpu_nums = len(gpu.split(","))
for pass_id in range(params["num_epochs"]):
train_info = [[], [], []]
test_info = [[], [], []]
train_time = []
for batch_id, data in enumerate(train_reader()):
t1 = time.time()
loss, acc1, acc5 = train_exe.run(fetch_list, feed=feeder.feed(data))
t2 = time.time()
period = t2 - t1
loss = np.mean(np.array(loss))
acc1 = np.mean(np.array(acc1))
acc5 = np.mean(np.array(acc5))
train_info[0].append(loss)
train_info[1].append(acc1)
train_info[2].append(acc5)
train_time.append(period)
if batch_id % 10 == 0:
print("Pass {0}, trainbatch {1}, loss {2}, \
acc1 {3}, acc5 {4} time {5}"
.format(pass_id, \
batch_id, loss, acc1, acc5, \
"%2.2f sec" % period))
sys.stdout.flush()
train_loss = np.array(train_info[0]).mean()
train_acc1 = np.array(train_info[1]).mean()
train_acc5 = np.array(train_info[2]).mean()
train_speed = np.array(train_time).mean() / train_batch_size
cnt = 0
for test_batch_id, data in enumerate(test_reader()):
t1 = time.time()
loss, acc1, acc5 = exe.run(test_program,
fetch_list=fetch_list,
feed=feeder.feed(data))
t2 = time.time()
period = t2 - t1
loss = np.mean(loss)
acc1 = np.mean(acc1)
acc5 = np.mean(acc5)
test_info[0].append(loss * len(data))
test_info[1].append(acc1 * len(data))
test_info[2].append(acc5 * len(data))
cnt += len(data)
if test_batch_id % 10 == 0:
print("Pass {0},testbatch {1},loss {2}, \
acc1 {3},acc5 {4},time {5}"
.format(pass_id, \
test_batch_id, loss, acc1, acc5, \
"%2.2f sec" % period))
sys.stdout.flush()
test_loss = np.sum(test_info[0]) / cnt
test_acc1 = np.sum(test_info[1]) / cnt
test_acc5 = np.sum(test_info[2]) / cnt
print("End pass {0}, train_loss {1}, train_acc1 {2}, train_acc5 {3}, "
"test_loss {4}, test_acc1 {5}, test_acc5 {6}".format(pass_id, \
train_loss, train_acc1, train_acc5, test_loss, test_acc1, \
test_acc5))
sys.stdout.flush()
model_path = os.path.join(model_save_dir + '/' + model_name,
str(pass_id))
if not os.path.isdir(model_path):
os.makedirs(model_path)
fluid.io.save_persistables(exe, model_path)
# This is for continuous evaluation only
if args.enable_ce and pass_id == args.num_epochs - 1:
if gpu_nums == 1:
# Use the mean cost/acc for training
print("kpis train_cost %s" % train_loss)
print("kpis train_acc_top1 %s" % train_acc1)
print("kpis train_acc_top5 %s" % train_acc5)
# Use the mean cost/acc for testing
print("kpis test_cost %s" % test_loss)
print("kpis test_acc_top1 %s" % test_acc1)
print("kpis test_acc_top5 %s" % test_acc5)
print("kpis train_speed %s" % train_speed)
else:
# Use the mean cost/acc for training
print("kpis train_cost_card%s %s" %
(gpu_nums, train_loss))
print("kpis train_acc_top1_card%s %s" %
(gpu_nums, train_acc1))
print("kpis train_acc_top5_card%s %s" %
(gpu_nums, train_acc5))
# Use the mean cost/acc for testing
print("kpis test_cost_card%s %s" %
(gpu_nums, test_loss))
print("kpis test_acc_top1_card%s %s" %
(gpu_nums, test_acc1))
print("kpis test_acc_top5_card%s %s" %
(gpu_nums, test_acc5))
print("kpis train_speed_card%s %s" %
(gpu_nums, train_speed))
def main():
args = parser.parse_args()
print_arguments(args)
train(args)
if __name__ == '__main__':
main()
| [
"paddle.fluid.DataFeeder",
"paddle.dataset.flowers.test",
"paddle.fluid.layers.data",
"paddle.fluid.layers.cross_entropy",
"numpy.array",
"paddle.fluid.Executor",
"paddle.dataset.flowers.train",
"paddle.fluid.layers.piecewise_decay",
"reader.train",
"numpy.mean",
"argparse.ArgumentParser",
"pa... | [((425, 469), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (448, 469), False, 'import argparse\n'), ((480, 530), 'functools.partial', 'functools.partial', (['add_arguments'], {'argparser': 'parser'}), '(add_arguments, argparser=parser)\n', (497, 530), False, 'import functools\n'), ((3879, 3946), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""image"""', 'shape': 'image_shape', 'dtype': '"""float32"""'}), "(name='image', shape=image_shape, dtype='float32')\n", (3896, 3946), True, 'import paddle.fluid as fluid\n'), ((3959, 4016), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""label"""', 'shape': '[1]', 'dtype': '"""int64"""'}), "(name='label', shape=[1], dtype='int64')\n", (3976, 4016), True, 'import paddle.fluid as fluid\n'), ((5923, 5944), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (5937, 5944), True, 'import paddle.fluid as fluid\n'), ((6995, 7050), 'paddle.fluid.DataFeeder', 'fluid.DataFeeder', ([], {'place': 'place', 'feed_list': '[image, label]'}), '(place=place, feed_list=[image, label])\n', (7011, 7050), True, 'import paddle.fluid as fluid\n'), ((7068, 7162), 'paddle.fluid.ParallelExecutor', 'fluid.ParallelExecutor', ([], {'use_cuda': '(True if args.use_gpu else False)', 'loss_name': 'avg_cost.name'}), '(use_cuda=True if args.use_gpu else False, loss_name=\n avg_cost.name)\n', (7090, 7162), True, 'import paddle.fluid as fluid\n'), ((11723, 11744), 'utility.print_arguments', 'print_arguments', (['args'], {}), '(args)\n', (11738, 11744), False, 'from utility import add_arguments, print_arguments\n'), ((4403, 4454), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', ([], {'input': 'out0', 'label': 'label'}), '(input=out0, label=label)\n', (4429, 4454), True, 'import paddle.fluid as fluid\n'), ((4471, 4522), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', ([], {'input': 'out1', 'label': 'label'}), '(input=out1, label=label)\n', (4497, 4522), True, 'import paddle.fluid as fluid\n'), ((4539, 4590), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', ([], {'input': 'out2', 'label': 'label'}), '(input=out2, label=label)\n', (4565, 4590), True, 'import paddle.fluid as fluid\n'), ((4611, 4637), 'paddle.fluid.layers.mean', 'fluid.layers.mean', ([], {'x': 'cost0'}), '(x=cost0)\n', (4628, 4637), True, 'import paddle.fluid as fluid\n'), ((4658, 4684), 'paddle.fluid.layers.mean', 'fluid.layers.mean', ([], {'x': 'cost1'}), '(x=cost1)\n', (4675, 4684), True, 'import paddle.fluid as fluid\n'), ((4705, 4731), 'paddle.fluid.layers.mean', 'fluid.layers.mean', ([], {'x': 'cost2'}), '(x=cost2)\n', (4722, 4731), True, 'import paddle.fluid as fluid\n'), ((4817, 4868), 'paddle.fluid.layers.accuracy', 'fluid.layers.accuracy', ([], {'input': 'out0', 'label': 'label', 'k': '(1)'}), '(input=out0, label=label, k=1)\n', (4838, 4868), True, 'import paddle.fluid as fluid\n'), ((4888, 4939), 'paddle.fluid.layers.accuracy', 'fluid.layers.accuracy', ([], {'input': 'out0', 'label': 'label', 'k': '(5)'}), '(input=out0, label=label, k=5)\n', (4909, 4939), True, 'import paddle.fluid as fluid\n'), ((5023, 5073), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', ([], {'input': 'out', 'label': 'label'}), '(input=out, label=label)\n', (5049, 5073), True, 'import paddle.fluid as fluid\n'), ((5094, 5119), 'paddle.fluid.layers.mean', 'fluid.layers.mean', ([], {'x': 'cost'}), '(x=cost)\n', (5111, 5119), True, 'import 
paddle.fluid as fluid\n'), ((5139, 5189), 'paddle.fluid.layers.accuracy', 'fluid.layers.accuracy', ([], {'input': 'out', 'label': 'label', 'k': '(1)'}), '(input=out, label=label, k=1)\n', (5160, 5189), True, 'import paddle.fluid as fluid\n'), ((5209, 5259), 'paddle.fluid.layers.accuracy', 'fluid.layers.accuracy', ([], {'input': 'out', 'label': 'label', 'k': '(5)'}), '(input=out, label=label, k=5)\n', (5230, 5259), True, 'import paddle.fluid as fluid\n'), ((5856, 5874), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (5871, 5874), True, 'import paddle.fluid as fluid\n'), ((5896, 5912), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (5910, 5912), True, 'import paddle.fluid as fluid\n'), ((5957, 5988), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (5986, 5988), True, 'import paddle.fluid as fluid\n'), ((6030, 6073), 'paddle.fluid.io.load_persistables', 'fluid.io.load_persistables', (['exe', 'checkpoint'], {}), '(exe, checkpoint)\n', (6056, 6073), True, 'import paddle.fluid as fluid\n'), ((6213, 6274), 'paddle.fluid.io.load_vars', 'fluid.io.load_vars', (['exe', 'pretrained_model'], {'predicate': 'if_exist'}), '(exe, pretrained_model, predicate=if_exist)\n', (6231, 6274), True, 'import paddle.fluid as fluid\n'), ((6725, 6739), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (6736, 6739), False, 'import random\n'), ((6748, 6765), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6762, 6765), True, 'import numpy as np\n'), ((7242, 7275), 'os.getenv', 'os.getenv', (['"""CUDA_VISIBLE_DEVICES"""'], {}), "('CUDA_VISIBLE_DEVICES')\n", (7251, 7275), False, 'import os\n'), ((9928, 9946), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9944, 9946), False, 'import sys\n'), ((10151, 10194), 'paddle.fluid.io.save_persistables', 'fluid.io.save_persistables', (['exe', 'model_path'], {}), '(exe, model_path)\n', (10177, 10194), True, 'import paddle.fluid as fluid\n'), ((4165, 4196), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (4194, 4196), True, 'import paddle.fluid as fluid\n'), ((5280, 5308), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (5306, 5308), True, 'import paddle.fluid as fluid\n'), ((5813, 5841), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (5839, 5841), True, 'import paddle.fluid as fluid\n'), ((6404, 6418), 'reader.train', 'reader.train', ([], {}), '()\n', (6416, 6418), False, 'import reader\n'), ((6484, 6496), 'reader.val', 'reader.val', ([], {}), '()\n', (6494, 6496), False, 'import reader\n'), ((6815, 6844), 'paddle.dataset.flowers.train', 'flowers.train', ([], {'use_xmap': '(False)'}), '(use_xmap=False)\n', (6828, 6844), True, 'import paddle.dataset.flowers as flowers\n'), ((6923, 6951), 'paddle.dataset.flowers.test', 'flowers.test', ([], {'use_xmap': '(False)'}), '(use_xmap=False)\n', (6935, 6951), True, 'import paddle.dataset.flowers as flowers\n'), ((7530, 7541), 'time.time', 'time.time', ([], {}), '()\n', (7539, 7541), False, 'import time\n'), ((7640, 7651), 'time.time', 'time.time', ([], {}), '()\n', (7649, 7651), False, 'import time\n'), ((8637, 8648), 'time.time', 'time.time', ([], {}), '()\n', (8646, 8648), False, 'import time\n'), ((8844, 8855), 'time.time', 'time.time', ([], {}), '()\n', (8853, 8855), False, 'import time\n'), ((8904, 8917), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (8911, 8917), True, 'import numpy as np\n'), ((8937, 8950), 
'numpy.mean', 'np.mean', (['acc1'], {}), '(acc1)\n', (8944, 8950), True, 'import numpy as np\n'), ((8970, 8983), 'numpy.mean', 'np.mean', (['acc5'], {}), '(acc5)\n', (8977, 8983), True, 'import numpy as np\n'), ((9541, 9561), 'numpy.sum', 'np.sum', (['test_info[0]'], {}), '(test_info[0])\n', (9547, 9561), True, 'import numpy as np\n'), ((9588, 9608), 'numpy.sum', 'np.sum', (['test_info[1]'], {}), '(test_info[1])\n', (9594, 9608), True, 'import numpy as np\n'), ((9635, 9655), 'numpy.sum', 'np.sum', (['test_info[2]'], {}), '(test_info[2])\n', (9641, 9655), True, 'import numpy as np\n'), ((10080, 10105), 'os.path.isdir', 'os.path.isdir', (['model_path'], {}), '(model_path)\n', (10093, 10105), False, 'import os\n'), ((10119, 10142), 'os.makedirs', 'os.makedirs', (['model_path'], {}), '(model_path)\n', (10130, 10142), False, 'import os\n'), ((2396, 2450), 'paddle.fluid.layers.piecewise_decay', 'fluid.layers.piecewise_decay', ([], {'boundaries': 'bd', 'values': 'lr'}), '(boundaries=bd, values=lr)\n', (2424, 2450), True, 'import paddle.fluid as fluid\n'), ((2522, 2555), 'paddle.fluid.regularizer.L2Decay', 'fluid.regularizer.L2Decay', (['(0.0001)'], {}), '(0.0001)\n', (2547, 2555), True, 'import paddle.fluid as fluid\n'), ((6162, 6202), 'os.path.join', 'os.path.join', (['pretrained_model', 'var.name'], {}), '(pretrained_model, var.name)\n', (6174, 6202), False, 'import os\n'), ((7708, 7722), 'numpy.array', 'np.array', (['loss'], {}), '(loss)\n', (7716, 7722), True, 'import numpy as np\n'), ((7751, 7765), 'numpy.array', 'np.array', (['acc1'], {}), '(acc1)\n', (7759, 7765), True, 'import numpy as np\n'), ((7794, 7808), 'numpy.array', 'np.array', (['acc5'], {}), '(acc5)\n', (7802, 7808), True, 'import numpy as np\n'), ((8298, 8316), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8314, 8316), False, 'import sys\n'), ((8339, 8362), 'numpy.array', 'np.array', (['train_info[0]'], {}), '(train_info[0])\n', (8347, 8362), True, 'import numpy as np\n'), ((8391, 8414), 'numpy.array', 'np.array', (['train_info[1]'], {}), '(train_info[1])\n', (8399, 8414), True, 'import numpy as np\n'), ((8443, 8466), 'numpy.array', 'np.array', (['train_info[2]'], {}), '(train_info[2])\n', (8451, 8466), True, 'import numpy as np\n'), ((9501, 9519), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9517, 9519), False, 'import sys\n'), ((2965, 3036), 'models.learning_rate.cosine_decay', 'cosine_decay', ([], {'learning_rate': 'lr', 'step_each_epoch': 'step', 'epochs': 'num_epochs'}), '(learning_rate=lr, step_each_epoch=step, epochs=num_epochs)\n', (2977, 3036), False, 'from models.learning_rate import cosine_decay\n'), ((3108, 3141), 'paddle.fluid.regularizer.L2Decay', 'fluid.regularizer.L2Decay', (['(0.0001)'], {}), '(0.0001)\n', (3133, 3141), True, 'import paddle.fluid as fluid\n'), ((3306, 3339), 'paddle.fluid.regularizer.L2Decay', 'fluid.regularizer.L2Decay', (['(0.0001)'], {}), '(0.0001)\n', (3331, 3339), True, 'import paddle.fluid as fluid\n'), ((8496, 8516), 'numpy.array', 'np.array', (['train_time'], {}), '(train_time)\n', (8504, 8516), True, 'import numpy as np\n')] |
from __future__ import print_function
import os
import base64
import cStringIO
import time
import numpy
from PIL import Image
from flask import (Flask, request, render_template, url_for, flash, redirect,
send_file)
from SeamErasure import seam_erasure, obj_reader, util
from SeamErasure.lib import weight_data
app = Flask(__name__)
ALLOWED_EXTENSIONS = set(['png', 'tga', 'jpg', 'jpeg', 'gif', 'tif', 'tiff'])
def allowed_file(filename):
return ('.' in filename and filename.rsplit('.', 1)[1].lower() in
ALLOWED_EXTENSIONS)
def is_data_file(filename):
return "data" in request.form or ('.' in filename and
filename.rsplit('.', 1)[1].lower() == "data")
def upload_file(fileID):
""" Returns the uploaded file with fileID. None if no file uploaded. """
if request.method == 'POST':
if fileID not in request.files:
return None
else:
inFile = request.files[fileID]
if inFile.filename == '':
return None
elif inFile: # and allowed_file(file.filename)
return inFile
else:
return None
@app.route('/')
def index():
return render_template('min-form.html')
@app.route('/erased', methods=['GET', 'POST'])
def erase():
if request.method == 'POST':
try:
startTime = time.time()
# Check the uploaded files
obj_file = upload_file("obj-input")
if not obj_file or ('.' in obj_file.filename and
obj_file.filename.rsplit('.', 1)[1].lower() != "obj"):
return render_template('min-error.html',
error_msg="No OBJ model provided.")
tex_file = upload_file("tex-input")
if not tex_file:
return render_template('min-error.html',
error_msg="No texture image provided.")
mesh = obj_reader.quads_to_triangles(
obj_reader.parse_obj(obj_file))
isFloatTexture = isDataFile = False
if(is_data_file(tex_file.filename)):
textureData = weight_data.read_tex_from_file(tex_file)[0]
isFloatTexture, isDataFile = True, True
else:
textureData = numpy.array(Image.open(tex_file).transpose(
Image.FLIP_TOP_BOTTOM))
isFloatTexture = not issubclass(textureData.dtype.type,
numpy.integer)
if(not isFloatTexture):
textureData = textureData / 255.0
height, width, depth = (textureData.shape + (1,))[:3]
sv_methods = {"none": seam_erasure.SeamValueMethod.NONE,
"texture": seam_erasure.SeamValueMethod.TEXTURE,
"lerp": seam_erasure.SeamValueMethod.LERP}
sv_method = sv_methods[request.form["sv"]]
do_global = "global" in request.form
out = seam_erasure.erase_seam(mesh, textureData,
do_global=do_global, sv_method=sv_method,
display_energy_file=None)
out = out.reshape((height, width, -1))
if(out.shape[2] < 2):
out = numpy.squeeze(out, axis=2)
if(not isFloatTexture):
out = util.to_uint8(out)
base, ext = os.path.splitext(os.path.basename(tex_file.filename))
out_filename = base + "-erased" + ext
if isDataFile:
img_io = cStringIO.StringIO()
weight_data.write_tex_to_file(img_io, textureData)
img_io.seek(0)
return send_file(img_io, as_attachment=True,
attachment_filename=out_filename)
else:
texture = Image.fromarray(out).transpose(Image.FLIP_TOP_BOTTOM)
img_io = cStringIO.StringIO()
texture.save(img_io, format=Image.EXTENSION[ext])
img_io.seek(0)
if isFloatTexture:
return send_file(img_io, as_attachment=True,
attachment_filename=out_filename)
data_uri = base64.b64encode(img_io.getvalue())
try:
return render_template('min-results.html',
min_tex=data_uri, runtime=("%.2f" %
(time.time() - startTime)),
mime_type=Image.MIME[Image.EXTENSION[ext]])
except Exception:
return send_file(img_io, as_attachment=True,
attachment_filename=out_filename)
except Exception as e:
return render_template('min-error.html',
error_msg=("Unable to erase the texture (%s)." % e.message))
return render_template('min-form.html')
if __name__ == '__main__':
app.run(debug=True)
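# Example client call (added for illustration; the endpoint and form-field
# names follow the routes above, while the host/port assume a local
# `app.run()` on Flask's default http://127.0.0.1:5000):
#
#   import requests
#   resp = requests.post('http://127.0.0.1:5000/erased',
#                        data={'sv': 'none'},
#                        files={'obj-input': open('model.obj', 'rb'),
#                               'tex-input': open('texture.png', 'rb')})
#   # resp carries either the erased texture as a download or an HTML results
#   # page, depending on the uploaded texture type (see the branches above).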
| [
"flask.render_template",
"PIL.Image.fromarray",
"cStringIO.StringIO",
"SeamErasure.obj_reader.parse_obj",
"SeamErasure.lib.weight_data.write_tex_to_file",
"PIL.Image.open",
"flask.Flask",
"SeamErasure.lib.weight_data.read_tex_from_file",
"SeamErasure.seam_erasure.erase_seam",
"numpy.squeeze",
"o... | [((326, 341), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (331, 341), False, 'from flask import Flask, request, render_template, url_for, flash, redirect, send_file\n'), ((1175, 1207), 'flask.render_template', 'render_template', (['"""min-form.html"""'], {}), "('min-form.html')\n", (1190, 1207), False, 'from flask import Flask, request, render_template, url_for, flash, redirect, send_file\n'), ((4759, 4791), 'flask.render_template', 'render_template', (['"""min-form.html"""'], {}), "('min-form.html')\n", (4774, 4791), False, 'from flask import Flask, request, render_template, url_for, flash, redirect, send_file\n'), ((1340, 1351), 'time.time', 'time.time', ([], {}), '()\n', (1349, 1351), False, 'import time\n'), ((2931, 3046), 'SeamErasure.seam_erasure.erase_seam', 'seam_erasure.erase_seam', (['mesh', 'textureData'], {'do_global': 'do_global', 'sv_method': 'sv_method', 'display_energy_file': 'None'}), '(mesh, textureData, do_global=do_global, sv_method=\n sv_method, display_energy_file=None)\n', (2954, 3046), False, 'from SeamErasure import seam_erasure, obj_reader, util\n'), ((1598, 1667), 'flask.render_template', 'render_template', (['"""min-error.html"""'], {'error_msg': '"""No OBJ model provided."""'}), "('min-error.html', error_msg='No OBJ model provided.')\n", (1613, 1667), False, 'from flask import Flask, request, render_template, url_for, flash, redirect, send_file\n'), ((1789, 1862), 'flask.render_template', 'render_template', (['"""min-error.html"""'], {'error_msg': '"""No texture image provided."""'}), "('min-error.html', error_msg='No texture image provided.')\n", (1804, 1862), False, 'from flask import Flask, request, render_template, url_for, flash, redirect, send_file\n'), ((1950, 1980), 'SeamErasure.obj_reader.parse_obj', 'obj_reader.parse_obj', (['obj_file'], {}), '(obj_file)\n', (1970, 1980), False, 'from SeamErasure import seam_erasure, obj_reader, util\n'), ((3182, 3208), 'numpy.squeeze', 'numpy.squeeze', (['out'], {'axis': '(2)'}), '(out, axis=2)\n', (3195, 3208), False, 'import numpy\n'), ((3267, 3285), 'SeamErasure.util.to_uint8', 'util.to_uint8', (['out'], {}), '(out)\n', (3280, 3285), False, 'from SeamErasure import seam_erasure, obj_reader, util\n'), ((3328, 3363), 'os.path.basename', 'os.path.basename', (['tex_file.filename'], {}), '(tex_file.filename)\n', (3344, 3363), False, 'import os\n'), ((3467, 3487), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (3485, 3487), False, 'import cStringIO\n'), ((3504, 3554), 'SeamErasure.lib.weight_data.write_tex_to_file', 'weight_data.write_tex_to_file', (['img_io', 'textureData'], {}), '(img_io, textureData)\n', (3533, 3554), False, 'from SeamErasure.lib import weight_data\n'), ((3610, 3681), 'flask.send_file', 'send_file', (['img_io'], {'as_attachment': '(True)', 'attachment_filename': 'out_filename'}), '(img_io, as_attachment=True, attachment_filename=out_filename)\n', (3619, 3681), False, 'from flask import Flask, request, render_template, url_for, flash, redirect, send_file\n'), ((3825, 3845), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (3843, 3845), False, 'import cStringIO\n'), ((4637, 4734), 'flask.render_template', 'render_template', (['"""min-error.html"""'], {'error_msg': "('Unable to erase the texture (%s).' % e.message)"}), "('min-error.html', error_msg=\n 'Unable to erase the texture (%s).' 
% e.message)\n", (4652, 4734), False, 'from flask import Flask, request, render_template, url_for, flash, redirect, send_file\n'), ((2110, 2150), 'SeamErasure.lib.weight_data.read_tex_from_file', 'weight_data.read_tex_from_file', (['tex_file'], {}), '(tex_file)\n', (2140, 2150), False, 'from SeamErasure.lib import weight_data\n'), ((4006, 4077), 'flask.send_file', 'send_file', (['img_io'], {'as_attachment': '(True)', 'attachment_filename': 'out_filename'}), '(img_io, as_attachment=True, attachment_filename=out_filename)\n', (4015, 4077), False, 'from flask import Flask, request, render_template, url_for, flash, redirect, send_file\n'), ((3746, 3766), 'PIL.Image.fromarray', 'Image.fromarray', (['out'], {}), '(out)\n', (3761, 3766), False, 'from PIL import Image\n'), ((4491, 4562), 'flask.send_file', 'send_file', (['img_io'], {'as_attachment': '(True)', 'attachment_filename': 'out_filename'}), '(img_io, as_attachment=True, attachment_filename=out_filename)\n', (4500, 4562), False, 'from flask import Flask, request, render_template, url_for, flash, redirect, send_file\n'), ((2270, 2290), 'PIL.Image.open', 'Image.open', (['tex_file'], {}), '(tex_file)\n', (2280, 2290), False, 'from PIL import Image\n'), ((4335, 4346), 'time.time', 'time.time', ([], {}), '()\n', (4344, 4346), False, 'import time\n')] |
import matplotlib, numpy
from . import plot
import markdown
import tabulate
md_extensions = [
'markdown.extensions.tables',
'markdown.extensions.extra'
]
class HTMLElement(object):
tag = None
childtag = None
def __init__(self, content=None, **kwargs):
self.content = []
self.meta = kwargs
if content:
self.__add__(content)
def __repr__(self):
output = ""
attributes = ""
for attr, val in self.meta.items():
if attr=="cl": attr="class"
attributes += """{}='{}'""".format(attr, val)
if self.tag: output += "<{} {}>".format(self.tag, attributes)
for item in self.content:
if self.childtag:
output += "<{0}>{1}</{0}>".format(self.childtag, str(item))
else:
output += str(item)
if self.tag: output += "</{}>".format(self.tag)
return output
def __str__(self):
return self.__repr__()
def __iadd__(self, item):
self.__add__(item)
return self.content
def __add__(self, item):
#if isinstance(item, list) and isinstance(item[0], list):
# self.content.append( )
if type(item) in handlers.keys():
self.content.append(handlers[type(item)](item))
else:
self.content.append(item)
class OrderedList(HTMLElement):
tag = "ol"
childtag = "li"
def __add__(self, items):
for item in items:
if type(item) in handlers.keys():
self.content.append(handlers[type(item)](item))
else:
self.content.append(item)
class Table(HTMLElement):
tag = "table"
class Row(HTMLElement):
tag = "tr"
childtag = "td"
def __add__(self, items):
for item in items:
if type(item) in handlers.keys():
self.content.append(handlers[type(item)](item))
else:
self.content.append(item)
def dict_to_table(dictionary):
table = Table(cl="table table-sm table-striped table-bordered")
for key, val in dictionary.items():
table + Row([key, val])
return table
handlers = {
str: lambda x: markdown.markdown(str(x), output_format='xhtml5', extensions=md_extensions),
matplotlib.figure.Figure: plot.Figure,
list: OrderedList,
dict: dict_to_table,
numpy.ndarray: lambda x: tabulate.tabulate(x, tablefmt=MyHTMLFormat)
}
from functools import partial
def my_html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"' }
values_with_attrs =\
["<{0}{1} class=\"my-cell\">{2}</{0}>"
.format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr class=\"my-row\">" + \
"".join(values_with_attrs).rstrip() + \
"</tr>"
MyHTMLFormat = tabulate.TableFormat(
lineabove=tabulate.Line("<table class=\"table table-sm\">", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=tabulate.Line("</table>", "", "", ""),
headerrow=partial(my_html_row_with_attrs, "th"),
datarow=partial(my_html_row_with_attrs, "td"),
padding=0, with_header_hide=None)
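# Usage note (added comment): MyHTMLFormat is a tabulate.TableFormat instance,
# so it can be passed wherever a built-in format name is accepted -- the
# `handlers` dict above already does this for numpy arrays. For example:
#
#   print(tabulate.tabulate([[1, 2], [3, 4]], tablefmt=MyHTMLFormat))
#
# emits a <table class="table table-sm"> ... </table> fragment whose rows and
# cells carry the "my-row" / "my-cell" classes built in my_html_row_with_attrs.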
| [
"tabulate.tabulate",
"tabulate.Line",
"functools.partial"
] | [((2441, 2484), 'tabulate.tabulate', 'tabulate.tabulate', (['x'], {'tablefmt': 'MyHTMLFormat'}), '(x, tablefmt=MyHTMLFormat)\n', (2458, 2484), False, 'import tabulate\n'), ((3147, 3206), 'tabulate.Line', 'tabulate.Line', (['"""<table class="table table-sm">"""', '""""""', '""""""', '""""""'], {}), '(\'<table class="table table-sm">\', \'\', \'\', \'\')\n', (3160, 3206), False, 'import tabulate\n'), ((3288, 3325), 'tabulate.Line', 'tabulate.Line', (['"""</table>"""', '""""""', '""""""', '""""""'], {}), "('</table>', '', '', '')\n", (3301, 3325), False, 'import tabulate\n'), ((3345, 3382), 'functools.partial', 'partial', (['my_html_row_with_attrs', '"""th"""'], {}), "(my_html_row_with_attrs, 'th')\n", (3352, 3382), False, 'from functools import partial\n'), ((3400, 3437), 'functools.partial', 'partial', (['my_html_row_with_attrs', '"""td"""'], {}), "(my_html_row_with_attrs, 'td')\n", (3407, 3437), False, 'from functools import partial\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tools for setting up and using the data recorder of a PI device."""
from logging import debug, warning
from time import sleep, time
from pipython.pitools import FrozenClass
# seconds
SERVOTIMES = {
'C-413K011': 0.00003333333,
'C-663.11': 50E-6,
'C-702.00': 100E-6,
'C-843': 410E-6,
'C-863.11': 50E-6,
'C-867.160': 50E-6, # verified
'C-867.260': 50E-6, # verified
'C-867.262': 50E-6, # verified
'C-867K016': 50E-6, # verified
'C-867.B0017': 100E-6,
'C-867.B0019': 100E-6,
'C-867.B024': 100E-6,
'C-867.K0020': 100E-6,
'C-867.OE': 50E-6,
'C-867K021': 100E-6,
'C-877': 100E-6,
'C-880': 4096E-6,
'C-884.4D': 50E-6,
'C-884.4DB': 50E-6,
'C-887': 20E-6,
'E-710': 200E-6,
'E-755': 200E-6,
'E-861': 50E-6,
'E-861.11C885': 50E-6,
'E-871.1A1': 50E-6,
'E-873': 50E-6,
'E-873.1A1': 50E-6,
'E-873.3QTU': 50E-6,
}
MAXNUMVALUES = {
'C-413K011': 4096,
'C-663.11': 1024,
'C-702.00': 262144,
'C-863.11': 1024,
'C-863.12': 1024,
'C-867.160': 8192, # verified
'C-867.1U': 8192, # verified
'C-867.260': 8192, # verified
'C-867.262': 8192, # verified
'C-867K016': 8192, # verified
'C-867K028': 1024, # verified
'C-867K031': 8192, # verified
'C-867K036 APP': 1024, # verified
'C-867.2U': 8192, # verified
'C-867.2U2': 8192, # verified
'C-867.B0017': 8192,
'C-867.B0019': 8192,
'C-867.B024': 8192,
'C-867.K0020': 8192,
'C-867.OE': 1024,
'C-867K021': 8192,
'C-877': 1024,
'C-877.1U11': 1024, # verified
'C-877.2U12': 1024, # verified
'C-884.4D': 8192,
'C-884.4DB': 8192,
'E-761': 8192,
'E-861': 1024,
'E-861.11C885': 1024,
'E-871.1A1': 1024,
'E-873': 1024,
'E-873.1A1': 1024,
'E-873.3QTU': 1024,
}
class RecordOptions(object): # Too few public methods pylint: disable=R0903
"""Defines for the kind of data to be recorded."""
NOTHING_0 = 0
COMMANDED_POSITION_1 = 1
ACTUAL_POSITION_2 = 2
POSITION_ERROR_3 = 3
PIO_VALUE_4 = 4
DIO_VALUE_5 = 5
COMEDI_VALUE_6 = 6
PIEZO_VOLTAGE_7 = 7
TIMESTAMP_8 = 8
INDEX_9 = 9
TICKS_10 = 10
DDL_OUTPUT_13 = 13
OPENLOOP_INPUT_14 = 14
PID_OUTPUT_15 = 15
ANALOG_OUTPUT_16 = 16
SENSOR_NORMALIZED_17 = 17
SENSOR_FILTERED_18 = 18
SENSOR_ELEC_LIN_19 = 19
SENSOR_MECH_LIN_20 = 20
TARGET_SLEWRATE_LIM_22 = 22
TARGET_VELOCITY_23 = 23
TARGET_ACCELERATION_24 = 24
TARGET_JERK_25 = 25
DI_VALUE_26 = 26
DO_VALUE_27 = 27
CTV_TARGET_VALUE_28 = 28
CCV_CONTROL_VALUE_29 = 29
CAV_ACTUAL_VALUE_30 = 30
CCV_CURRENT_VALUE_31 = 31
DRIFT_COMP_OFFSET_32 = 32
HYBRID_MOTOR_VOLTAGE_33 = 33
HYBRID_PIEZO_VOLTAGE_34 = 34
SYSTEM_TIME_44 = 44
COMMANDED_VELOCITY_70 = 70
COMMANDED_ACCELERATION_71 = 71
ACTUAL_VELOCITY_72 = 72
MOTOR_OUTPUT_73 = 73
KP_OF_AXIS_74 = 74
KI_OF_AXIS_75 = 75
KD_OF_AXIS_76 = 76
SIGNAL_STATUS_REGISTER_80 = 80
ANALOG_INPUT_81 = 81
ACTIVE_PARAMETERSET_90 = 90
ACTUAL_FREQUENCY_91 = 91
P0_92 = 92
DIA_93 = 93
class TriggerSources(object): # Too few public methods pylint: disable=R0903
"""Defines for sources that can trigger data recording."""
DEFAULT_0 = 0
POSITION_CHANGING_COMMAND_1 = 1
NEXT_COMMAND_WITH_RESET_2 = 2
EXTERNAL_TRIGGER_3 = 3
TRIGGER_IMMEDIATELY_4 = 4
DIO_CHANNEL_5 = 5
POS_CHANGING_WITH_RESET_6 = 6
SMO_COMMAND_WITH_RESET_7 = 7
COMEDI_CHANNEL_8 = 8
WAVE_GENERATOR_9 = 9
def __getopt(name, enumclass):
"""Return item of 'enumclass' which name parts start with 'name'.
@param name : Short name of item, e.g. "CUR_POS". Case insensitive, separated by "_".
@param enumclass : Class name that contains enums.
@return : According enum value as integer.
"""
for item in dir(enumclass):
match = []
for i, itempart in enumerate(item.split('_')):
if itempart.isdigit():
continue
try:
namepart = name.split('_')[i]
except IndexError:
continue
match.append(__isabbreviation(namepart.upper(), itempart.upper()))
if all(match):
return getattr(enumclass, item)
def __isabbreviation(abbrev, item):
"""Return True if first char of 'abbrev' and 'item' match and all chars of 'abbrev' occur in 'item' in this order.
@param abbrev : Case sensitive string.
@param item : Case sensitive string.
@return : True if 'abbrev' is an abbreviation of 'item'.
"""
if not abbrev:
return True
if not item:
return False
if abbrev[0] != item[0]:
return False
return any(__isabbreviation(abbrev[1:], item[i + 1:]) for i in range(len(item)))
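# Worked example (added comment): __isabbreviation('ACT', 'ACTUAL') is True
# ('A' matches 'A', then 'C' and 'T' occur in order within 'CTUAL'), while
# __isabbreviation('ACT', 'TARGET') is False because the first characters
# differ. __getopt() applies this check per '_'-separated name part, so e.g.
# getrecopt('ACT_POS') resolves to RecordOptions.ACTUAL_POSITION_2, i.e. 2.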
def getrecopt(name):
"""Return record option value according to 'name'.
@param name: Short name of item, e.g. "CUR_POS". Case insensitive, separated by "_".
@return : According enum value as integer.
"""
return __getopt(name, RecordOptions)
def gettrigsources(name):
"""Return trigger option value according to 'name'.
@param name: Short name of item, e.g. "CUR_POS". Case insensitive, separated by "_".
@return : According enum value as integer.
"""
return __getopt(name, TriggerSources)
def getservotime(gcs):
"""Return current servo cycle time in seconds as float.
@type gcs : pipython.gcscommands.GCSCommands
@return : Current servo cycle time in seconds as float.
"""
servotime = None
if gcs.devname in ['C-702.00']:
servotime = SERVOTIMES[gcs.devname]
if servotime is None:
servotime = gcs.getparam(0x0E000200) # SERVO_UPDATE_TIME
if servotime is None:
if gcs.devname in SERVOTIMES:
servotime = SERVOTIMES[gcs.devname]
if servotime is None:
raise NotImplementedError('servo cycle time for %r is unknown' % gcs.devname)
return float(servotime)
def getmaxnumvalues(gcs): # 'getmaxnumvalues' is too complex (11) pylint: disable=C0901
"""Return maximum possible number of data recorder values as integer.
@type gcs : pipython.gcscommands.GCSCommands
@return : Maximum possible number of data recorder values as integer.
"""
maxnumvalues = None
if gcs.devname in ['C-702.00']:
maxnumvalues = MAXNUMVALUES[gcs.devname]
if not maxnumvalues:
# E-517, E-518, E-852
maxnumvalues = gcs.getparam(0x16000201) # DATA REC SET POINTS
if not maxnumvalues:
# E-709, E-712, E-725, E-753.1CD, E-727, E-723K001
maxpoints = gcs.getparam(0x16000200) # DATA_REC_MAX_POINTS
numtables = gcs.getparam(0x16000300) # DATA_REC_CHAN_NUMBER
if maxpoints and numtables:
maxnumvalues = int(maxpoints / numtables)
if not maxnumvalues:
# C-843
maxpoints = gcs.getparam(0x16000200) # DATA_REC_MAX_POINTS
if maxpoints:
maxnumvalues = int(maxpoints / gcs.qTNR())
if not maxnumvalues:
# Mercury, etc.
maxnumvalues = gcs.getparam(0x16000001) # RECORDCYCLES_PER_TRIGGER
if not maxnumvalues:
if gcs.devname in MAXNUMVALUES:
maxnumvalues = MAXNUMVALUES[gcs.devname]
if not maxnumvalues:
raise NotImplementedError('maximum number of data recorder values for %r is unknown' % gcs.devname)
return maxnumvalues
class Datarecorder(FrozenClass):
"""Set up and use the data recorder of a PI device."""
def __init__(self, gcs):
"""Set up and use the data recorder of a PI device connected via 'gcs'.
@type gcs : pipython.gcscommands.GCSCommands
"""
super(Datarecorder, self).__init__()
debug('create an instance of Datarecorder(gcs=%s)', str(gcs))
self.__gcs = gcs
self.__cfg = {
'servotime': None,
'numvalues': None,
'offset': None,
'maxnumvalues': None,
'samplerate': None,
'sources': None,
'options': None,
'trigsources': None,
'rectables': [],
}
self._freeze()
@property
def servotime(self):
"""Return current servo cycle time in seconds as float."""
if self.__cfg['servotime'] is None:
self.__cfg['servotime'] = getservotime(self.__gcs)
debug('Datarecorder.servotime is %g secs', self.__cfg['servotime'])
return self.__cfg['servotime']
@servotime.setter
def servotime(self, value):
"""Set current servo cycle time in seconds as float."""
value = float(value)
self.__cfg['servotime'] = value
debug('Datarecorder.servotime set to %g secs', self.__cfg['servotime'])
@property
def numvalues(self):
"""Return number of data recorder values to record as integer."""
if self.__cfg['numvalues'] is None:
self.numvalues = self.maxnumvalues
return self.__cfg['numvalues']
@numvalues.setter
def numvalues(self, value):
"""Set number of data recorder values to record to 'value' as integer."""
value = int(value)
if value > self.maxnumvalues:
raise ValueError('%d exceeds the maximum number of data recorder values %d' % (value, self.maxnumvalues))
self.__cfg['numvalues'] = value
debug('Datarecorder.numvalues: set to %d', self.__cfg['numvalues'])
@property
def offset(self):
"""Return start point in the record table as integer, starts with index 1."""
if self.__cfg['offset'] is None:
if self.numvalues:
return 1
return self.__cfg['offset']
@offset.setter
def offset(self, value):
"""Set start point in the record table as integer, starts with index 1."""
value = int(value)
self.__cfg['offset'] = value
debug('Datarecorder.offset: set to %d', self.__cfg['offset'])
@property
def maxnumvalues(self):
"""Return maximum possible number of data recorder values as integer."""
if self.__cfg['maxnumvalues'] is None:
self.__cfg['maxnumvalues'] = getmaxnumvalues(self.__gcs)
debug('Datarecorder.maxnumvalues is %d', self.__cfg['maxnumvalues'])
return self.__cfg['maxnumvalues']
@maxnumvalues.setter
def maxnumvalues(self, value):
"""Set maximum possible number of data recorder values as integer."""
value = int(value)
self.__cfg['maxnumvalues'] = value
debug('Datarecorder.maxnumvalues: set to %d', self.__cfg['maxnumvalues'])
@property
def samplerate(self):
"""Return current sampling rate in multiples of servo cycle time as integer."""
if self.__cfg['samplerate'] is None:
if self.__gcs.HasqRTR():
self.__cfg['samplerate'] = self.__gcs.qRTR()
else:
warning('device %r does not support the RTR? command', self.__gcs.devname)
self.__cfg['samplerate'] = 1
return self.__cfg['samplerate']
@samplerate.setter
def samplerate(self, value):
"""Set current sampling rate to 'value' in multiples of servo cycle time as integer."""
value = max(1, int(value))
if self.__gcs.HasRTR():
self.__gcs.RTR(value)
self.__cfg['samplerate'] = value
else:
warning('device %r does not support the RTR command', self.__gcs.devname)
self.__cfg['samplerate'] = 1
debug('Datarecorder.samplerate: set to %d servo cycles', self.__cfg['samplerate'])
@property
def sampletime(self):
"""Return current sampling time in seconds as float."""
return self.samplerate * self.servotime
@sampletime.setter
def sampletime(self, value):
"""Set current sampling time to 'value' in seconds as float."""
self.samplerate = int(float(value) / self.servotime)
debug('Datarecorder.sampletime: set to %g s', self.sampletime)
@property
def samplefreq(self):
"""Return current sampling frequency in Hz as float."""
return 1. / self.sampletime
@samplefreq.setter
def samplefreq(self, value):
"""Set current sampling frequency to 'value' in Hz as float."""
self.sampletime = 1. / float(value)
debug('Datarecorder.samplefreq: set to %.2f Hz', self.samplefreq)
@property
def rectime(self):
"""Return complete record time in seconds as float."""
return self.numvalues * self.sampletime
@rectime.setter
def rectime(self, value):
"""Set number of values to record according to 'value' as complete record time in seconds as float."""
self.numvalues = float(value) / self.sampletime
        debug('Datarecorder.rectime: set to %g s', self.rectime)
@property
def rectimemax(self):
"""Return complete record time in seconds as float."""
return self.maxnumvalues * self.sampletime
@rectimemax.setter
def rectimemax(self, value):
"""Set sample time to record for 'value' seconds (float) with max. number of points."""
self.numvalues = self.maxnumvalues
self.sampletime = float(value) / self.numvalues
debug('Datarecorder.rectimemax: %d values with sampling %g s', self.numvalues, self.sampletime)
@property
def sources(self):
"""Return current record source IDs as list of strings, defaults to first axis."""
self.__cfg['sources'] = self.__cfg['sources'] or self.__gcs.axes[0]
if isinstance(self.__cfg['sources'], (list, tuple)):
return self.__cfg['sources']
return [self.__cfg['sources']] * len(self.rectables)
@sources.setter
def sources(self, value):
"""Set record source IDs as string convertible or list of them."""
self.__cfg['sources'] = value
debug('Datarecorder.sources: set to %r', self.__cfg['sources'])
@sources.deleter
def sources(self):
"""Reset record source IDs."""
self.__cfg['sources'] = None
debug('Datarecorder.sources: reset')
@property
def options(self):
"""Return current record source IDs as list of integers, defaults to RecordOptions.ACTUAL_POSITION_2."""
self.__cfg['options'] = self.__cfg['options'] or RecordOptions.ACTUAL_POSITION_2
if isinstance(self.__cfg['options'], (list, tuple)):
return self.__cfg['options']
return [self.__cfg['options']] * len(self.rectables)
@options.setter
def options(self, value):
"""Set record source IDs as integer convertible or list of them."""
self.__cfg['options'] = value
debug('Datarecorder.options: set to %r', self.__cfg['options'])
@options.deleter
def options(self):
"""Reset record source IDs."""
self.__cfg['options'] = None
debug('Datarecorder.options: reset')
@property
def trigsources(self):
"""Return current trigger source as int or list, defaults to TriggerSources.NEXT_COMMAND_WITH_RESET_2."""
self.__cfg['trigsources'] = self.__cfg['trigsources'] or TriggerSources.NEXT_COMMAND_WITH_RESET_2
return self.__cfg['trigsources']
@trigsources.setter
def trigsources(self, value):
"""Set trigger source IDs. If single integer then "DRT 0" is used. If list
of integers then list size can be 1 or must match the length of self.rectables.
"""
if isinstance(value, tuple):
value = list(value)
self.__cfg['trigsources'] = value
debug('Datarecorder.trigsources: set to %r', self.__cfg['trigsources'])
@trigsources.deleter
def trigsources(self):
"""Reset trigger source IDs."""
self.__cfg['trigsources'] = None
debug('Datarecorder.trigsources: reset')
@property
def rectables(self):
"""Return the record tables as list of integers."""
if isinstance(self.__cfg['sources'], (list, tuple)):
numtables = len(self.__cfg['sources'])
elif isinstance(self.__cfg['options'], (list, tuple)):
numtables = len(self.__cfg['options'])
elif isinstance(self.__cfg['trigsources'], (list, tuple)):
numtables = len(self.__cfg['trigsources'])
else:
numtables = 1
self.__cfg['rectables'] = list(range(1, numtables + 1))
return self.__cfg['rectables']
def wait(self, timeout=0):
"""Wait for end of data recording.
@param timeout : Timeout in seconds, is disabled by default.
"""
assert self.rectables, 'rectables are not set'
numvalues = self.numvalues or self.maxnumvalues
if self.__gcs.HasqDRL():
maxtime = time() + timeout
while min([self.__gcs.qDRL(table)[table] for table in self.rectables]) < numvalues:
if timeout and time() > maxtime:
raise SystemError('timeout after %.1f secs while waiting on data recorder' % timeout)
else:
waittime = 1.2 * self.rectime
debug('Datarecorder.wait: wait %.2f secs for data recording', waittime)
sleep(waittime)
def read(self, offset=None, numvalues=None, verbose=False):
"""Read out the data and return it.
@param offset : Start point in the table as integer, starts with index 1, overwrites self.offset.
@param numvalues : Number of points to be read per table as integer, overwrites self.numvalues.
@param verbose : If True print a line that shows how many values have been read out already.
@return : Tuple of (header, data), see qDRR command.
"""
assert self.rectables, 'rectables are not set'
header = self.__gcs.qDRR(self.rectables, offset or self.offset, numvalues or self.numvalues)
while self.__gcs.bufstate is not True:
if verbose:
                print('\rread data {:.1f}%...'.format(self.__gcs.bufstate * 100)),  # trailing comma suppresses the newline (Python-2 print idiom)
sleep(0.05)
if verbose:
print('\r%s\r' % (' ' * 20)),
data = self.__gcs.bufdata
return header, data
def getdata(self, timeout=0, offset=None, numvalues=None):
"""Wait for end of data recording, start reading out the data and return the data.
@param timeout : Timeout in seconds, is disabled by default.
@param offset : Start point in the table as integer, starts with index 1, overwrites self.offset.
@param numvalues : Number of points to be read per table as integer, overwrites self.numvalues.
@return : Tuple of (header, data), see qDRR command.
"""
self.wait(timeout)
return self.read(offset, numvalues)
def arm(self):
"""Ready the data recorder with given options and activate the trigger.
If TriggerSources.NEXT_COMMAND_WITH_RESET_2 is used then the error check will be disabled.
"""
if self.__gcs.HasDRC():
for i in range(len(self.rectables)):
self.__gcs.DRC(self.rectables[i], self.sources[i], self.options[i])
else:
warning('device %r does not support the DRC command', self.__gcs.devname)
if self.__gcs.HasDRT():
errcheck = None
if isinstance(self.trigsources, (list, tuple)):
if TriggerSources.NEXT_COMMAND_WITH_RESET_2 in self.trigsources:
errcheck = self.__gcs.errcheck
self.__gcs.errcheck = False
if len(self.trigsources) == 1:
self.trigsources = [self.trigsources[0]] * len(self.rectables)
for i in range(len(self.rectables)):
self.__gcs.DRT(self.rectables[i], self.trigsources[i])
else:
if TriggerSources.NEXT_COMMAND_WITH_RESET_2 == self.trigsources:
errcheck = self.__gcs.errcheck
self.__gcs.errcheck = False
self.__gcs.DRT(0, self.trigsources)
if errcheck is not None:
self.__gcs.errcheck = errcheck
else:
warning('device %r does not support the DRT command', self.__gcs.devname)
@property
def timescale(self):
"""Return list of values for time scale of recorded data."""
return [1. / self.samplerate * x for x in range(self.numvalues)]
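    # Typical usage sketch (added comment; assumes a connected pipython
    # GCSDevice `pidevice` with at least one axis):
    #
    #   drec = Datarecorder(pidevice)
    #   drec.numvalues = 1024
    #   drec.samplefreq = 1000.0                  # Hz, stored as servo cycles
    #   drec.options = RecordOptions.ACTUAL_POSITION_2
    #   drec.sources = pidevice.axes[0]
    #   drec.trigsources = TriggerSources.NEXT_COMMAND_WITH_RESET_2
    #   drec.arm()
    #   pidevice.MVR(pidevice.axes[0], 1.0)       # motion command fires the trigger
    #   header, data = drec.getdata(timeout=60)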
| [
"logging.warning",
"logging.debug",
"time.sleep",
"time.time"
] | [((8767, 8838), 'logging.debug', 'debug', (['"""Datarecorder.servotime set to %g secs"""', "self.__cfg['servotime']"], {}), "('Datarecorder.servotime set to %g secs', self.__cfg['servotime'])\n", (8772, 8838), False, 'from logging import debug, warning\n'), ((9451, 9518), 'logging.debug', 'debug', (['"""Datarecorder.numvalues: set to %d"""', "self.__cfg['numvalues']"], {}), "('Datarecorder.numvalues: set to %d', self.__cfg['numvalues'])\n", (9456, 9518), False, 'from logging import debug, warning\n'), ((9979, 10040), 'logging.debug', 'debug', (['"""Datarecorder.offset: set to %d"""', "self.__cfg['offset']"], {}), "('Datarecorder.offset: set to %d', self.__cfg['offset'])\n", (9984, 10040), False, 'from logging import debug, warning\n'), ((10621, 10694), 'logging.debug', 'debug', (['"""Datarecorder.maxnumvalues: set to %d"""', "self.__cfg['maxnumvalues']"], {}), "('Datarecorder.maxnumvalues: set to %d', self.__cfg['maxnumvalues'])\n", (10626, 10694), False, 'from logging import debug, warning\n'), ((11609, 11696), 'logging.debug', 'debug', (['"""Datarecorder.samplerate: set to %d servo cycles"""', "self.__cfg['samplerate']"], {}), "('Datarecorder.samplerate: set to %d servo cycles', self.__cfg[\n 'samplerate'])\n", (11614, 11696), False, 'from logging import debug, warning\n'), ((12043, 12105), 'logging.debug', 'debug', (['"""Datarecorder.sampletime: set to %g s"""', 'self.sampletime'], {}), "('Datarecorder.sampletime: set to %g s', self.sampletime)\n", (12048, 12105), False, 'from logging import debug, warning\n'), ((12428, 12493), 'logging.debug', 'debug', (['"""Datarecorder.samplefreq: set to %.2f Hz"""', 'self.samplefreq'], {}), "('Datarecorder.samplefreq: set to %.2f Hz', self.samplefreq)\n", (12433, 12493), False, 'from logging import debug, warning\n'), ((12869, 12933), 'logging.debug', 'debug', (['"""Datarecorder.frequency: set to %.2f Hz"""', 'self.samplefreq'], {}), "('Datarecorder.frequency: set to %.2f Hz', self.samplefreq)\n", (12874, 12933), False, 'from logging import debug, warning\n'), ((13349, 13449), 'logging.debug', 'debug', (['"""Datarecorder.rectimemax: %d values with sampling %g s"""', 'self.numvalues', 'self.sampletime'], {}), "('Datarecorder.rectimemax: %d values with sampling %g s', self.\n numvalues, self.sampletime)\n", (13354, 13449), False, 'from logging import debug, warning\n'), ((13985, 14048), 'logging.debug', 'debug', (['"""Datarecorder.sources: set to %r"""', "self.__cfg['sources']"], {}), "('Datarecorder.sources: set to %r', self.__cfg['sources'])\n", (13990, 14048), False, 'from logging import debug, warning\n'), ((14178, 14214), 'logging.debug', 'debug', (['"""Datarecorder.sources: reset"""'], {}), "('Datarecorder.sources: reset')\n", (14183, 14214), False, 'from logging import debug, warning\n'), ((14791, 14854), 'logging.debug', 'debug', (['"""Datarecorder.options: set to %r"""', "self.__cfg['options']"], {}), "('Datarecorder.options: set to %r', self.__cfg['options'])\n", (14796, 14854), False, 'from logging import debug, warning\n'), ((14984, 15020), 'logging.debug', 'debug', (['"""Datarecorder.options: reset"""'], {}), "('Datarecorder.options: reset')\n", (14989, 15020), False, 'from logging import debug, warning\n'), ((15685, 15756), 'logging.debug', 'debug', (['"""Datarecorder.trigsources: set to %r"""', "self.__cfg['trigsources']"], {}), "('Datarecorder.trigsources: set to %r', self.__cfg['trigsources'])\n", (15690, 15756), False, 'from logging import debug, warning\n'), ((15899, 15939), 'logging.debug', 'debug', 
(['"""Datarecorder.trigsources: reset"""'], {}), "('Datarecorder.trigsources: reset')\n", (15904, 15939), False, 'from logging import debug, warning\n'), ((8464, 8531), 'logging.debug', 'debug', (['"""Datarecorder.servotime is %g secs"""', "self.__cfg['servotime']"], {}), "('Datarecorder.servotime is %g secs', self.__cfg['servotime'])\n", (8469, 8531), False, 'from logging import debug, warning\n'), ((10293, 10361), 'logging.debug', 'debug', (['"""Datarecorder.maxnumvalues is %d"""', "self.__cfg['maxnumvalues']"], {}), "('Datarecorder.maxnumvalues is %d', self.__cfg['maxnumvalues'])\n", (10298, 10361), False, 'from logging import debug, warning\n'), ((11486, 11559), 'logging.warning', 'warning', (['"""device %r does not support the RTR command"""', 'self.__gcs.devname'], {}), "('device %r does not support the RTR command', self.__gcs.devname)\n", (11493, 11559), False, 'from logging import debug, warning\n'), ((17189, 17260), 'logging.debug', 'debug', (['"""Datarecorder.wait: wait %.2f secs for data recording"""', 'waittime'], {}), "('Datarecorder.wait: wait %.2f secs for data recording', waittime)\n", (17194, 17260), False, 'from logging import debug, warning\n'), ((17273, 17288), 'time.sleep', 'sleep', (['waittime'], {}), '(waittime)\n', (17278, 17288), False, 'from time import sleep, time\n'), ((19224, 19297), 'logging.warning', 'warning', (['"""device %r does not support the DRC command"""', 'self.__gcs.devname'], {}), "('device %r does not support the DRC command', self.__gcs.devname)\n", (19231, 19297), False, 'from logging import debug, warning\n'), ((20216, 20289), 'logging.warning', 'warning', (['"""device %r does not support the DRT command"""', 'self.__gcs.devname'], {}), "('device %r does not support the DRT command', self.__gcs.devname)\n", (20223, 20289), False, 'from logging import debug, warning\n'), ((11001, 11075), 'logging.warning', 'warning', (['"""device %r does not support the RTR? command"""', 'self.__gcs.devname'], {}), "('device %r does not support the RTR? command', self.__gcs.devname)\n", (11008, 11075), False, 'from logging import debug, warning\n'), ((16853, 16859), 'time.time', 'time', ([], {}), '()\n', (16857, 16859), False, 'from time import sleep, time\n'), ((18108, 18119), 'time.sleep', 'sleep', (['(0.05)'], {}), '(0.05)\n', (18113, 18119), False, 'from time import sleep, time\n'), ((16997, 17003), 'time.time', 'time', ([], {}), '()\n', (17001, 17003), False, 'from time import sleep, time\n')] |
import io
class container(dict):
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def pack(typ, obj):
stream = io.BytesIO()
typ.pack(stream, obj)
data = stream.getvalue()
stream.close()
return data
def unpack(typ, data):
stream = io.BytesIO(data)
obj = typ.unpack(stream)
extra_data = stream.read()
if extra_data:
raise RuntimeError('too much data', extra_data)
stream.close()
return obj
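# Example (added comment): any codec type exposing pack(stream, obj) and
# unpack(stream) works with the helpers above, e.g. a little-endian uint32:
#
#   import struct
#   class U32:
#       @staticmethod
#       def pack(stream, obj):
#           stream.write(struct.pack('<I', obj))
#       @staticmethod
#       def unpack(stream):
#           return struct.unpack('<I', stream.read(4))[0]
#
#   assert unpack(U32, pack(U32, 7)) == 7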
| [
"io.BytesIO"
] | [((176, 188), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (186, 188), False, 'import io\n'), ((317, 333), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (327, 333), False, 'import io\n')] |
from flask import Flask, jsonify
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, desc
import pandas as pd
import numpy as np
import datetime as dt
import sqlalchemy
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
#Flask
app = Flask(__name__)
#Home Route
@app.route("/")
def home():
return "Possible routes:<br>/api/v1.0/precipitation<br>/api/v1.0/stations<br>/api/v1.0/tobs<br>/api/v1.0/STARTDATE<br>/api/v1.0/STARTDATE/ENDDATE<br><br>Format dates as YYYY-MM-DD"
#Precipitation Route
@app.route("/api/v1.0/precipitation")
def prcp():
latest = dt.date.fromisoformat(session.query(Measurement).order_by(desc(Measurement.date)).first().date)
earliest = latest-dt.timedelta(days=365)
prcp_data = session.query(Measurement.date, Measurement.prcp).filter(earliest<Measurement.date)
prcp_df = pd.read_sql(prcp_data.statement, session.bind)
prcp_df['date']=pd.to_datetime(prcp_df.date)
prcp_df.set_index('date', inplace=True)
prcp_df.sort_index(inplace=True)
#Convert to jsonify
prcp_df.index = prcp_df.index.astype('str')
return jsonify(prcp_df.to_dict())
#station route
@app.route("/api/v1.0/stations")
def stations():
#Stations
stations = session.query(Measurement.station).group_by(Measurement.station)
    stations_df = pd.read_sql(stations.statement, session.bind)
return jsonify(stations_df.to_dict())
# Tobs Route
@app.route("/api/v1.0/tobs")
def tobs():
latest = dt.date.fromisoformat(session.query(Measurement).order_by(desc(Measurement.date)).first().date)
earliest = latest-dt.timedelta(days=365)
tobs_data = session.query(Measurement.date, Measurement.tobs).filter(earliest<Measurement.date)
tobs_df = pd.read_sql(tobs_data.statement, session.bind)
tobs_df['date']=pd.to_datetime(tobs_df.date)
tobs_df.set_index('date', inplace=True)
tobs_df.sort_index(inplace=True)
tobs_df.index = tobs_df.index.astype('str')
return jsonify(tobs_df.to_dict())
#start Route
@app.route("/api/v1.0/<start>")
def start_metrics(start):
date_list = session.query(Measurement.date)
date_df = pd.read_sql(date_list.statement, session.bind)
date_dict = date_df.to_dict()
for date in date_dict["date"].values():
if date == start:
return jsonify(session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).all())
return jsonify({"error":"date not found"}),404
# End Route
@app.route("/api/v1.0/<start>/<end>")
def start_end_metrics(start,end):
found_start = False
found_end = False
if start > end:
return " Wrong order"
date_list = session.query(Measurement.date)
date_df = pd.read_sql(date_list.statement, session.bind)
date_dict = date_df.to_dict()
for date in date_dict["date"].values():
if date == start:
found_start = True
if (found_start == True) and (found_end == True):
return jsonify(session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).filter(Measurement.date <= end).all())
if date == end:
found_end = True
if (found_start == True) and (found_end == True):
return jsonify(session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).filter(Measurement.date <= end).all())
return jsonify({"error":"date not found"}),404
if __name__ == "__main__":
app.run(debug=True) | [
"sqlalchemy.func.min",
"flask.Flask",
"sqlalchemy.ext.automap.automap_base",
"sqlalchemy.create_engine",
"sqlalchemy.orm.Session",
"sqlalchemy.desc",
"sqlalchemy.func.max",
"sqlalchemy.func.avg",
"pandas.read_sql",
"datetime.timedelta",
"pandas.to_datetime",
"flask.jsonify"
] | [((269, 319), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///Resources/hawaii.sqlite"""'], {}), "('sqlite:///Resources/hawaii.sqlite')\n", (282, 319), False, 'from sqlalchemy import create_engine, func, desc\n'), ((328, 342), 'sqlalchemy.ext.automap.automap_base', 'automap_base', ([], {}), '()\n', (340, 342), False, 'from sqlalchemy.ext.automap import automap_base\n'), ((483, 498), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (490, 498), False, 'from sqlalchemy.orm import Session\n'), ((516, 531), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (521, 531), False, 'from flask import Flask, jsonify\n'), ((1104, 1150), 'pandas.read_sql', 'pd.read_sql', (['prcp_data.statement', 'session.bind'], {}), '(prcp_data.statement, session.bind)\n', (1115, 1150), True, 'import pandas as pd\n'), ((1169, 1197), 'pandas.to_datetime', 'pd.to_datetime', (['prcp_df.date'], {}), '(prcp_df.date)\n', (1183, 1197), True, 'import pandas as pd\n'), ((1579, 1624), 'pandas.read_sql', 'pd.read_sql', (['stations.statement', 'session.bind'], {}), '(stations.statement, session.bind)\n', (1590, 1624), True, 'import pandas as pd\n'), ((1991, 2037), 'pandas.read_sql', 'pd.read_sql', (['tobs_data.statement', 'session.bind'], {}), '(tobs_data.statement, session.bind)\n', (2002, 2037), True, 'import pandas as pd\n'), ((2056, 2084), 'pandas.to_datetime', 'pd.to_datetime', (['tobs_df.date'], {}), '(tobs_df.date)\n', (2070, 2084), True, 'import pandas as pd\n'), ((2390, 2436), 'pandas.read_sql', 'pd.read_sql', (['date_list.statement', 'session.bind'], {}), '(date_list.statement, session.bind)\n', (2401, 2436), True, 'import pandas as pd\n'), ((3002, 3048), 'pandas.read_sql', 'pd.read_sql', (['date_list.statement', 'session.bind'], {}), '(date_list.statement, session.bind)\n', (3013, 3048), True, 'import pandas as pd\n'), ((971, 993), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(365)'}), '(days=365)\n', (983, 993), True, 'import datetime as dt\n'), ((1858, 1880), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(365)'}), '(days=365)\n', (1870, 1880), True, 'import datetime as dt\n'), ((2720, 2756), 'flask.jsonify', 'jsonify', (["{'error': 'date not found'}"], {}), "({'error': 'date not found'})\n", (2727, 2756), False, 'from flask import Flask, jsonify\n'), ((3758, 3794), 'flask.jsonify', 'jsonify', (["{'error': 'date not found'}"], {}), "({'error': 'date not found'})\n", (3765, 3794), False, 'from flask import Flask, jsonify\n'), ((913, 935), 'sqlalchemy.desc', 'desc', (['Measurement.date'], {}), '(Measurement.date)\n', (917, 935), False, 'from sqlalchemy import create_engine, func, desc\n'), ((1800, 1822), 'sqlalchemy.desc', 'desc', (['Measurement.date'], {}), '(Measurement.date)\n', (1804, 1822), False, 'from sqlalchemy import create_engine, func, desc\n'), ((2569, 2595), 'sqlalchemy.func.min', 'func.min', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (2577, 2595), False, 'from sqlalchemy import create_engine, func, desc\n'), ((2597, 2623), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (2605, 2623), False, 'from sqlalchemy import create_engine, func, desc\n'), ((2625, 2651), 'sqlalchemy.func.max', 'func.max', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (2633, 2651), False, 'from sqlalchemy import create_engine, func, desc\n'), ((3265, 3291), 'sqlalchemy.func.min', 'func.min', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (3273, 3291), False, 'from sqlalchemy import create_engine, func, desc\n'), 
((3293, 3319), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (3301, 3319), False, 'from sqlalchemy import create_engine, func, desc\n'), ((3321, 3347), 'sqlalchemy.func.max', 'func.max', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (3329, 3347), False, 'from sqlalchemy import create_engine, func, desc\n'), ((3574, 3600), 'sqlalchemy.func.min', 'func.min', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (3582, 3600), False, 'from sqlalchemy import create_engine, func, desc\n'), ((3602, 3628), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (3610, 3628), False, 'from sqlalchemy import create_engine, func, desc\n'), ((3630, 3656), 'sqlalchemy.func.max', 'func.max', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (3638, 3656), False, 'from sqlalchemy import create_engine, func, desc\n')] |
import torch
import torch.nn as nn
from cogdl.utils import spmm
class GINLayer(nn.Module):
r"""Graph Isomorphism Network layer from paper `"How Powerful are Graph
Neural Networks?" <https://arxiv.org/pdf/1810.00826.pdf>`__.
.. math::
h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
\mathrm{sum}\left(\left\{h_j^{l}, j\in\mathcal{N}(i)
\right\}\right)\right)
Parameters
----------
apply_func : callable layer function)
layer or function applied to update node feature
eps : float32, optional
Initial `\epsilon` value.
train_eps : bool, optional
If True, `\epsilon` will be a learnable parameter.
"""
def __init__(self, apply_func=None, eps=0, train_eps=True):
super(GINLayer, self).__init__()
if train_eps:
self.eps = torch.nn.Parameter(torch.FloatTensor([eps]))
else:
self.register_buffer("eps", torch.FloatTensor([eps]))
self.apply_func = apply_func
def forward(self, graph, x):
out = (1 + self.eps) * x + spmm(graph, x)
if self.apply_func is not None:
out = self.apply_func(out)
return out
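# Minimal usage sketch (added comment; the cogdl `graph` object and feature
# sizes are assumed for illustration):
#
#   mlp = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 32))
#   gin = GINLayer(apply_func=mlp, eps=0.0, train_eps=True)
#   out = gin(graph, x)  # x: [num_nodes, 16] features -> out: [num_nodes, 32]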
| [
"torch.FloatTensor",
"cogdl.utils.spmm"
] | [((1076, 1090), 'cogdl.utils.spmm', 'spmm', (['graph', 'x'], {}), '(graph, x)\n', (1080, 1090), False, 'from cogdl.utils import spmm\n'), ((864, 888), 'torch.FloatTensor', 'torch.FloatTensor', (['[eps]'], {}), '([eps])\n', (881, 888), False, 'import torch\n'), ((944, 968), 'torch.FloatTensor', 'torch.FloatTensor', (['[eps]'], {}), '([eps])\n', (961, 968), False, 'import torch\n')] |
# Copyright 2018 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
DDPG implementation in Tensorflow Eager Execution
"""
import numpy as np
import tensorflow as tf
from utils import PytorchInitializer
layers = tf.keras.layers
regularizers = tf.keras.regularizers
losses = tf.keras.losses
class Actor(tf.keras.Model):
def __init__(self, state_dim, action_dim, max_action, name="Actor"):
super().__init__(name=name)
self.l1 = layers.Dense(400, kernel_initializer=PytorchInitializer(),
name="L1")
self.l2 = layers.Dense(300, kernel_initializer=PytorchInitializer(),
name="L2")
self.l3 = layers.Dense(action_dim, kernel_initializer=PytorchInitializer(),
name="L3")
self.max_action = max_action
        # Run a dummy forward pass early so the output shape is fixed for later processing
dummy_state = tf.constant(np.zeros(shape=[1, state_dim], dtype=np.float32))
self(dummy_state)
def call(self, inputs):
with tf.device("/gpu:0"):
features = tf.nn.relu(self.l1(inputs))
features = tf.nn.relu(self.l2(features))
features = self.l3(features)
action = self.max_action * tf.nn.tanh(features)
return action
class Critic(tf.keras.Model):
def __init__(self, state_dim, action_dim, wd=1e-2, name="Critic"):
super().__init__(name=name)
self.l1 = layers.Dense(400, kernel_initializer=PytorchInitializer(),
kernel_regularizer=regularizers.l2(wd), bias_regularizer=regularizers.l2(wd),
name="L1")
self.l2 = layers.Dense(300, kernel_initializer=PytorchInitializer(),
kernel_regularizer=regularizers.l2(wd), bias_regularizer=regularizers.l2(wd),
name="L2")
self.l3 = layers.Dense(1, kernel_initializer=PytorchInitializer(),
kernel_regularizer=regularizers.l2(wd), bias_regularizer=regularizers.l2(wd),
name="L3")
dummy_state = tf.constant(np.zeros(shape=[1, state_dim], dtype=np.float32))
dummy_action = tf.constant(np.zeros(shape=[1, action_dim], dtype=np.float32))
self([dummy_state, dummy_action])
def call(self, inputs):
with tf.device("/gpu:0"):
x, u = inputs
x = tf.nn.relu(self.l1(x))
inner_feat = tf.concat([x, u], axis=1)
x = tf.nn.relu(self.l2(inner_feat))
x = self.l3(x)
return x
class DDPG(tf.contrib.checkpoint.Checkpointable):
def __init__(self, state_dim, action_dim, max_action):
self.actor = Actor(state_dim, action_dim, max_action)
self.actor_target = Actor(state_dim, action_dim, max_action)
# initialize target network
for param, target_param in zip(self.actor.weights, self.actor_target.weights):
target_param.assign(param)
self.actor_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
self.critic = Critic(state_dim, action_dim)
self.critic_target = Critic(state_dim, action_dim)
# initialize target network
for param, target_param in zip(self.critic.weights, self.critic_target.weights):
target_param.assign(param)
self.critic_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
def select_action(self, state):
"""
:param np.ndarray state:
:return:
"""
assert isinstance(state, np.ndarray)
assert len(state.shape) == 1
state = np.expand_dims(state, axis=0).astype(np.float32)
action = self.actor(state).numpy()
return action[0]
def train(self, replay_buffer, iterations, batch_size=64, discount=0.99, tau=0.001):
for it in range(iterations):
state, next_state, action, reward, done = replay_buffer.sample(batch_size)
state = np.array(state, dtype=np.float32)
next_state = np.array(next_state, dtype=np.float32)
action = np.array(action, dtype=np.float32)
reward = np.array(reward, dtype=np.float32)
done = np.array(done, dtype=np.float32)
not_done = 1 - done
with tf.device("/gpu:0"):
with tf.GradientTape() as tape:
target_Q = self.critic_target([next_state, self.actor_target(next_state)])
target_Q = reward + (not_done * discount * target_Q)
# detach => stop_gradient
target_Q = tf.stop_gradient(target_Q)
current_Q = self.critic([state, action])
# Compute critic loss + L2 loss
critic_loss = tf.reduce_mean(losses.MSE(current_Q, target_Q)) + 0.5*tf.add_n(self.critic.losses)
critic_grad = tape.gradient(critic_loss, self.critic.trainable_variables)
self.critic_optimizer.apply_gradients(zip(critic_grad, self.critic.trainable_variables))
with tf.GradientTape() as tape:
next_action = self.actor(state)
actor_loss = -tf.reduce_mean(self.critic([state, next_action]))
actor_grad = tape.gradient(actor_loss, self.actor.trainable_variables)
self.actor_optimizer.apply_gradients(zip(actor_grad, self.actor.trainable_variables))
# Update target networks
for param, target_param in zip(self.critic.weights, self.critic_target.weights):
target_param.assign(tau * param + (1 - tau) * target_param)
for param, target_param in zip(self.actor.weights, self.actor_target.weights):
target_param.assign(tau * param + (1 - tau) * target_param)
class DDPG_fast(tf.contrib.checkpoint.Checkpointable):
def __init__(self, state_dim, action_dim, max_action):
self.actor = Actor(state_dim, action_dim, max_action)
self.actor_target = Actor(state_dim, action_dim, max_action)
self.actor_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
# initialize target network
for param, target_param in zip(self.actor.weights, self.actor_target.weights):
target_param.assign(param)
self.critic = Critic(state_dim, action_dim)
self.critic_target = Critic(state_dim, action_dim)
self.critic_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
# initialize target network
for param, target_param in zip(self.critic.weights, self.critic_target.weights):
target_param.assign(param)
def select_action(self, state):
"""
:param np.ndarray state:
:return:
"""
assert isinstance(state, np.ndarray)
assert len(state.shape) == 1
state = np.expand_dims(state, axis=0).astype(np.float32)
action = self._select_action_body(tf.constant(state))
return action.numpy()[0]
@tf.contrib.eager.defun
def _select_action_body(self, state):
"""
:param np.ndarray state:
:return:
"""
action = self.actor(state)
return action
def train(self, replay_buffer, iterations, batch_size=64, discount=0.99, tau=0.001):
for it in range(iterations):
state, next_state, action, reward, done = replay_buffer.sample(batch_size)
state = np.array(state, dtype=np.float32)
next_state = np.array(next_state, dtype=np.float32)
action = np.array(action, dtype=np.float32)
reward = np.array(reward, dtype=np.float32)
done = np.array(done, dtype=np.float32)
not_done = 1 - done
self._train_body(state, next_state, action, reward, not_done, discount, tau)
@tf.contrib.eager.defun
def _train_body(self, state, next_state, action, reward, not_done, discount, tau):
with tf.device("/gpu:0"):
with tf.GradientTape() as tape:
target_Q = self.critic_target([next_state, self.actor_target(next_state)])
target_Q = reward + (not_done * discount * target_Q)
# detach => stop_gradient
target_Q = tf.stop_gradient(target_Q)
current_Q = self.critic([state, action])
# Compute critic loss + L2 loss
critic_loss = tf.reduce_mean(losses.MSE(current_Q, target_Q)) + 0.5*tf.add_n(self.critic.losses)
critic_grad = tape.gradient(critic_loss, self.critic.trainable_variables)
self.critic_optimizer.apply_gradients(zip(critic_grad, self.critic.trainable_variables))
with tf.GradientTape() as tape:
next_action = self.actor(state)
actor_loss = -tf.reduce_mean(self.critic([state, next_action]))
actor_grad = tape.gradient(actor_loss, self.actor.trainable_variables)
self.actor_optimizer.apply_gradients(zip(actor_grad, self.actor.trainable_variables))
# Update target networks
for param, target_param in zip(self.critic.weights, self.critic_target.weights):
target_param.assign(tau * param + (1 - tau) * target_param)
for param, target_param in zip(self.actor.weights, self.actor_target.weights):
target_param.assign(tau * param + (1 - tau) * target_param)
| [
"tensorflow.device",
"tensorflow.nn.tanh",
"utils.PytorchInitializer",
"tensorflow.GradientTape",
"numpy.array",
"tensorflow.concat",
"numpy.zeros",
"tensorflow.constant",
"tensorflow.stop_gradient",
"numpy.expand_dims",
"tensorflow.add_n",
"tensorflow.train.AdamOptimizer"
] | [((3624, 3668), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (3646, 3668), True, 'import tensorflow as tf\n'), ((3977, 4020), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (3999, 4020), True, 'import tensorflow as tf\n'), ((6716, 6760), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (6738, 6760), True, 'import tensorflow as tf\n'), ((7066, 7109), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (7088, 7109), True, 'import tensorflow as tf\n'), ((1514, 1562), 'numpy.zeros', 'np.zeros', ([], {'shape': '[1, state_dim]', 'dtype': 'np.float32'}), '(shape=[1, state_dim], dtype=np.float32)\n', (1522, 1562), True, 'import numpy as np\n'), ((1632, 1651), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (1641, 1651), True, 'import tensorflow as tf\n'), ((2737, 2785), 'numpy.zeros', 'np.zeros', ([], {'shape': '[1, state_dim]', 'dtype': 'np.float32'}), '(shape=[1, state_dim], dtype=np.float32)\n', (2745, 2785), True, 'import numpy as np\n'), ((2822, 2871), 'numpy.zeros', 'np.zeros', ([], {'shape': '[1, action_dim]', 'dtype': 'np.float32'}), '(shape=[1, action_dim], dtype=np.float32)\n', (2830, 2871), True, 'import numpy as np\n'), ((2957, 2976), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (2966, 2976), True, 'import tensorflow as tf\n'), ((3069, 3094), 'tensorflow.concat', 'tf.concat', (['[x, u]'], {'axis': '(1)'}), '([x, u], axis=1)\n', (3078, 3094), True, 'import tensorflow as tf\n'), ((4586, 4619), 'numpy.array', 'np.array', (['state'], {'dtype': 'np.float32'}), '(state, dtype=np.float32)\n', (4594, 4619), True, 'import numpy as np\n'), ((4645, 4683), 'numpy.array', 'np.array', (['next_state'], {'dtype': 'np.float32'}), '(next_state, dtype=np.float32)\n', (4653, 4683), True, 'import numpy as np\n'), ((4705, 4739), 'numpy.array', 'np.array', (['action'], {'dtype': 'np.float32'}), '(action, dtype=np.float32)\n', (4713, 4739), True, 'import numpy as np\n'), ((4761, 4795), 'numpy.array', 'np.array', (['reward'], {'dtype': 'np.float32'}), '(reward, dtype=np.float32)\n', (4769, 4795), True, 'import numpy as np\n'), ((4815, 4847), 'numpy.array', 'np.array', (['done'], {'dtype': 'np.float32'}), '(done, dtype=np.float32)\n', (4823, 4847), True, 'import numpy as np\n'), ((7576, 7594), 'tensorflow.constant', 'tf.constant', (['state'], {}), '(state)\n', (7587, 7594), True, 'import tensorflow as tf\n'), ((8069, 8102), 'numpy.array', 'np.array', (['state'], {'dtype': 'np.float32'}), '(state, dtype=np.float32)\n', (8077, 8102), True, 'import numpy as np\n'), ((8128, 8166), 'numpy.array', 'np.array', (['next_state'], {'dtype': 'np.float32'}), '(next_state, dtype=np.float32)\n', (8136, 8166), True, 'import numpy as np\n'), ((8188, 8222), 'numpy.array', 'np.array', (['action'], {'dtype': 'np.float32'}), '(action, dtype=np.float32)\n', (8196, 8222), True, 'import numpy as np\n'), ((8244, 8278), 'numpy.array', 'np.array', (['reward'], {'dtype': 'np.float32'}), '(reward, dtype=np.float32)\n', (8252, 8278), True, 'import numpy as np\n'), ((8298, 8330), 'numpy.array', 'np.array', (['done'], {'dtype': 'np.float32'}), '(done, dtype=np.float32)\n', (8306, 8330), True, 'import numpy as np\n'), ((8581, 8600), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], 
{}), "('/gpu:0')\n", (8590, 8600), True, 'import tensorflow as tf\n'), ((1097, 1117), 'utils.PytorchInitializer', 'PytorchInitializer', ([], {}), '()\n', (1115, 1117), False, 'from utils import PytorchInitializer\n'), ((1216, 1236), 'utils.PytorchInitializer', 'PytorchInitializer', ([], {}), '()\n', (1234, 1236), False, 'from utils import PytorchInitializer\n'), ((1342, 1362), 'utils.PytorchInitializer', 'PytorchInitializer', ([], {}), '()\n', (1360, 1362), False, 'from utils import PytorchInitializer\n'), ((1837, 1857), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['features'], {}), '(features)\n', (1847, 1857), True, 'import tensorflow as tf\n'), ((2075, 2095), 'utils.PytorchInitializer', 'PytorchInitializer', ([], {}), '()\n', (2093, 2095), False, 'from utils import PytorchInitializer\n'), ((2303, 2323), 'utils.PytorchInitializer', 'PytorchInitializer', ([], {}), '()\n', (2321, 2323), False, 'from utils import PytorchInitializer\n'), ((2529, 2549), 'utils.PytorchInitializer', 'PytorchInitializer', ([], {}), '()\n', (2547, 2549), False, 'from utils import PytorchInitializer\n'), ((4232, 4261), 'numpy.expand_dims', 'np.expand_dims', (['state'], {'axis': '(0)'}), '(state, axis=0)\n', (4246, 4261), True, 'import numpy as np\n'), ((4898, 4917), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (4907, 4917), True, 'import tensorflow as tf\n'), ((7485, 7514), 'numpy.expand_dims', 'np.expand_dims', (['state'], {'axis': '(0)'}), '(state, axis=0)\n', (7499, 7514), True, 'import numpy as np\n'), ((8620, 8637), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8635, 8637), True, 'import tensorflow as tf\n'), ((8876, 8902), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['target_Q'], {}), '(target_Q)\n', (8892, 8902), True, 'import tensorflow as tf\n'), ((9329, 9346), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (9344, 9346), True, 'import tensorflow as tf\n'), ((4941, 4958), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4956, 4958), True, 'import tensorflow as tf\n'), ((5213, 5239), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['target_Q'], {}), '(target_Q)\n', (5229, 5239), True, 'import tensorflow as tf\n'), ((5690, 5707), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5705, 5707), True, 'import tensorflow as tf\n'), ((9094, 9122), 'tensorflow.add_n', 'tf.add_n', (['self.critic.losses'], {}), '(self.critic.losses)\n', (9102, 9122), True, 'import tensorflow as tf\n'), ((5443, 5471), 'tensorflow.add_n', 'tf.add_n', (['self.critic.losses'], {}), '(self.critic.losses)\n', (5451, 5471), True, 'import tensorflow as tf\n')] |
import json
import falcon
import time
import uuid
import requests
from apps.database import init_db, db_session
from apps.models import Account
from apps.restaccount.logging import logging
logger = logging.getLogger(__name__)
from decouple import config
ES_HOST = config('EVENTSTORE_HOST', default='eventstore')
ES_PORT = config('EVENTSTORE_PORT', default=2113, cast=int)
stream_url = 'http://{}:{}/streams/accounts'.format(ES_HOST, ES_PORT)
content_header = { 'Content-Type': 'application/vnd.eventstore.events+json' }
logger.info('stream_url: {}'.format(stream_url))
def get_account(account_id):
return Account.query.get(account_id)
class BalanceResource(object):
def on_get(self, req, resp, account_id):
init_db()
doc = db_session.query(Account).get(account_id)
db_session.close()
if doc is None:
raise falcon.HTTPBadRequest('Balance missing', 'Deposit money to start using an account')
else:
# Create a JSON representation of the resource
resp.body = json.dumps(doc.as_dict(), ensure_ascii=False)
# The following line can be omitted because 200 is the default
# status returned by the framework, but it is included here to
# illustrate how this may be overridden as needed.
resp.status = falcon.HTTP_200
class DepositResource(object):
def on_post(self, req, resp):
body = req.stream.read()
doc = json.loads(body.decode('utf-8'))
logger.info('doc: {}'.format(doc))
payload = [
{
"eventId": str(uuid.uuid1()),
"eventType": "created-deposit",
"data": doc
}
]
logger.info("payload: {}".format(payload))
        r = requests.post(stream_url, data=json.dumps(payload), headers=content_header)
resp.status = falcon.HTTP_200
class TransferResource(object):
def on_post(self, req, resp):
body = req.stream.read()
doc = json.loads(body.decode('utf-8'))
acc = get_account(doc['account_id'])
payload = [
{
"eventId": str(uuid.uuid1()),
"eventType": "created-transfer",
"data": doc
}
]
if acc is None:
            raise falcon.HTTPBadRequest('Account missing', 'You must deposit into an account before transferring')
if acc.balance < doc['amount']:
raise falcon.HTTPBadRequest('Insufficient funds', 'Account balance {} less than transfer amount {}'.format(acc.balance, doc['amount']))
else:
logger.info("payload: {}".format(payload))
            r = requests.post(stream_url, data=json.dumps(payload), headers=content_header)
resp.status = falcon.HTTP_200 | [
"apps.restaccount.logging.logging.getLogger",
"falcon.HTTPBadRequest",
"decouple.config",
"apps.database.db_session.close",
"uuid.uuid1",
"apps.models.Account.query.get",
"apps.database.db_session.query",
"apps.database.init_db"
] | [((199, 226), 'apps.restaccount.logging.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (216, 226), False, 'from apps.restaccount.logging import logging\n'), ((267, 314), 'decouple.config', 'config', (['"""EVENTSTORE_HOST"""'], {'default': '"""eventstore"""'}), "('EVENTSTORE_HOST', default='eventstore')\n", (273, 314), False, 'from decouple import config\n'), ((325, 374), 'decouple.config', 'config', (['"""EVENTSTORE_PORT"""'], {'default': '(2113)', 'cast': 'int'}), "('EVENTSTORE_PORT', default=2113, cast=int)\n", (331, 374), False, 'from decouple import config\n'), ((615, 644), 'apps.models.Account.query.get', 'Account.query.get', (['account_id'], {}), '(account_id)\n', (632, 644), False, 'from apps.models import Account\n'), ((731, 740), 'apps.database.init_db', 'init_db', ([], {}), '()\n', (738, 740), False, 'from apps.database import init_db, db_session\n'), ((805, 823), 'apps.database.db_session.close', 'db_session.close', ([], {}), '()\n', (821, 823), False, 'from apps.database import init_db, db_session\n'), ((867, 954), 'falcon.HTTPBadRequest', 'falcon.HTTPBadRequest', (['"""Balance missing"""', '"""Deposit money to start using an account"""'], {}), "('Balance missing',\n 'Deposit money to start using an account')\n", (888, 954), False, 'import falcon\n'), ((2297, 2396), 'falcon.HTTPBadRequest', 'falcon.HTTPBadRequest', (['"""Account missing"""', '"""You must deposit into an account before transfering"""'], {}), "('Account missing',\n 'You must deposit into an account before transfering')\n", (2318, 2396), False, 'import falcon\n'), ((755, 780), 'apps.database.db_session.query', 'db_session.query', (['Account'], {}), '(Account)\n', (771, 780), False, 'from apps.database import init_db, db_session\n'), ((1608, 1620), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (1618, 1620), False, 'import uuid\n'), ((2148, 2160), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (2158, 2160), False, 'import uuid\n')] |
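A wiring sketch for the resources above; the module layout and route paths are assumptions, and falcon.API is the pre-3.0 entry point (falcon.App on 3.x):

# app.py -- hypothetical entry point wiring the resources above
import falcon
api = falcon.API()
api.add_route('/balance/{account_id}', BalanceResource())
api.add_route('/deposit', DepositResource())
api.add_route('/transfer', TransferResource())
# run with e.g.: gunicorn app:api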
# -*- coding: utf-8 -*-
# @Author : Skye
# @Time : 2018/1/8 20:38
# @desc : Python 3 quiz-challenge assistant: screenshot, OCR recognition, Baidu search
import io
import urllib.parse
import webbrowser
import requests
import base64
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import os
def pull_screenshot():
os.system('adb shell screencap -p /sdcard/screenshot.png')
os.system('adb pull /sdcard/screenshot.png .')
pull_screenshot()
img = Image.open("./screenshot.png")
# Inspect the resolution with matplotlib first, then crop the question region
region = img.crop((50, 350, 1000, 560))  # Smartisan Nut Pro 1
region.save('./crop.png')
#region = img.crop((75, 315, 1167, 789)) # iPhone 7P
#im = plt.imshow(img, animated=True)
#im2 = plt.imshow(region, animated=True)
#plt.show()
# Baidu OCR API: register and create an application at https://cloud.baidu.com/product/ocr
api_key = 'oZokCbcX3unqb4CpGvD873Co'
api_secret = '<KEY>'
# Fetch the OAuth access token
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id='+api_key+'&client_secret='+api_secret
headers = {
'Content-Type':'application/json;charset=UTF-8'
}
res = requests.get(url=host,headers=headers).json()
token = res['access_token']
imgByteArr = io.BytesIO()
region.save(imgByteArr, format='PNG')
image_data = imgByteArr.getvalue()
base64_data = base64.b64encode(image_data)
r = requests.post('https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic',
params={'access_token': token}, data={'image': base64_data})
result = ''
for i in r.json()['words_result']:
result += i['words']
result = urllib.parse.quote(result)
webbrowser.open('https://baidu.com/s?wd='+result)
| [
"requests.post",
"PIL.Image.open",
"base64.b64encode",
"io.BytesIO",
"webbrowser.open",
"requests.get",
"os.system"
] | [((443, 473), 'PIL.Image.open', 'Image.open', (['"""./screenshot.png"""'], {}), "('./screenshot.png')\n", (453, 473), False, 'from PIL import Image\n'), ((1143, 1155), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1153, 1155), False, 'import io\n'), ((1244, 1272), 'base64.b64encode', 'base64.b64encode', (['image_data'], {}), '(image_data)\n', (1260, 1272), False, 'import base64\n'), ((1277, 1413), 'requests.post', 'requests.post', (['"""https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic"""'], {'params': "{'access_token': token}", 'data': "{'image': base64_data}"}), "('https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic',\n params={'access_token': token}, data={'image': base64_data})\n", (1290, 1413), False, 'import requests\n'), ((1532, 1583), 'webbrowser.open', 'webbrowser.open', (["('https://baidu.com/s?wd=' + result)"], {}), "('https://baidu.com/s?wd=' + result)\n", (1547, 1583), False, 'import webbrowser\n'), ((308, 366), 'os.system', 'os.system', (['"""adb shell screencap -p /sdcard/screenshot.png"""'], {}), "('adb shell screencap -p /sdcard/screenshot.png')\n", (317, 366), False, 'import os\n'), ((371, 417), 'os.system', 'os.system', (['"""adb pull /sdcard/screenshot.png ."""'], {}), "('adb pull /sdcard/screenshot.png .')\n", (380, 417), False, 'import os\n'), ((1054, 1093), 'requests.get', 'requests.get', ([], {'url': 'host', 'headers': 'headers'}), '(url=host, headers=headers)\n', (1066, 1093), False, 'import requests\n')] |
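The crop box above is device-specific; a sketch that scales it to other screens, where the 1080x2160 reference resolution is an assumption inferred from the constants above:

# Hypothetical proportional crop for other screens, scaling (50, 350, 1000, 560)
# from an assumed 1080x2160 reference display.
w, h = img.size
box = (int(w * 50 / 1080), int(h * 350 / 2160), int(w * 1000 / 1080), int(h * 560 / 2160))
region = img.crop(box)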
from __future__ import print_function
import os
import re
def openFile(f, m='r'):
    if os.path.exists(f):
        return open(f, m)
    else:
        return open('../' + f, m)
demo_test = ' '.join(openFile('mockito_test/demo_test.py').readlines())
demo_test = demo_test.split('#DELIMINATOR')[1]
readme_before = ''.join(openFile('README').readlines())
token = 'Basic usage:'
readme_after = re.compile(token + '.*', re.S).sub(token + '\n' + demo_test, readme_before)
if (readme_before != readme_after):
readme_file = openFile('README', 'w')
readme_file.write(readme_after)
print("README updated")
else:
print("README update not required")
| [
"os.path.exists",
"re.compile"
] | [((90, 107), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (104, 107), False, 'import os\n'), ((390, 420), 're.compile', 're.compile', (["(token + '.*')", 're.S'], {}), "(token + '.*', re.S)\n", (400, 420), False, 'import re\n')] |
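A toy demonstration of the substitution idiom used above -- re.S makes '.' match newlines, so everything from the token to the end of the file is replaced in one step:

import re
text = "Intro\nBasic usage:\nold example\nmore old text"
token = 'Basic usage:'
print(re.compile(token + '.*', re.S).sub(token + '\nnew example', text))
# -> "Intro\nBasic usage:\nnew example"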
from flask import Flask
from flask import render_template
from flask import request, session, redirect, url_for, escape, send_from_directory
import requests
import json
app = Flask(__name__, static_url_path='')
def predictor(tavg, model, degree):
if degree == 3:
y = model['coef'][0][3]*(tavg**3) + \
model['coef'][0][2]*(tavg**2) + \
model['coef'][0][1]*(tavg) + \
model['intercept'][0]
if degree == 4:
y = model['coef'][0][4]*(tavg**4) + \
model['coef'][0][3]*(tavg**3) + \
model['coef'][0][2]*(tavg**2) + \
model['coef'][0][1]*(tavg) + \
model['intercept'][0]
return round(y,0)
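# e.g. with a hypothetical degree-3 model {'coef': [[0, 2.0, 0.5, 0.1]], 'intercept': [30.0]},
# predictor(10.0, model, 3) evaluates 0.1*10**3 + 0.5*10**2 + 2.0*10 + 30.0 = 200.0.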
@app.route("/", methods=['GET','POST'])
def start():
return render_template('index.html')
@app.route("/code_explanation", methods=['GET','POST'])
def explanation():
return render_template('code_explanation.html')
@app.route("/web_scraping", methods=['GET','POST'])
def scraping():
return render_template('web_scraping.html')
@app.route("/preprocessing", methods=['GET','POST'])
def preprocessing():
return render_template('data_preprocess.html')
@app.route("/analyzing", methods=['GET','POST'])
def analyzing():
return render_template('data_analysis_overall.html')
@app.route("/regression", methods=['GET','POST'])
def regression():
return render_template('regression_analysis.html')
@app.route("/pattern", methods=['GET','POST'])
def pattern():
return render_template('crime_pattern_analysis.html')
@app.route("/short_patrol", methods=['GET','POST'])
def short_patrol():
return render_template('shortest_patrol_route.html')
@app.route("/markov_chain_demo", methods=['GET','POST'])
def mcdemo():
file = open("model/MC_and_lamda.txt","r")
data = json.loads(file.read())
return render_template('markov_chain_demo.html', data = data)
@app.route("/regression_api", methods=['GET','POST'])
def regression_api():
return render_template('regression_api.html')
@app.route("/predict_crime_by_temp", methods=['GET','POST'])
def predict():
# load model
temp = request.args.get('temp')
if temp == None:
return "error: no input"
file = open("model/regression.txt","r")
models = json.loads(file.read())
if float(temp) >= 25:
degree = 4
model = models['regressorpoly4']
else :
degree = 3
model = models['regressorpoly325']
return str(predictor(float(temp), model, degree))
@app.route("/crime_forecast", methods=['GET','POST'])
def forecast():
r = requests.get('https://weather.com/weather/tenday/l/Boston+MA?canonicalCityId=6320cadd3d539b434b5a45c094becf3edbe8ea88958185a2287a801115c9ae30')
lines = r.text.split('\n')
conditions = []
n = 0
for line in lines:
if '<td class="temp" headers="hi-lo"' in line:
n += 1
if n == 1:
continue
condition = {}
if len(line.split("</tr>")) == 2:
condition['day'] = line.split('<span class="day-detail')[1].split("</span>")[0].split('>')[1]
hi = line.split('<td class="temp" headers="hi-lo"')[1].split("</sup>")[0].split('<span class="">')[1].split('<sup>')[0]
low = line.split('<td class="temp" headers="hi-lo"')[1].split("</sup>")[1].split('<span class="">')[1].split('<sup>')[0]
condition['temp'] = (float(hi)+float(low))/2
conditions.append(condition)
else :
tr = line.split("</tr>")
for i in range(len(tr)):
condition = {}
if i < len(tr) - 1:
condition['day'] = tr[i].split('<span class="day-detail')[1].split("</span>")[0].split('>')[1]
td = tr[i].split("hi-lo")
hi = td[1].split("</sup>")[0].split('<span class="">')[1].split('<sup>')[0]
low = td[1].split("</sup>")[1].split('<span class="">')[1].split('<sup>')[0]
condition['temp'] = (float(hi)+float(low))/2
conditions.append(condition)
crimes = []
temps = []
days = []
file = open("model/regression.txt","r")
models = json.loads(file.read())
for i in conditions:
if float(i['temp']) >= 25:
degree = 4
model = models['regressorpoly4']
else :
degree = 3
model = models['regressorpoly325']
days.append(i['day'])
temps.append(i['temp'])
crimes.append(float(predictor(float(i['temp']), model, degree)))
return render_template('forecast.html', days = days, temps = temps, crimes = crimes)
import webbrowser
webbrowser.open_new_tab("http://localhost:5000/")
if __name__ == "__main__":
app.run(host= '127.0.0.1',debug=False)
| [
"flask.render_template",
"flask.request.args.get",
"flask.Flask",
"requests.get",
"webbrowser.open_new_tab"
] | [((174, 209), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '""""""'}), "(__name__, static_url_path='')\n", (179, 209), False, 'from flask import Flask\n'), ((4780, 4829), 'webbrowser.open_new_tab', 'webbrowser.open_new_tab', (['"""http://localhost:5000/"""'], {}), "('http://localhost:5000/')\n", (4803, 4829), False, 'import webbrowser\n'), ((762, 791), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (777, 791), False, 'from flask import render_template\n'), ((879, 919), 'flask.render_template', 'render_template', (['"""code_explanation.html"""'], {}), "('code_explanation.html')\n", (894, 919), False, 'from flask import render_template\n'), ((1000, 1036), 'flask.render_template', 'render_template', (['"""web_scraping.html"""'], {}), "('web_scraping.html')\n", (1015, 1036), False, 'from flask import render_template\n'), ((1123, 1162), 'flask.render_template', 'render_template', (['"""data_preprocess.html"""'], {}), "('data_preprocess.html')\n", (1138, 1162), False, 'from flask import render_template\n'), ((1241, 1286), 'flask.render_template', 'render_template', (['"""data_analysis_overall.html"""'], {}), "('data_analysis_overall.html')\n", (1256, 1286), False, 'from flask import render_template\n'), ((1371, 1414), 'flask.render_template', 'render_template', (['"""regression_analysis.html"""'], {}), "('regression_analysis.html')\n", (1386, 1414), False, 'from flask import render_template\n'), ((1493, 1539), 'flask.render_template', 'render_template', (['"""crime_pattern_analysis.html"""'], {}), "('crime_pattern_analysis.html')\n", (1508, 1539), False, 'from flask import render_template\n'), ((1628, 1673), 'flask.render_template', 'render_template', (['"""shortest_patrol_route.html"""'], {}), "('shortest_patrol_route.html')\n", (1643, 1673), False, 'from flask import render_template\n'), ((1843, 1895), 'flask.render_template', 'render_template', (['"""markov_chain_demo.html"""'], {'data': 'data'}), "('markov_chain_demo.html', data=data)\n", (1858, 1895), False, 'from flask import render_template\n'), ((1990, 2028), 'flask.render_template', 'render_template', (['"""regression_api.html"""'], {}), "('regression_api.html')\n", (2005, 2028), False, 'from flask import render_template\n'), ((2148, 2172), 'flask.request.args.get', 'request.args.get', (['"""temp"""'], {}), "('temp')\n", (2164, 2172), False, 'from flask import request, session, redirect, url_for, escape, send_from_directory\n'), ((2611, 2764), 'requests.get', 'requests.get', (['"""https://weather.com/weather/tenday/l/Boston+MA?canonicalCityId=6320cadd3d539b434b5a45c094becf3edbe8ea88958185a2287a801115c9ae30"""'], {}), "(\n 'https://weather.com/weather/tenday/l/Boston+MA?canonicalCityId=6320cadd3d539b434b5a45c094becf3edbe8ea88958185a2287a801115c9ae30'\n )\n", (2623, 2764), False, 'import requests\n'), ((4683, 4754), 'flask.render_template', 'render_template', (['"""forecast.html"""'], {'days': 'days', 'temps': 'temps', 'crimes': 'crimes'}), "('forecast.html', days=days, temps=temps, crimes=crimes)\n", (4698, 4754), False, 'from flask import render_template\n')] |
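A quick client-side check of the prediction endpoint above, against the host and port used by app.run(); the temperature value is an arbitrary example:

import requests
r = requests.get('http://127.0.0.1:5000/predict_crime_by_temp', params={'temp': 20})
print(r.text)  # predicted daily crime count as a string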
# Generated by Django 3.2 on 2021-05-01 14:42
from django.db import migrations, models
import easy_thumbnails.fields
import embed_video.fields
class Migration(migrations.Migration):
dependencies = [
('events', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='venue',
name='capacity',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='event',
name='content',
field=embed_video.fields.EmbedVideoField(blank=True, null=True),
),
migrations.AlterField(
model_name='image',
name='image',
field=easy_thumbnails.fields.ThumbnailerImageField(blank=True, null=True, upload_to=''),
),
]
| [
"django.db.models.IntegerField"
] | [((377, 419), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (396, 419), False, 'from django.db import migrations, models\n')] |
import numpy as np
from scipy.io import loadmat
import os
import logging
from scipy.signal import butter, filtfilt
def mat2npy(mat_chanmap_dir):
mat_chanmap = loadmat(mat_chanmap_dir)
x = mat_chanmap['xcoords']
y = mat_chanmap['ycoords']
npy_chanmap = np.hstack([x,y])
#np.save('chanmap.npy', npy_chanmap) # you can't just go saving this wherever
return npy_chanmap
def merge_filtered_files(filtered_location, output_directory, delete=True):
filenames = os.listdir(filtered_location)
filenames_sorted = sorted(filenames)
f_out = os.path.join(output_directory, "standardized.bin")
f = open(f_out, 'wb')
    for fname in filenames_sorted:
        if '.ipynb' in fname or 'standardized' in fname:
            continue
        res = np.load(os.path.join(filtered_location, fname)).astype('int16')  # was float32
        res.tofile(f)
        if delete:
            os.remove(os.path.join(filtered_location, fname))
    f.close()
# Added functions from yass to avoid mandatory yass install
"""
Filtering functions
"""
def _butterworth(ts, low_frequency, high_factor, order, sampling_frequency):
"""Butterworth filter
Parameters
----------
ts: np.array
T numpy array, where T is the number of time samples
low_frequency: int
Low pass frequency (Hz)
high_factor: float
High pass factor (proportion of sampling rate)
order: int
Order of Butterworth filter
sampling_frequency: int
Sampling frequency (Hz)
Notes
-----
This function can only be applied to a one dimensional array, to apply
it to multiple channels use butterworth
Raises
------
NotImplementedError
    If a multidimensional array is passed
"""
low = float(low_frequency) / sampling_frequency * 2
high = float(high_factor) * 2
b, a = butter(order, low, btype='high', analog=False)
if ts.ndim == 1:
return filtfilt(b, a, ts)
else:
T, C = ts.shape
output = np.zeros((T, C), 'float32')
for c in range(C):
output[:, c] = filtfilt(b, a, ts[:, c])
return output
def _mean_standard_deviation(rec, centered=False):
"""Determine standard deviation of noise in each channel
Parameters
----------
rec : matrix [length of recording, number of channels]
centered : bool
if not standardized, center it
Returns
-------
sd : vector [number of channels]
standard deviation in each channel
"""
# find standard deviation using robust method
if not centered:
centers = np.mean(rec, axis=0)
rec = rec - centers[None]
else:
centers = np.zeros(rec.shape[1], 'float32')
return np.median(np.abs(rec), 0) / 0.6745, centers
def _standardize(rec, sd=None, centers=None):
"""Determine standard deviation of noise in each channel
Parameters
----------
rec : matrix [length of recording, number of channels]
recording
    sd : vector [number of channels,]
standard deviation
centered : bool
if not standardized, center it
Returns
-------
matrix [length of recording, number of channels]
standardized recording
"""
# find standard deviation using robust method
if (sd is None) or (centers is None):
sd, centers = _mean_standard_deviation(rec, centered=False)
# standardize all channels with SD> 0.1 (Voltage?) units
# Cat: TODO: ensure that this is actually correct for all types of channels
idx1 = np.where(sd >= 0.1)[0]
rec[:, idx1] = np.divide(rec[:, idx1] - centers[idx1][None], sd[idx1])
# zero out bad channels
idx2 = np.where(sd < 0.1)[0]
rec[:, idx2] = 0.
return rec
# return np.divide(rec, sd)
def filter_standardize_batch(batch_id, reader, fname_mean_sd,
apply_filter, out_dtype, output_directory,
low_frequency=None, high_factor=None,
order=None, sampling_frequency=None):
"""Butterworth filter for a one dimensional time series
Parameters
----------
ts: np.array
T numpy array, where T is the number of time samples
low_frequency: int
Low pass frequency (Hz)
high_factor: float
High pass factor (proportion of sampling rate)
order: int
Order of Butterworth filter
sampling_frequency: int
Sampling frequency (Hz)
Notes
-----
This function can only be applied to a one dimensional array, to apply
it to multiple channels use butterworth
Raises
------
NotImplementedError
    If a multidimensional array is passed
"""
logger = logging.getLogger(__name__)
# filter
if apply_filter:
# read a batch
ts = reader.read_data_batch(batch_id, add_buffer=True)
ts = _butterworth(ts, low_frequency, high_factor,
order, sampling_frequency)
ts = ts[reader.buffer:-reader.buffer]
else:
ts = reader.read_data_batch(batch_id, add_buffer=False)
# standardize
temp = np.load(fname_mean_sd)
sd = temp['sd']
centers = temp['centers']
ts = _standardize(ts, sd, centers)
# save
fname = os.path.join(
output_directory,
"standardized_{}.npy".format(
str(batch_id).zfill(6)))
np.save(fname, ts.astype(out_dtype))
# fname = os.path.join(
# output_directory,
# "standardized_{}.bin".format(
# str(batch_id).zfill(6)))
# f = open(fname, 'wb')
# f.write(ts.astype(out_dtype))
def get_std(ts,
sampling_frequency,
fname,
apply_filter=False,
low_frequency=None,
high_factor=None,
order=None):
"""Butterworth filter for a one dimensional time series
Parameters
----------
ts: np.array
T numpy array, where T is the number of time samples
low_frequency: int
Low pass frequency (Hz)
high_factor: float
High pass factor (proportion of sampling rate)
order: int
Order of Butterworth filter
sampling_frequency: int
Sampling frequency (Hz)
Notes
-----
This function can only be applied to a one dimensional array, to apply
it to multiple channels use butterworth
Raises
------
NotImplementedError
    If a multidimensional array is passed
"""
# filter
if apply_filter:
ts = _butterworth(ts, low_frequency, high_factor,
order, sampling_frequency)
# standardize
sd, centers = _mean_standard_deviation(ts)
# save
np.savez(fname,
centers=centers,
sd=sd) | [
"logging.getLogger",
"numpy.mean",
"numpy.savez",
"os.listdir",
"numpy.abs",
"numpy.hstack",
"scipy.signal.filtfilt",
"numpy.where",
"scipy.io.loadmat",
"os.path.join",
"scipy.signal.butter",
"numpy.zeros",
"numpy.load",
"numpy.divide"
] | [((164, 188), 'scipy.io.loadmat', 'loadmat', (['mat_chanmap_dir'], {}), '(mat_chanmap_dir)\n', (171, 188), False, 'from scipy.io import loadmat\n'), ((274, 291), 'numpy.hstack', 'np.hstack', (['[x, y]'], {}), '([x, y])\n', (283, 291), True, 'import numpy as np\n'), ((495, 524), 'os.listdir', 'os.listdir', (['filtered_location'], {}), '(filtered_location)\n', (505, 524), False, 'import os\n'), ((578, 628), 'os.path.join', 'os.path.join', (['output_directory', '"""standardized.bin"""'], {}), "(output_directory, 'standardized.bin')\n", (590, 628), False, 'import os\n'), ((1853, 1899), 'scipy.signal.butter', 'butter', (['order', 'low'], {'btype': '"""high"""', 'analog': '(False)'}), "(order, low, btype='high', analog=False)\n", (1859, 1899), False, 'from scipy.signal import butter, filtfilt\n'), ((3591, 3646), 'numpy.divide', 'np.divide', (['(rec[:, idx1] - centers[idx1][None])', 'sd[idx1]'], {}), '(rec[:, idx1] - centers[idx1][None], sd[idx1])\n', (3600, 3646), True, 'import numpy as np\n'), ((4715, 4742), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4732, 4742), False, 'import logging\n'), ((5125, 5147), 'numpy.load', 'np.load', (['fname_mean_sd'], {}), '(fname_mean_sd)\n', (5132, 5147), True, 'import numpy as np\n'), ((6683, 6722), 'numpy.savez', 'np.savez', (['fname'], {'centers': 'centers', 'sd': 'sd'}), '(fname, centers=centers, sd=sd)\n', (6691, 6722), True, 'import numpy as np\n'), ((1937, 1955), 'scipy.signal.filtfilt', 'filtfilt', (['b', 'a', 'ts'], {}), '(b, a, ts)\n', (1945, 1955), False, 'from scipy.signal import butter, filtfilt\n'), ((2007, 2034), 'numpy.zeros', 'np.zeros', (['(T, C)', '"""float32"""'], {}), "((T, C), 'float32')\n", (2015, 2034), True, 'import numpy as np\n'), ((2603, 2623), 'numpy.mean', 'np.mean', (['rec'], {'axis': '(0)'}), '(rec, axis=0)\n', (2610, 2623), True, 'import numpy as np\n'), ((2686, 2719), 'numpy.zeros', 'np.zeros', (['rec.shape[1]', '"""float32"""'], {}), "(rec.shape[1], 'float32')\n", (2694, 2719), True, 'import numpy as np\n'), ((3549, 3568), 'numpy.where', 'np.where', (['(sd >= 0.1)'], {}), '(sd >= 0.1)\n', (3557, 3568), True, 'import numpy as np\n'), ((3687, 3705), 'numpy.where', 'np.where', (['(sd < 0.1)'], {}), '(sd < 0.1)\n', (3695, 3705), True, 'import numpy as np\n'), ((2089, 2113), 'scipy.signal.filtfilt', 'filtfilt', (['b', 'a', 'ts[:, c]'], {}), '(b, a, ts[:, c])\n', (2097, 2113), False, 'from scipy.signal import butter, filtfilt\n'), ((923, 961), 'os.path.join', 'os.path.join', (['filtered_location', 'fname'], {}), '(filtered_location, fname)\n', (935, 961), False, 'import os\n'), ((2742, 2753), 'numpy.abs', 'np.abs', (['rec'], {}), '(rec)\n', (2748, 2753), True, 'import numpy as np\n'), ((790, 828), 'os.path.join', 'os.path.join', (['filtered_location', 'fname'], {}), '(filtered_location, fname)\n', (802, 828), False, 'import os\n')] |
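A small sanity check of the _butterworth high-pass above; the cutoff, order, and sampling rate mirror typical ephys settings but are assumptions:

import numpy as np
fs = 30000                                # assumed 30 kHz sampling rate
t = np.arange(fs, dtype=np.float32) / fs
sig = np.sin(2 * np.pi * t) + 0.1 * np.sin(2 * np.pi * 1000 * t)  # 1 Hz drift + 1 kHz band
out = _butterworth(sig, low_frequency=300, high_factor=0.45, order=3, sampling_frequency=fs)
# The 1 Hz drift is attenuated by the 300 Hz high-pass; the 1 kHz component survives.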
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import contextlib
import torch
import torch.nn.functional as F
from typing import List, Tuple, Dict, Optional
from transformers import BertForMaskedLM
from fairseq import utils
from fairseq.models import (
BaseFairseqModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LayerNorm,
GradMultiply,
PositionalEmbedding,
TransformerDecoderLayer,
TransposeLast,
Fp32LayerNorm,
Fp32GroupNorm,
FairseqDropout
)
from .wav2vec2_ctc import (
Linear,
Wav2VecEncoder,
add_common_args,
base_architecture
)
from .wav2vec2_cif import (
CIFFcModel,
CIFFcModelV2,
cif_architecture,
)
def padding2attention_mask(padding_mask):
mask1 = F.pad(padding_mask, [0, 1, 0, 0], value=1)
mask2 = F.pad(padding_mask, [1, 0, 0, 0], value=0)
mask = 1 - mask1.int() * mask2.int()
return F.pad(mask, [1, 0, 0, 0], value=1)
def pred2bert_input(pred, token_mask, cls=101, sep=102):
pred *= token_mask
end_index = token_mask.sum(-1).long().unsqueeze(1) + 1
pred.scatter_(dim=-1, index=end_index, value=sep)
pred[:, 0] = cls
return pred
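# e.g. a padding_mask row [0, 0, 1] (last step padded, T=3) yields the length-(T+2)
# attention mask [1, 1, 1, 1, 0]: CLS, two real tokens, and the SEP that
# pred2bert_input writes into the first padded slot stay visible; the rest is masked.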
def add_lm_args(parser):
parser.add_argument(
"--freeze-lm-finetune-updates", type=int, default=0, help="freeze_lm_finetune_updates"
)
parser.add_argument(
"--gold-rate-range", type=str, help="gold-rate-range"
)
parser.add_argument(
"--gold-rate-steps", type=str, help="gold-rate-steps"
)
parser.add_argument(
"--infer-threash", type=float, default=0.8, help="infer-threash"
)
parser.add_argument(
"--lambda-embedding", type=float, metavar="D", help="lambda-embedding"
)
parser.add_argument(
"--lambda-am", type=float, default=1.0, metavar="D", help="lambda-am"
)
parser.add_argument(
"--lambda-lm", type=float, default=0.2, metavar="D", help="lambda-lm"
)
parser.add_argument("--lambda-qua", type=float, default=0.1, metavar="D", help="lambda-qua")
@register_model("w2v_cif_bert")
class W2V_CIF_BERT(BaseFairseqModel):
def __init__(self, args, encoder, bert, to_vocab, tgt_dict):
"""
.copy_() clone to_vocab
"""
super().__init__()
self.encoder = encoder
self.bert = bert
self.dim_bert = bert.embeddings.word_embeddings.weight.size(1)
self.to_vocab = to_vocab # 768 -> 21128
self.to_vocab_ac = copy.deepcopy(to_vocab)
self.to_vocab_ctc = copy.deepcopy(to_vocab)
self.proj = Linear(encoder.d-1, self.dim_bert)
self.tgt_dict = tgt_dict
self.num_updates = 0
self.args = args
self.freeze_lm_finetune_updates = args.freeze_lm_finetune_updates
self.gold_rate_range = eval(args.gold_rate_range)
self.gold_rate_steps = eval(args.gold_rate_steps)
for p in self.bert.embeddings.parameters():
p.requires_grad = False
@staticmethod
def add_args(parser):
add_common_args(parser)
add_lm_args(parser)
parser.add_argument("--lambda-ctc", type=float, metavar="D", help="lambda-ctc")
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
w2v_cif_bert_architecture(args)
tgt_dict = task.target_dictionary
bert, to_vocab = cls.build_bert(args, tgt_dict)
encoder = cls.build_encoder(args) # encoder
return cls(args, encoder, bert, to_vocab, tgt_dict)
@classmethod
def build_encoder(cls, args, tgt_dict=None):
return Wav2VecEncoder(args, tgt_dict=tgt_dict)
@classmethod
def build_bert(cls, args, tgt_dict):
pretrained_model = BertForMaskedLM.from_pretrained(args.bert_name)
bert = pretrained_model.bert
to_vocab = pretrained_model.cls
return bert, to_vocab
def forward(self, **kwargs):
"""
encoder_output= "encoder_out": x,
"encoded": encoded,
"encoder_padding_mask": padding_mask, # B x T
"padding_mask": padding_mask,
"""
encoder_output = self.encoder(tbc=False, **kwargs)
hidden_encoded = encoder_output['encoder_out'][:, :, :-1]
hidden_ctc = F.pad(hidden_encoded, [0, 1, 0, 0, 0, 0], value=0)
logits_ctc = self.to_vocab_ctc(hidden_ctc)
len_logits_ctc = (~encoder_output['padding_mask']).sum(-1).long()
alphas = CIFFcModelV2.get_alphas(encoder_output)
if self.training:
gold_rate = self.set_gold_rate()
decode_length = kwargs['target_lengths']
gold_ids = kwargs['bert_input'].long()
noise = 0.0
else:
gold_rate = 0.0
decode_length = torch.round(alphas.sum(-1)).int()
gold_ids = None
noise = 0.0
_alphas, num_output = self.resize(alphas, decode_length, noise=noise)
padding_mask = ~utils.sequence_mask(decode_length).bool()
cif_outputs = self.cif(hidden_encoded, _alphas)
hidden_ac = self.proj(cif_outputs)
logits_ac = self.to_vocab_ac(hidden_ac)
ft = self.freeze_lm_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
logits_lm, gold_embedding, pred_mask, token_mask = self.bert_forward(
hidden_ac, logits_ac, padding_mask, gold_ids, gold_rate,
threash=self.args.infer_threash)
logits = self.args.lambda_am * logits_ac + self.args.lambda_lm * logits_lm
logits *= (~padding_mask).unsqueeze(-1).float()
return {'logits': logits, 'len_logits': decode_length,
'alphas': alphas, 'num_output': num_output, 'gold_rate': gold_rate,
'logits_ctc': logits_ctc, 'len_logits_ctc': len_logits_ctc,
'pred_mask': pred_mask[:, 1:-1], 'token_mask': token_mask[:, 1:-1]}
def bert_forward(self, hidden, logits_ac, padding_mask, gold_ids=None, gold_rate=0.0, threash=0.8):
"""
"""
device = hidden.device
token_mask = F.pad(~padding_mask, [1, 1, 0, 0], value=0)
if self.training:
input_ids = gold_ids
pred_mask = (torch.rand(input_ids.size(), device=device) > gold_rate) * token_mask
else: # infer
probs = F.pad(utils.softmax(logits_ac.float(), dim=-1), [0, 0, 1, 1, 0, 0], value=0)
confident, preds = probs.max(-1)
input_ids = pred2bert_input(preds, token_mask)
pred_mask = (confident <= threash) * token_mask
# mixing
gold_embedding = self.bert.embeddings.word_embeddings(input_ids)
hidden_mix = torch.where(pred_mask[:, :, None].repeat(1, 1, hidden.size(-1)),
F.pad(hidden, [0, 0, 1, 1, 0, 0], value=0),
gold_embedding)
attention_mask = padding2attention_mask(padding_mask)
embeddings = self.bert.embeddings(inputs_embeds=hidden_mix)
encoder_outputs = self.bert.encoder(
embeddings,
attention_mask=attention_mask[:, None, None, :])
logits = self.to_vocab(encoder_outputs[0])
logits = logits[:, 1:-1, :]
return logits, gold_embedding, pred_mask, token_mask
@staticmethod
def resize(*args, **kwargs):
return CIFFcModel.resize(*args, **kwargs)
@staticmethod
def cif(*args, **kwargs):
return CIFFcModel.cif(*args, **kwargs)
def get_normalized_probs(self, net_output, log_probs, retrun_ctc=False):
"""Get normalized probabilities (or log probs) from a net's output."""
logits_ctc = net_output["logits_ctc"]
logits = net_output["logits"]
if log_probs:
res_ctc = utils.log_softmax(logits_ctc.float(), dim=-1)
res = utils.log_softmax(logits.float(), dim=-1)
else:
res_ctc = utils.softmax(logits_ctc.float(), dim=-1)
res = utils.softmax(logits.float(), dim=-1)
res_ctc.batch_first = True
res.batch_first = True
if retrun_ctc:
return res_ctc, res
else:
return res
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def set_gold_rate(self):
s, e = self.gold_rate_range
s1, s2 = self.gold_rate_steps
gold_rate = max((1 - max((self.num_updates - s1), 0) / s2) * (s-e), 0) + e
return gold_rate
@register_model_architecture("w2v_cif_bert", "w2v_cif_bert")
def w2v_cif_bert_architecture(args):
cif_architecture(args)
args.share_final_proj = getattr(args, "share_final_proj", False)
| [
"transformers.BertForMaskedLM.from_pretrained",
"fairseq.models.register_model_architecture",
"fairseq.models.register_model",
"fairseq.utils.sequence_mask",
"contextlib.ExitStack",
"copy.deepcopy",
"torch.nn.functional.pad",
"torch.no_grad"
] | [((2208, 2238), 'fairseq.models.register_model', 'register_model', (['"""w2v_cif_bert"""'], {}), "('w2v_cif_bert')\n", (2222, 2238), False, 'from fairseq.models import BaseFairseqModel, register_model, register_model_architecture\n'), ((8831, 8890), 'fairseq.models.register_model_architecture', 'register_model_architecture', (['"""w2v_cif_bert"""', '"""w2v_cif_bert"""'], {}), "('w2v_cif_bert', 'w2v_cif_bert')\n", (8858, 8890), False, 'from fairseq.models import BaseFairseqModel, register_model, register_model_architecture\n'), ((917, 959), 'torch.nn.functional.pad', 'F.pad', (['padding_mask', '[0, 1, 0, 0]'], {'value': '(1)'}), '(padding_mask, [0, 1, 0, 0], value=1)\n', (922, 959), True, 'import torch.nn.functional as F\n'), ((972, 1014), 'torch.nn.functional.pad', 'F.pad', (['padding_mask', '[1, 0, 0, 0]'], {'value': '(0)'}), '(padding_mask, [1, 0, 0, 0], value=0)\n', (977, 1014), True, 'import torch.nn.functional as F\n'), ((1068, 1102), 'torch.nn.functional.pad', 'F.pad', (['mask', '[1, 0, 0, 0]'], {'value': '(1)'}), '(mask, [1, 0, 0, 0], value=1)\n', (1073, 1102), True, 'import torch.nn.functional as F\n'), ((2628, 2651), 'copy.deepcopy', 'copy.deepcopy', (['to_vocab'], {}), '(to_vocab)\n', (2641, 2651), False, 'import copy\n'), ((2680, 2703), 'copy.deepcopy', 'copy.deepcopy', (['to_vocab'], {}), '(to_vocab)\n', (2693, 2703), False, 'import copy\n'), ((3938, 3985), 'transformers.BertForMaskedLM.from_pretrained', 'BertForMaskedLM.from_pretrained', (['args.bert_name'], {}), '(args.bert_name)\n', (3969, 3985), False, 'from transformers import BertForMaskedLM\n'), ((4509, 4559), 'torch.nn.functional.pad', 'F.pad', (['hidden_encoded', '[0, 1, 0, 0, 0, 0]'], {'value': '(0)'}), '(hidden_encoded, [0, 1, 0, 0, 0, 0], value=0)\n', (4514, 4559), True, 'import torch.nn.functional as F\n'), ((6356, 6399), 'torch.nn.functional.pad', 'F.pad', (['(~padding_mask)', '[1, 1, 0, 0]'], {'value': '(0)'}), '(~padding_mask, [1, 1, 0, 0], value=0)\n', (6361, 6399), True, 'import torch.nn.functional as F\n'), ((7048, 7090), 'torch.nn.functional.pad', 'F.pad', (['hidden', '[0, 0, 1, 1, 0, 0]'], {'value': '(0)'}), '(hidden, [0, 0, 1, 1, 0, 0], value=0)\n', (7053, 7090), True, 'import torch.nn.functional as F\n'), ((5469, 5484), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5482, 5484), False, 'import torch\n'), ((5500, 5522), 'contextlib.ExitStack', 'contextlib.ExitStack', ([], {}), '()\n', (5520, 5522), False, 'import contextlib\n'), ((5201, 5235), 'fairseq.utils.sequence_mask', 'utils.sequence_mask', (['decode_length'], {}), '(decode_length)\n', (5220, 5235), False, 'from fairseq import utils\n')] |
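The scheduled-sampling rate in set_gold_rate decays linearly from the top of gold_rate_range to its floor, starting after the first step count and finishing after the second; a standalone sketch with assumed hyperparameters:

# Standalone sketch of the schedule in set_gold_rate (range/steps values are assumptions).
def gold_rate(num_updates, rate_range=(0.9, 0.3), rate_steps=(10000, 20000)):
    s, e = rate_range
    s1, s2 = rate_steps
    return max((1 - max(num_updates - s1, 0) / s2) * (s - e), 0) + e

print(gold_rate(0))      # 0.9  (full teacher forcing before s1)
print(gold_rate(20000))  # 0.6  (halfway through the decay)
print(gold_rate(40000))  # 0.3  (floor after s1 + s2 updates)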
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import sys
import cv2
import time
import numpy as np
import torch
import torch.nn.functional as F
from .sdf_utils import *  # presumably supplies read_sdf, Oplus, and friends
import _init_paths
from fcn.config import cfg
from layers.sdf_matching_loss import SDFLoss
class sdf_optimizer():
def __init__(self, classes, sdf_files, lr=0.01, optimizer='Adam', use_gpu=True):
self.classes = classes
self.sdf_files = sdf_files
self.use_gpu = use_gpu
num = len(sdf_files)
self.xmins = np.zeros((num, ), dtype=np.float32)
self.ymins = np.zeros((num, ), dtype=np.float32)
self.zmins = np.zeros((num, ), dtype=np.float32)
self.xmaxs = np.zeros((num, ), dtype=np.float32)
self.ymaxs = np.zeros((num, ), dtype=np.float32)
self.zmaxs = np.zeros((num, ), dtype=np.float32)
sdf_torch_list = []
for i in range(len(sdf_files)):
sdf_file = sdf_files[i]
print(' start loading sdf from {} ... '.format(sdf_file))
if sdf_file[-3:] == 'sdf':
sdf_info = read_sdf(sdf_file)
sdf = sdf_info[0]
min_coords = sdf_info[1]
delta = sdf_info[2]
max_coords = min_coords + delta * np.array(sdf.shape)
self.xmins[i], self.ymins[i], self.zmins[i] = min_coords
self.xmaxs[i], self.ymaxs[i], self.zmaxs[i] = max_coords
sdf_torch_list.append(torch.from_numpy(sdf).float())
elif sdf_file[-3:] == 'pth':
sdf_info = torch.load(sdf_file)
min_coords = sdf_info['min_coords']
max_coords = sdf_info['max_coords']
self.xmins[i], self.ymins[i], self.zmins[i] = min_coords
self.xmaxs[i], self.ymaxs[i], self.zmaxs[i] = max_coords
sdf_torch_list.append(sdf_info['sdf_torch'][0, 0].permute(1, 0, 2))
print(' minimal coordinate = ({:.4f}, {:.4f}, {:.4f}) cm'.format(self.xmins[i] * 100, self.ymins[i] * 100, self.zmins[i] * 100))
print(' maximal coordinate = ({:.4f}, {:.4f}, {:.4f}) cm'.format(self.xmaxs[i] * 100, self.ymaxs[i] * 100, self.zmaxs[i] * 100))
print(sdf_torch_list[-1].shape)
print(' finished loading sdf ! ')
# combine sdfs
max_shape = np.array([sdf.shape for sdf in sdf_torch_list]).max(axis=0)
self.sdf_torch = torch.ones((num, max_shape[0], max_shape[1], max_shape[2]), dtype=torch.float32)
self.sdf_limits = np.zeros((num, 9), dtype=np.float32)
for i in range(num):
size = sdf_torch_list[i].shape
self.sdf_torch[i, :size[0], :size[1], :size[2]] = sdf_torch_list[i]
self.sdf_limits[i, 0] = self.xmins[i]
self.sdf_limits[i, 1] = self.ymins[i]
self.sdf_limits[i, 2] = self.zmins[i]
self.sdf_limits[i, 3] = self.xmins[i] + (self.xmaxs[i] - self.xmins[i]) * max_shape[0] / size[0]
self.sdf_limits[i, 4] = self.ymins[i] + (self.ymaxs[i] - self.ymins[i]) * max_shape[1] / size[1]
self.sdf_limits[i, 5] = self.zmins[i] + (self.zmaxs[i] - self.zmins[i]) * max_shape[2] / size[2]
self.sdf_limits[i, 6] = max_shape[0]
self.sdf_limits[i, 7] = max_shape[1]
self.sdf_limits[i, 8] = max_shape[2]
self.sdf_limits = torch.from_numpy(self.sdf_limits)
if self.use_gpu:
self.sdf_torch = self.sdf_torch.cuda()
self.sdf_limits = self.sdf_limits.cuda()
        self.sdf_loss = SDFLoss()
        # State required by refine_pose(); a minimal reconstruction, since the
        # original __init__ accepted lr/optimizer but never stored them.
        self.optimizer_type = optimizer
        device = 'cuda' if use_gpu else 'cpu'
        self.dpose = torch.zeros(6, dtype=torch.float32, requires_grad=True, device=device)
        self.loss = torch.nn.MSELoss()
        if optimizer == 'LBFGS':
            self.optimizer = torch.optim.LBFGS([self.dpose], lr=lr)
        else:
            self.optimizer = torch.optim.Adam([self.dpose], lr=lr)
def look_up(self, samples_x, samples_y, samples_z):
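        # NOTE: legacy single-object path -- it reads scalar self.xmin/self.xmax
        # etc., which __init__ above never sets (it stores per-object arrays);
        # prefer refine_pose_layer / SDFLoss for the batched SDFs.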
samples_x = torch.clamp(samples_x, self.xmin, self.xmax)
samples_y = torch.clamp(samples_y, self.ymin, self.ymax)
samples_z = torch.clamp(samples_z, self.zmin, self.zmax)
samples_x = (samples_x - self.xmin) / (self.xmax - self.xmin)
samples_y = (samples_y - self.ymin) / (self.ymax - self.ymin)
samples_z = (samples_z - self.zmin) / (self.zmax - self.zmin)
samples = torch.cat((samples_z.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(4),
samples_x.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(4),
samples_y.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(4)),
dim=4)
samples = samples * 2 - 1
return F.grid_sample(self.sdf_torch, samples, padding_mode="border")
def compute_dist(self, d_pose, T_oc_0, ps_c):
ps_o = torch.mm(Oplus(T_oc_0, d_pose, self.use_gpu), ps_c.permute(1, 0)).permute(1, 0)[:, :3]
dist = self.look_up(ps_o[:, 0], ps_o[:, 1], ps_o[:, 2])
return torch.abs(dist)
def refine_pose(self, T_co_0, ps_c, steps=100):
# input T_co_0: 4x4
# ps_c: nx4
if self.use_gpu:
T_oc_0 = torch.from_numpy(np.linalg.inv(T_co_0)).cuda()
else:
T_oc_0 = torch.from_numpy(np.linalg.inv(T_co_0))
self.dpose.data[:3] *= 0
self.dpose.data[3:] = self.dpose.data[3:] * 0 + 1e-12
self.dist = torch.zeros((ps_c.size(0),))
if self.use_gpu:
self.dist = self.dist.cuda()
for i in range(steps):
if self.optimizer_type == 'LBFGS':
def closure():
self.optimizer.zero_grad()
dist = self.compute_dist(self.dpose, T_oc_0, ps_c)
self.dist = dist.detach()
dist_target = torch.zeros_like(dist)
if self.use_gpu:
dist_target = dist_target.cuda()
loss = self.loss(dist, dist_target)
loss.backward()
return loss
self.optimizer.step(closure)
elif self.optimizer_type == 'Adam':
self.optimizer.zero_grad()
dist = self.compute_dist(self.dpose, T_oc_0, ps_c)
self.dist = dist.detach()
dist_target = torch.zeros_like(dist)
if self.use_gpu:
dist_target = dist_target.cuda()
loss = self.loss(dist, dist_target)
loss.backward()
self.optimizer.step()
# print('step: {}, loss = {}'.format(i + 1, loss.data.cpu().item()))
T_oc_opt = Oplus(T_oc_0, self.dpose, self.use_gpu)
T_co_opt = np.linalg.inv(T_oc_opt.cpu().detach().numpy())
dist = torch.mean(torch.abs(self.dist)).detach().cpu().numpy()
return T_co_opt, dist
def refine_pose_layer(self, T_oc_0, points, steps=100):
# input T_co_0: mx4x4, m is the number of objects
# points: nx3 in camera
# construct initial pose
pose_init = torch.from_numpy(T_oc_0).cuda()
m = T_oc_0.shape[0]
dpose = torch.zeros((m, 6), dtype=torch.float32, requires_grad=True, device=0)
dpose.data[:, :3] *= 0
dpose.data[:, 3:] = dpose.data[:, 3:] * 0 + 1e-12
treg = cfg.TEST.SDF_TRANSLATION_REG
rreg = cfg.TEST.SDF_ROTATION_REG
regularization = torch.tensor([treg, treg, treg, rreg, rreg, rreg], dtype=torch.float32, requires_grad=False, device=0)
start = time.time()
for i in range(steps):
# self.optimizer.zero_grad()
loss, sdf_values, T_oc_opt, dalpha, J = self.sdf_loss(dpose, pose_init, self.sdf_torch, self.sdf_limits, points, regularization)
# print(loss)
# loss.backward()
# self.optimizer.step()
# JTJ = JTJ.cpu().detach().numpy() + np.diag([100, 100, 100, 0.001, 0.001, 0.001]).astype(np.float32)
# J = J.cpu().detach().numpy()
# dalpha = torch.from_numpy(np.matmul(np.linalg.inv(JTJ), J)).cuda()
dpose = dpose - dalpha
# self.dpose = self.dpose - 0.001 * J
end = time.time()
print('sdf refinement iterations %d, time %f' % (steps, end - start))
return T_oc_opt.cpu().detach().numpy()
| [
"time.time",
"layers.sdf_matching_loss.SDFLoss"
] | [((3634, 3643), 'layers.sdf_matching_loss.SDFLoss', 'SDFLoss', ([], {}), '()\n', (3641, 3643), False, 'from layers.sdf_matching_loss import SDFLoss\n'), ((7331, 7342), 'time.time', 'time.time', ([], {}), '()\n', (7340, 7342), False, 'import time\n'), ((7989, 8000), 'time.time', 'time.time', ([], {}), '()\n', (7998, 8000), False, 'import time\n')] |
from pymunk.vec2d import Vec2d
from src.utils import AngleHelper
import pymunk
class Map:
def __init__(self):
self.crossings = []
self.streets = []
self.STREET_WIDTH = 50
self.SIDEWALK_WIDTH = 60
self.sidewalk_crossings = []
self.sidewalks = []
self.distances = []
self.spawn_positions = []
self.back_sprite = None
self.front_sprite = None
self.streets_length = []
self.sidewalks_length = []
self.collision_vertices = []
self.collision_edges = []
self.col_body = None
self.col_shapes = []
self.spawn_positions = [(100, 100), (150, 100), (200, 100), (250, 100)]
self.spawn_rotations = [0, 0, 0, 0]
def generate_body(self):
self.col_body = pymunk.Body(body_type=pymunk.Body.STATIC)
for edge in self.collision_edges:
self.col_shapes += [pymunk.shapes.Segment(self.col_body,
Vec2d(self.collision_vertices[edge[0]]),
Vec2d(self.collision_vertices[edge[1]]), 1)]
#for cvertex in self.collision_vertices:
# self.col_shapes += [pymunk.shapes.Circle(self.col_body, 8, cvertex)]
def generate_matrix(self):
self.distances.clear()
for i in range(len(self.sidewalk_crossings)):
self.distances += [[]]
for i in range(len(self.sidewalk_crossings)):
for j in range(len(self.sidewalk_crossings)):
self.distances[i].append(-1)
for i in range(len(self.sidewalk_crossings)):
self.distances[i][i] = 0
for edge in self.sidewalks:
self.distances[edge[0]][edge[1]] = self.distances[edge[1]][edge[0]] = \
Vec2d(self.sidewalk_crossings[edge[0]]).get_distance(Vec2d(self.sidewalk_crossings[edge[1]]))
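        # Floyd-Warshall relaxation: all-pairs shortest paths over the sidewalk graph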
for k in range(len(self.sidewalk_crossings)):
for i in range(len(self.sidewalk_crossings)):
for j in range(len(self.sidewalk_crossings)):
if self.distances[i][k] != -1 and self.distances[k][j] != -1:
if self.distances[i][k] + self.distances[k][j] < self.distances[i][j] or \
self.distances[i][j] == -1:
self.distances[i][j] = self.distances[i][k] + self.distances[k][j]
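    # Example: crossings A, B, C at x = 0, 100, 250 with sidewalks [(0, 1), (1, 2)]
    # give distances[0][2] == 250.0 after the relaxation above; unreachable
    # pairs keep the -1 sentinel.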
def generate_sidewalks(self):
self.sidewalk_crossings.clear()
self.sidewalks.clear()
vertices_streets = []
for crossing_index in range(len(self.crossings)):
vertex_street_directions = []
for street in self.streets:
if crossing_index in street:
vertex_street_directions.append(
((Vec2d(self.crossings[street[(street.index(crossing_index) + 1) % 2]]) -
Vec2d(self.crossings[crossing_index])).normalized(), self.streets.index(street)))
vertex_street_directions.sort(key=lambda obj: obj[0].angle)
for direction_index in range(len(vertex_street_directions)):
temp_vec = vertex_street_directions[direction_index][0].rotated(AngleHelper.angle_to_positive(
vertex_street_directions[direction_index][0].get_angle_between(
vertex_street_directions[(direction_index + 1) % len(vertex_street_directions)][0]))/2)
temp_vec.length = self.SIDEWALK_WIDTH / \
(2 * abs(temp_vec.dot(vertex_street_directions[direction_index][0].perpendicular())))
vertices_streets.append((Vec2d(self.crossings[crossing_index]) + temp_vec,
vertex_street_directions[direction_index][1],
vertex_street_directions[(direction_index + 1) %
len(vertex_street_directions)][1], crossing_index))
for element in vertices_streets:
self.sidewalk_crossings.append((element[0].x, element[0].y))
for element_index in range(len(vertices_streets)):
if element_index < len(vertices_streets) - 1:
if vertices_streets[element_index][3] == vertices_streets[element_index + 1][3]:
self.sidewalks.append((self.sidewalk_crossings.index(vertices_streets[element_index][0]),
self.sidewalk_crossings.index(vertices_streets[element_index + 1][0])))
elif element_index > 0:
if vertices_streets[element_index][3] == vertices_streets[element_index - 1][3]:
lower = element_index - 1
while lower > 0 and vertices_streets[lower][3] == vertices_streets[lower - 1][3]:
lower -= 1
if vertices_streets[lower][3] == vertices_streets[element_index][3]:
self.sidewalks.append((self.sidewalk_crossings.index(vertices_streets[element_index][0]),
self.sidewalk_crossings.index(vertices_streets[lower][0])))
for street_index in range(len(self.streets)):
crossings = []
for vertex_street in vertices_streets:
if vertex_street[1] == street_index or vertex_street[2] == street_index:
crossings.append(vertex_street)
street_direction = (Vec2d(self.crossings[self.streets[street_index][0]]) -
Vec2d(self.crossings[self.streets[street_index][1]])).normalized()
if street_direction.perpendicular().dot(crossings[0][0] -
Vec2d(self.crossings[self.streets[street_index][0]])) * \
street_direction.perpendicular().dot(crossings[1][0] -
Vec2d(self.crossings[self.streets[street_index][0]])) > 0:
self.sidewalks.append((self.sidewalk_crossings.index(crossings[0][0]),
self.sidewalk_crossings.index(crossings[1][0])))
self.sidewalks.append((self.sidewalk_crossings.index(crossings[2][0]),
self.sidewalk_crossings.index(crossings[3][0])))
elif street_direction.perpendicular().dot(crossings[0][0] -
Vec2d(self.crossings[self.streets[street_index][0]])) * \
street_direction.perpendicular().dot(crossings[2][0] -
Vec2d(self.crossings[self.streets[street_index][0]])) > 0:
self.sidewalks.append((self.sidewalk_crossings.index(crossings[0][0]),
self.sidewalk_crossings.index(crossings[2][0])))
self.sidewalks.append((self.sidewalk_crossings.index(crossings[1][0]),
self.sidewalk_crossings.index(crossings[3][0])))
else:
self.sidewalks.append((self.sidewalk_crossings.index(crossings[0][0]),
self.sidewalk_crossings.index(crossings[3][0])))
self.sidewalks.append((self.sidewalk_crossings.index(crossings[1][0]),
self.sidewalk_crossings.index(crossings[2][0])))
def calculate_lengths(self):
self.streets_length.clear()
for street in self.streets:
self.streets_length.append((Vec2d(self.crossings[street[0]]) - Vec2d(self.crossings[street[1]])).length)
self.sidewalks_length.clear()
for sidewalk in self.sidewalks:
self.sidewalks_length.append((Vec2d(self.sidewalk_crossings[sidewalk[0]]) -
Vec2d(self.sidewalk_crossings[sidewalk[1]])).length)
def calculate_internal_variables(self):
self.generate_sidewalks()
self.generate_matrix()
self.calculate_lengths()
def get_street_direction(self, street_index):
if street_index >= len(self.streets):
return
return (Vec2d(self.crossings[self.streets[street_index][1]]) -
Vec2d(self.crossings[self.streets[street_index][0]])).normalized()
def get_sidewalk_direction(self, sidewalk_index):
if sidewalk_index >= len(self.sidewalks):
return
return (Vec2d(self.sidewalk_crossings[self.sidewalks[sidewalk_index][1]]) -
Vec2d(self.sidewalk_crossings[self.sidewalks[sidewalk_index][0]])).normalized()
def street_first_crossing_index(self, street):
return self.crossings.index(self.crossings[self.streets[street][0]])
def street_second_crossing_index(self, street):
return self.crossings.index(self.crossings[self.streets[street][1]])
def sidewalk_first_crossing_index(self, sidewalk):
return self.sidewalk_crossings.index(self.sidewalk_crossings[self.sidewalks[sidewalk][0]])
def sidewalk_second_crossing_index(self, sidewalk):
return self.sidewalk_crossings.index(self.sidewalk_crossings[self.sidewalks[sidewalk][1]])
def street_first_crossing(self, street):
return self.crossings[self.streets[street][0]]
def street_second_crossing(self, street):
return self.crossings[self.streets[street][1]]
def sidewalk_first_crossing(self, sidewalk):
return self.sidewalk_crossings[self.sidewalks[sidewalk][0]]
def sidewalk_second_crossing(self, sidewalk):
return self.sidewalk_crossings[self.sidewalks[sidewalk][1]]
def draw_back(self):
if self.back_sprite is not None:
self.back_sprite.draw()
def draw_front(self):
if self.front_sprite is not None:
self.front_sprite.draw()
| [
"pymunk.vec2d.Vec2d",
"pymunk.Body"
] | [((801, 842), 'pymunk.Body', 'pymunk.Body', ([], {'body_type': 'pymunk.Body.STATIC'}), '(body_type=pymunk.Body.STATIC)\n', (812, 842), False, 'import pymunk\n'), ((1869, 1908), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.sidewalk_crossings[edge[1]]'], {}), '(self.sidewalk_crossings[edge[1]])\n', (1874, 1908), False, 'from pymunk.vec2d import Vec2d\n'), ((1008, 1047), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.collision_vertices[edge[0]]'], {}), '(self.collision_vertices[edge[0]])\n', (1013, 1047), False, 'from pymunk.vec2d import Vec2d\n'), ((1103, 1142), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.collision_vertices[edge[1]]'], {}), '(self.collision_vertices[edge[1]])\n', (1108, 1142), False, 'from pymunk.vec2d import Vec2d\n'), ((1816, 1855), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.sidewalk_crossings[edge[0]]'], {}), '(self.sidewalk_crossings[edge[0]])\n', (1821, 1855), False, 'from pymunk.vec2d import Vec2d\n'), ((8876, 8928), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.crossings[self.streets[street_index][1]]'], {}), '(self.crossings[self.streets[street_index][1]])\n', (8881, 8928), False, 'from pymunk.vec2d import Vec2d\n'), ((8947, 8999), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.crossings[self.streets[street_index][0]]'], {}), '(self.crossings[self.streets[street_index][0]])\n', (8952, 8999), False, 'from pymunk.vec2d import Vec2d\n'), ((9154, 9219), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.sidewalk_crossings[self.sidewalks[sidewalk_index][1]]'], {}), '(self.sidewalk_crossings[self.sidewalks[sidewalk_index][1]])\n', (9159, 9219), False, 'from pymunk.vec2d import Vec2d\n'), ((9238, 9303), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.sidewalk_crossings[self.sidewalks[sidewalk_index][0]]'], {}), '(self.sidewalk_crossings[self.sidewalks[sidewalk_index][0]])\n', (9243, 9303), False, 'from pymunk.vec2d import Vec2d\n'), ((6145, 6197), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.crossings[self.streets[street_index][0]]'], {}), '(self.crossings[self.streets[street_index][0]])\n', (6150, 6197), False, 'from pymunk.vec2d import Vec2d\n'), ((6232, 6284), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.crossings[self.streets[street_index][1]]'], {}), '(self.crossings[self.streets[street_index][1]])\n', (6237, 6284), False, 'from pymunk.vec2d import Vec2d\n'), ((8259, 8291), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.crossings[street[0]]'], {}), '(self.crossings[street[0]])\n', (8264, 8291), False, 'from pymunk.vec2d import Vec2d\n'), ((8294, 8326), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.crossings[street[1]]'], {}), '(self.crossings[street[1]])\n', (8299, 8326), False, 'from pymunk.vec2d import Vec2d\n'), ((8456, 8499), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.sidewalk_crossings[sidewalk[0]]'], {}), '(self.sidewalk_crossings[sidewalk[0]])\n', (8461, 8499), False, 'from pymunk.vec2d import Vec2d\n'), ((8544, 8587), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.sidewalk_crossings[sidewalk[1]]'], {}), '(self.sidewalk_crossings[sidewalk[1]])\n', (8549, 8587), False, 'from pymunk.vec2d import Vec2d\n'), ((3658, 3695), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.crossings[crossing_index]'], {}), '(self.crossings[crossing_index])\n', (3663, 3695), False, 'from pymunk.vec2d import Vec2d\n'), ((6421, 6473), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.crossings[self.streets[street_index][0]]'], {}), '(self.crossings[self.streets[street_index][0]])\n', (6426, 6473), False, 'from pymunk.vec2d import Vec2d\n'), ((6611, 6663), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.crossings[self.streets[street_index][0]]'], {}), '(self.crossings[self.streets[street_index][0]])\n', (6616, 6663), False, 'from pymunk.vec2d import Vec2d\n'), ((7146, 7198), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.crossings[self.streets[street_index][0]]'], {}), '(self.crossings[self.streets[street_index][0]])\n', (7151, 7198), False, 'from pymunk.vec2d import Vec2d\n'), ((7336, 7388), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.crossings[self.streets[street_index][0]]'], {}), '(self.crossings[self.streets[street_index][0]])\n', (7341, 7388), False, 'from pymunk.vec2d import Vec2d\n'), ((2919, 2956), 'pymunk.vec2d.Vec2d', 'Vec2d', (['self.crossings[crossing_index]'], {}), '(self.crossings[crossing_index])\n', (2924, 2956), False, 'from pymunk.vec2d import Vec2d\n')] |
import pyautogui
pyautogui.moveTo(2317, 425, duration=1)
| [
"pyautogui.moveTo"
] | [((18, 57), 'pyautogui.moveTo', 'pyautogui.moveTo', (['(2317)', '(425)'], {'duration': '(1)'}), '(2317, 425, duration=1)\n', (34, 57), False, 'import pyautogui\n')] |
import unittest
import paddle
import neural_renderer_paddle as nr
class TestLighting(unittest.TestCase):
def test_case1(self):
"""Test whether it is executable."""
faces = paddle.randn([64, 16, 3, 3], dtype=paddle.float32)
textures = paddle.randn([64, 16, 8, 8, 8, 3], dtype=paddle.float32)
nr.lighting(faces, textures)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"paddle.randn",
"neural_renderer_paddle.lighting"
] | [((396, 411), 'unittest.main', 'unittest.main', ([], {}), '()\n', (409, 411), False, 'import unittest\n'), ((200, 250), 'paddle.randn', 'paddle.randn', (['[64, 16, 3, 3]'], {'dtype': 'paddle.float32'}), '([64, 16, 3, 3], dtype=paddle.float32)\n', (212, 250), False, 'import paddle\n'), ((270, 326), 'paddle.randn', 'paddle.randn', (['[64, 16, 8, 8, 8, 3]'], {'dtype': 'paddle.float32'}), '([64, 16, 8, 8, 8, 3], dtype=paddle.float32)\n', (282, 326), False, 'import paddle\n'), ((335, 363), 'neural_renderer_paddle.lighting', 'nr.lighting', (['faces', 'textures'], {}), '(faces, textures)\n', (346, 363), True, 'import neural_renderer_paddle as nr\n')] |
"""
Citing from jasper from Nvidia
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class SubBlock(nn.Module):
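    # One Jasper sub-block: Conv1d -> BatchNorm1d -> ReLU -> Dropout.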
def __init__(self, dropout):
super(SubBlock, self).__init__()
self.conv = nn.Conv1d(in_channels=256, out_channels=256,
kernel_size=11, stride=1, padding=5)
self.batch_norm = nn.BatchNorm1d(num_features=256)
self.activation = nn.ReLU()
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
x = self.conv(x)
x = self.batch_norm(x)
x = self.activation(x)
return self.dropout(x)
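# Note: with kernel_size=11, stride=1, padding=5 the convolution preserves the
# sequence length (L_out = L + 2*5 - 11 + 1 = L), so SubBlock maps an input of
# shape (N, 256, L) to an output of the same shape (N, 256, L).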
class Block():
def __init__(self):
pass | [
"torch.nn.BatchNorm1d",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Conv1d"
] | [((231, 316), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(256)', 'out_channels': '(256)', 'kernel_size': '(11)', 'stride': '(1)', 'padding': '(5)'}), '(in_channels=256, out_channels=256, kernel_size=11, stride=1,\n padding=5)\n', (240, 316), True, 'import torch.nn as nn\n'), ((369, 401), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', ([], {'num_features': '(256)'}), '(num_features=256)\n', (383, 401), True, 'import torch.nn as nn\n'), ((428, 437), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (435, 437), True, 'import torch.nn as nn\n'), ((461, 482), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (471, 482), True, 'import torch.nn as nn\n')] |
import pandas as pd
# Load the original csv
df = pd.read_csv('data/carto.csv')
# Group by name of borough
# The output is a list of tuples with ('name of borough', dataframe of that borough)
grouped = list(df.groupby('location_name'))
for x in grouped:
# Get the name of the borough
name_of_borough = x[0]
# Sort the dataframe by date
    sorted_df = x[1].sort_values('date')
# Create a csv for each dataframe
sorted_df.to_csv('data/output/' + name_of_borough + '.tsv', index=None, sep='\t')
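    # e.g. rows whose 'location_name' is "Bronx" (an illustrative value) end up
    # in data/output/Bronx.tsv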
| [
"pandas.read_csv"
] | [((50, 79), 'pandas.read_csv', 'pd.read_csv', (['"""data/carto.csv"""'], {}), "('data/carto.csv')\n", (61, 79), True, 'import pandas as pd\n')] |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from torch.utils.data import Dataset
import pytorch_lightning as pl
PATH_LEGACY = os.path.dirname(__file__)
class RandomDataset(Dataset):
def __init__(self, size, length: int = 100):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class DummyModel(pl.LightningModule):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
return self.layer(x)
def _loss(self, batch, prediction):
# An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
def _step(self, batch, batch_idx):
output = self.layer(batch)
loss = self._loss(batch, output)
# return {'loss': loss} # used for PL<1.0
return loss # used for PL >= 1.0
def training_step(self, batch, batch_idx):
return self._step(batch, batch_idx)
def validation_step(self, batch, batch_idx):
self._step(batch, batch_idx)
def test_step(self, batch, batch_idx):
self._step(batch, batch_idx)
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
def train_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64))
def val_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64))
def test_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64))
def main_train(dir_path, max_epochs: int = 5):
trainer = pl.Trainer(
default_root_dir=dir_path,
checkpoint_callback=True,
max_epochs=max_epochs,
)
model = DummyModel()
trainer.fit(model)
if __name__ == '__main__':
path_dir = os.path.join(PATH_LEGACY, 'checkpoints', str(pl.__version__))
main_train(path_dir)
| [
"torch.ones_like",
"torch.optim.lr_scheduler.StepLR",
"os.path.dirname",
"pytorch_lightning.Trainer",
"torch.nn.Linear",
"torch.randn"
] | [((694, 719), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (709, 719), False, 'import os\n'), ((2463, 2554), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'default_root_dir': 'dir_path', 'checkpoint_callback': '(True)', 'max_epochs': 'max_epochs'}), '(default_root_dir=dir_path, checkpoint_callback=True, max_epochs=\n max_epochs)\n', (2473, 2554), True, 'import pytorch_lightning as pl\n'), ((848, 873), 'torch.randn', 'torch.randn', (['length', 'size'], {}), '(length, size)\n', (859, 873), False, 'import torch\n'), ((1102, 1124), 'torch.nn.Linear', 'torch.nn.Linear', (['(32)', '(2)'], {}), '(32, 2)\n', (1117, 1124), False, 'import torch\n'), ((2006, 2061), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(1)'}), '(optimizer, step_size=1)\n', (2037, 2061), False, 'import torch\n'), ((1379, 1406), 'torch.ones_like', 'torch.ones_like', (['prediction'], {}), '(prediction)\n', (1394, 1406), False, 'import torch\n')] |
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def calculadora(request):
context = {
'titulo' : "Ingrese los numeros",
}
return render(request,'operaciones/formulario.html',context)
def resultado(request):
a=request.POST['numeroa']
b=request.POST['numerob']
    if request.POST['operacion'] == 'suma':
        resultado = int(a) + int(b)
    elif request.POST['operacion'] == 'resta':
        resultado = int(a) - int(b)
    elif request.POST['operacion'] == 'multiplicacion':
        resultado = int(a) * int(b)
context = {
'operacion' : request.POST['operacion'],
'numeroa' : request.POST['numeroa'],
'numerob' : request.POST['numerob'],
'titulo' : "Resultado de la operación",
'resultado' :resultado
}
return render(request,'operaciones/resultados.html',context)
def datosCilindro(request):
context = {
'titulo' : "CÁLCULO DEL VOLUMEN DE UN CILINDRO "
}
return render(request,'operaciones/formCilindro.html',context)
def resultVolumen(request):
diametro=request.POST['diametro']
altura=request.POST['altura']
radio=float(diametro)/2
volumen=(3.1416*(radio)**2)*float(altura)
context = {
'titulo' : 'VOLUMEN DEL CILINDRO',
'volumen' : volumen
}
return render(request,'operaciones/resultVolumen.html',context)
| [
"django.shortcuts.render"
] | [((202, 257), 'django.shortcuts.render', 'render', (['request', '"""operaciones/formulario.html"""', 'context'], {}), "(request, 'operaciones/formulario.html', context)\n", (208, 257), False, 'from django.shortcuts import render\n'), ((837, 892), 'django.shortcuts.render', 'render', (['request', '"""operaciones/resultados.html"""', 'context'], {}), "(request, 'operaciones/resultados.html', context)\n", (843, 892), False, 'from django.shortcuts import render\n'), ((1010, 1067), 'django.shortcuts.render', 'render', (['request', '"""operaciones/formCilindro.html"""', 'context'], {}), "(request, 'operaciones/formCilindro.html', context)\n", (1016, 1067), False, 'from django.shortcuts import render\n'), ((1347, 1405), 'django.shortcuts.render', 'render', (['request', '"""operaciones/resultVolumen.html"""', 'context'], {}), "(request, 'operaciones/resultVolumen.html', context)\n", (1353, 1405), False, 'from django.shortcuts import render\n')] |
import deeplabcut as dlc
import os
from fcutils.file_io.utils import listdir
# from fcutils.video.utils import trim_clip
config_file = 'D:\\Dropbox (UCL - SWC)\\Rotation_vte\\Locomotion\\dlc\\locomotion-Federico\\config.yaml'
dlc.train_network(config_file)
# fld = 'D:\\Dropbox (UCL - SWC)\\Rotation_vte\\Locomotion\\dlc'
# vids = [os.path.join(fld, '200203_CA8493_video_trim.mp4'), os.path.join(fld, '200204_CA8491_video_trim.mp4'), os.path.join(fld, '200204_CA8494_video_trim.mp4')]
# dlc.extract_outlier_frames(config_file, vids, epsilon=40)
# dlc.merge_datasets(config_file)
# vids = [f for f in listdir(fld) if f.endswith('.mp4')]
# for vid in vids:
# savepath = vid.split(".")[0]+'_trim.mp4'
# trim_clip(vid, savepath, start=0.25, stop=0.35) | [
"deeplabcut.train_network"
] | [((230, 260), 'deeplabcut.train_network', 'dlc.train_network', (['config_file'], {}), '(config_file)\n', (247, 260), True, 'import deeplabcut as dlc\n')] |
import pytest
from dnaio import Sequence
from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter
def test_issue_52():
adapter = Adapter(
sequence='GAACTCCAGTCACNNNNN',
where=Where.BACK,
remove='suffix',
max_error_rate=0.12,
min_overlap=5,
read_wildcards=False,
adapter_wildcards=True)
read = Sequence(name="abc", sequence='CCCCAGAACTACAGTCCCGGC')
am = Match(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2,
remove_before=False, adapter=adapter, read=read)
assert am.wildcards() == 'GGC'
"""
The result above should actually be 'CGGC' since the correct
alignment is this one:
adapter GAACTCCAGTCACNNNNN
mismatches X X
read CCCCAGAACTACAGTC-CCGGC
Since we do not keep the alignment, guessing 'GGC' is the best we
can currently do.
"""
def test_issue_80():
# This issue turned out to not be an actual issue with the alignment
# algorithm. The following alignment is found because it has more matches
# than the 'obvious' one:
#
# TCGTATGCCGTCTTC
# =========X==XX=
# TCGTATGCCCTC--C
#
# This is correct, albeit a little surprising, since an alignment without
# indels would have only two errors.
adapter = Adapter(
sequence="TCGTATGCCGTCTTC",
where=Where.BACK,
remove='suffix',
max_error_rate=0.2,
min_overlap=3,
read_wildcards=False,
adapter_wildcards=False)
read = Sequence(name="seq2", sequence="TCGTATGCCCTCC")
result = adapter.match_to(read)
assert result.errors == 3, result
assert result.astart == 0, result
assert result.astop == 15, result
def test_str():
a = Adapter('ACGT', where=Where.BACK, remove='suffix', max_error_rate=0.1)
str(a)
str(a.match_to(Sequence(name='seq', sequence='TTACGT')))
def test_linked_adapter():
front_adapter = Adapter('AAAA', where=Where.PREFIX, min_overlap=4)
back_adapter = Adapter('TTTT', where=Where.BACK, min_overlap=3)
linked_adapter = LinkedAdapter(
front_adapter, back_adapter, front_required=True, back_required=False, name='name')
assert linked_adapter.front_adapter.min_overlap == 4
assert linked_adapter.back_adapter.min_overlap == 3
sequence = Sequence(name='seq', sequence='AAAACCCCCTTTT')
trimmed = linked_adapter.match_to(sequence).trimmed()
assert trimmed.name == 'seq'
assert trimmed.sequence == 'CCCCC'
def test_info_record():
adapter = Adapter(
sequence='GAACTCCAGTCACNNNNN',
where=Where.BACK,
max_error_rate=0.12,
min_overlap=5,
read_wildcards=False,
adapter_wildcards=True,
name="Foo")
read = Sequence(name="abc", sequence='CCCCAGAACTACAGTCCCGGC')
am = Match(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2, remove_before=False,
adapter=adapter, read=read)
assert am.get_info_record() == (
"abc",
2,
5,
21,
'CCCCA',
'GAACTACAGTCCCGGC',
'',
'Foo',
'',
'',
'',
)
def test_random_match_probabilities():
a = Adapter('A', where=Where.BACK, max_error_rate=0.1).create_statistics()
assert a.back.random_match_probabilities(0.5) == [1, 0.25]
assert a.back.random_match_probabilities(0.2) == [1, 0.4]
for s in ('ACTG', 'XMWH'):
a = Adapter(s, where=Where.BACK, max_error_rate=0.1).create_statistics()
assert a.back.random_match_probabilities(0.5) == [1, 0.25, 0.25**2, 0.25**3, 0.25**4]
assert a.back.random_match_probabilities(0.2) == [1, 0.4, 0.4*0.1, 0.4*0.1*0.4, 0.4*0.1*0.4*0.1]
a = Adapter('GTCA', where=Where.FRONT, max_error_rate=0.1).create_statistics()
assert a.front.random_match_probabilities(0.5) == [1, 0.25, 0.25**2, 0.25**3, 0.25**4]
assert a.front.random_match_probabilities(0.2) == [1, 0.4, 0.4*0.1, 0.4*0.1*0.4, 0.4*0.1*0.4*0.1]
def test_add_adapter_statistics():
stats = Adapter('A', name='name', where=Where.BACK, max_error_rate=0.1).create_statistics()
end_stats = stats.back
end_stats.adjacent_bases['A'] = 7
end_stats.adjacent_bases['C'] = 19
end_stats.adjacent_bases['G'] = 23
end_stats.adjacent_bases['T'] = 42
end_stats.adjacent_bases[''] = 45
end_stats.errors[10][0] = 100
end_stats.errors[10][1] = 11
end_stats.errors[10][2] = 3
end_stats.errors[20][0] = 600
end_stats.errors[20][1] = 66
end_stats.errors[20][2] = 6
stats2 = Adapter('A', name='name', where=Where.BACK, max_error_rate=0.1).create_statistics()
end_stats2 = stats2.back
end_stats2.adjacent_bases['A'] = 43
end_stats2.adjacent_bases['C'] = 31
end_stats2.adjacent_bases['G'] = 27
end_stats2.adjacent_bases['T'] = 8
end_stats2.adjacent_bases[''] = 5
end_stats2.errors[10][0] = 234
end_stats2.errors[10][1] = 14
end_stats2.errors[10][3] = 5
end_stats2.errors[15][0] = 90
end_stats2.errors[15][1] = 17
end_stats2.errors[15][2] = 2
stats += stats2
r = stats.back
assert r.adjacent_bases == {'A': 50, 'C': 50, 'G': 50, 'T': 50, '': 50}
assert r.errors == {
10: {0: 334, 1: 25, 2: 3, 3: 5},
15: {0: 90, 1: 17, 2: 2},
20: {0: 600, 1: 66, 2: 6},
}
def test_issue_265():
"""Crash when accessing the matches property of non-anchored linked adapters"""
s = Sequence('name', 'AAAATTTT')
front_adapter = Adapter('GGG', where=Where.FRONT)
back_adapter = Adapter('TTT', where=Where.BACK)
la = LinkedAdapter(front_adapter, back_adapter, front_required=False, back_required=False, name='name')
assert la.match_to(s).matches == 3
@pytest.mark.parametrize("where", [Where.PREFIX, Where.SUFFIX])
def test_no_indels_empty_read(where):
# Issue #376
adapter = Adapter('ACGT', where=where, indels=False)
empty = Sequence('name', '')
adapter.match_to(empty)
| [
"cutadapt.adapters.Adapter",
"cutadapt.adapters.Match",
"pytest.mark.parametrize",
"dnaio.Sequence",
"cutadapt.adapters.LinkedAdapter"
] | [((5733, 5795), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""where"""', '[Where.PREFIX, Where.SUFFIX]'], {}), "('where', [Where.PREFIX, Where.SUFFIX])\n", (5756, 5795), False, 'import pytest\n'), ((146, 309), 'cutadapt.adapters.Adapter', 'Adapter', ([], {'sequence': '"""GAACTCCAGTCACNNNNN"""', 'where': 'Where.BACK', 'remove': '"""suffix"""', 'max_error_rate': '(0.12)', 'min_overlap': '(5)', 'read_wildcards': '(False)', 'adapter_wildcards': '(True)'}), "(sequence='GAACTCCAGTCACNNNNN', where=Where.BACK, remove='suffix',\n max_error_rate=0.12, min_overlap=5, read_wildcards=False,\n adapter_wildcards=True)\n", (153, 309), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((370, 424), 'dnaio.Sequence', 'Sequence', ([], {'name': '"""abc"""', 'sequence': '"""CCCCAGAACTACAGTCCCGGC"""'}), "(name='abc', sequence='CCCCAGAACTACAGTCCCGGC')\n", (378, 424), False, 'from dnaio import Sequence\n'), ((434, 554), 'cutadapt.adapters.Match', 'Match', ([], {'astart': '(0)', 'astop': '(17)', 'rstart': '(5)', 'rstop': '(21)', 'matches': '(15)', 'errors': '(2)', 'remove_before': '(False)', 'adapter': 'adapter', 'read': 'read'}), '(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2,\n remove_before=False, adapter=adapter, read=read)\n', (439, 554), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((1322, 1482), 'cutadapt.adapters.Adapter', 'Adapter', ([], {'sequence': '"""TCGTATGCCGTCTTC"""', 'where': 'Where.BACK', 'remove': '"""suffix"""', 'max_error_rate': '(0.2)', 'min_overlap': '(3)', 'read_wildcards': '(False)', 'adapter_wildcards': '(False)'}), "(sequence='TCGTATGCCGTCTTC', where=Where.BACK, remove='suffix',\n max_error_rate=0.2, min_overlap=3, read_wildcards=False,\n adapter_wildcards=False)\n", (1329, 1482), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((1543, 1590), 'dnaio.Sequence', 'Sequence', ([], {'name': '"""seq2"""', 'sequence': '"""TCGTATGCCCTCC"""'}), "(name='seq2', sequence='TCGTATGCCCTCC')\n", (1551, 1590), False, 'from dnaio import Sequence\n'), ((1767, 1837), 'cutadapt.adapters.Adapter', 'Adapter', (['"""ACGT"""'], {'where': 'Where.BACK', 'remove': '"""suffix"""', 'max_error_rate': '(0.1)'}), "('ACGT', where=Where.BACK, remove='suffix', max_error_rate=0.1)\n", (1774, 1837), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((1959, 2009), 'cutadapt.adapters.Adapter', 'Adapter', (['"""AAAA"""'], {'where': 'Where.PREFIX', 'min_overlap': '(4)'}), "('AAAA', where=Where.PREFIX, min_overlap=4)\n", (1966, 2009), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((2029, 2077), 'cutadapt.adapters.Adapter', 'Adapter', (['"""TTTT"""'], {'where': 'Where.BACK', 'min_overlap': '(3)'}), "('TTTT', where=Where.BACK, min_overlap=3)\n", (2036, 2077), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((2100, 2201), 'cutadapt.adapters.LinkedAdapter', 'LinkedAdapter', (['front_adapter', 'back_adapter'], {'front_required': '(True)', 'back_required': '(False)', 'name': '"""name"""'}), "(front_adapter, back_adapter, front_required=True,\n back_required=False, name='name')\n", (2113, 2201), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((2336, 2382), 'dnaio.Sequence', 'Sequence', ([], {'name': '"""seq"""', 'sequence': '"""AAAACCCCCTTTT"""'}), "(name='seq', sequence='AAAACCCCCTTTT')\n", (2344, 2382), False, 'from dnaio import Sequence\n'), ((2553, 2713), 'cutadapt.adapters.Adapter', 'Adapter', ([], {'sequence': '"""GAACTCCAGTCACNNNNN"""', 'where': 'Where.BACK', 'max_error_rate': '(0.12)', 'min_overlap': '(5)', 'read_wildcards': '(False)', 'adapter_wildcards': '(True)', 'name': '"""Foo"""'}), "(sequence='GAACTCCAGTCACNNNNN', where=Where.BACK, max_error_rate=\n 0.12, min_overlap=5, read_wildcards=False, adapter_wildcards=True, name\n ='Foo')\n", (2560, 2713), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((2772, 2826), 'dnaio.Sequence', 'Sequence', ([], {'name': '"""abc"""', 'sequence': '"""CCCCAGAACTACAGTCCCGGC"""'}), "(name='abc', sequence='CCCCAGAACTACAGTCCCGGC')\n", (2780, 2826), False, 'from dnaio import Sequence\n'), ((2836, 2956), 'cutadapt.adapters.Match', 'Match', ([], {'astart': '(0)', 'astop': '(17)', 'rstart': '(5)', 'rstop': '(21)', 'matches': '(15)', 'errors': '(2)', 'remove_before': '(False)', 'adapter': 'adapter', 'read': 'read'}), '(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2,\n remove_before=False, adapter=adapter, read=read)\n', (2841, 2956), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((5448, 5476), 'dnaio.Sequence', 'Sequence', (['"""name"""', '"""AAAATTTT"""'], {}), "('name', 'AAAATTTT')\n", (5456, 5476), False, 'from dnaio import Sequence\n'), ((5497, 5530), 'cutadapt.adapters.Adapter', 'Adapter', (['"""GGG"""'], {'where': 'Where.FRONT'}), "('GGG', where=Where.FRONT)\n", (5504, 5530), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((5550, 5582), 'cutadapt.adapters.Adapter', 'Adapter', (['"""TTT"""'], {'where': 'Where.BACK'}), "('TTT', where=Where.BACK)\n", (5557, 5582), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((5592, 5694), 'cutadapt.adapters.LinkedAdapter', 'LinkedAdapter', (['front_adapter', 'back_adapter'], {'front_required': '(False)', 'back_required': '(False)', 'name': '"""name"""'}), "(front_adapter, back_adapter, front_required=False,\n back_required=False, name='name')\n", (5605, 5694), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((5865, 5907), 'cutadapt.adapters.Adapter', 'Adapter', (['"""ACGT"""'], {'where': 'where', 'indels': '(False)'}), "('ACGT', where=where, indels=False)\n", (5872, 5907), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((5920, 5940), 'dnaio.Sequence', 'Sequence', (['"""name"""', '""""""'], {}), "('name', '')\n", (5928, 5940), False, 'from dnaio import Sequence\n'), ((1868, 1907), 'dnaio.Sequence', 'Sequence', ([], {'name': '"""seq"""', 'sequence': '"""TTACGT"""'}), "(name='seq', sequence='TTACGT')\n", (1876, 1907), False, 'from dnaio import Sequence\n'), ((3210, 3260), 'cutadapt.adapters.Adapter', 'Adapter', (['"""A"""'], {'where': 'Where.BACK', 'max_error_rate': '(0.1)'}), "('A', where=Where.BACK, max_error_rate=0.1)\n", (3217, 3260), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((3727, 3781), 'cutadapt.adapters.Adapter', 'Adapter', (['"""GTCA"""'], {'where': 'Where.FRONT', 'max_error_rate': '(0.1)'}), "('GTCA', where=Where.FRONT, max_error_rate=0.1)\n", (3734, 3781), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((4044, 4107), 'cutadapt.adapters.Adapter', 'Adapter', (['"""A"""'], {'name': '"""name"""', 'where': 'Where.BACK', 'max_error_rate': '(0.1)'}), "('A', name='name', where=Where.BACK, max_error_rate=0.1)\n", (4051, 4107), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((4561, 4624), 'cutadapt.adapters.Adapter', 'Adapter', (['"""A"""'], {'name': '"""name"""', 'where': 'Where.BACK', 'max_error_rate': '(0.1)'}), "('A', name='name', where=Where.BACK, max_error_rate=0.1)\n", (4568, 4624), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n'), ((3450, 3498), 'cutadapt.adapters.Adapter', 'Adapter', (['s'], {'where': 'Where.BACK', 'max_error_rate': '(0.1)'}), '(s, where=Where.BACK, max_error_rate=0.1)\n', (3457, 3498), False, 'from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter\n')] |
#!/bin/python3
import os
#
# Complete the 'solve' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts following parameters:
# 1. 2D_INTEGER_ARRAY tree
# 2. 2D_INTEGER_ARRAY queries
#
def solve(tree, queries):
# Write your code here
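    # Offline approach: process tree edges in order of increasing weight with a
    # union-find; merging components of sizes s1 and s2 adds s1 * s2 newly
    # connected pairs.  paths[w] caches the cumulative pair count after all
    # edges of weight <= w have been added, so each query (l, r) is answered
    # via binary search as paths[w_r] - paths[w_{l-1}].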
from bisect import bisect_right
def find(x, p):
while p[x] != x:
p[x] = p[p[x]]
x = p[x]
return p[x]
def union(x, y, w8, p, r, d):
px = find(x, p)
py = find(y, p)
d[w8] += len(r[px]) * len(r[py])
if px != py:
if len(r[py]) < len(r[px]):
p[py] = px
r[px].update(r[py])
del r[py]
else:
p[px] = py
r[py].update(r[px])
del r[px]
ln = len(tree) + 1
tree.sort(key=lambda x: x[-1])
paths = {0: 0}
weights = [0]
parents = {i: i for i in range(1, ln + 1)}
rep = {i: {i} for i in range(1, ln + 1)}
prev = 0
for u, v, w in tree:
if w != prev:
weights.append(w)
paths[w] = paths[prev]
union(u, v, w, parents, rep, paths)
prev = w
for left, right in queries:
wr = weights[bisect_right(weights, right) - 1]
wl = weights[bisect_right(weights, left - 1) - 1]
yield paths[wr] - paths[wl]
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
q = int(first_multiple_input[1])
tree = []
for _ in range(n - 1):
tree.append(list(map(int, input().rstrip().split())))
queries = []
for _ in range(q):
queries.append(list(map(int, input().rstrip().split())))
result = solve(tree, queries)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
| [
"bisect.bisect_right"
] | [((1244, 1272), 'bisect.bisect_right', 'bisect_right', (['weights', 'right'], {}), '(weights, right)\n', (1256, 1272), False, 'from bisect import bisect_right\n'), ((1299, 1330), 'bisect.bisect_right', 'bisect_right', (['weights', '(left - 1)'], {}), '(weights, left - 1)\n', (1311, 1330), False, 'from bisect import bisect_right\n')] |
#!usr/bin/python2.7
# coding=utf-8
#######################################################
# Name : Multi BF (MBF) <cookie method> #
# File : search_name.py #
# Author : DulLah #
# Github : https://github.com/dz-id #
# Facebook : https://www.facebook.com/dulahz #
# Telegram : https://t.me/unikers #
# Python version : 2.7 #
#######################################################
import os, re, sys, json
from bs4 import BeautifulSoup as parser
from datetime import datetime
def main(self, cookie, url, config):
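    # Scrapes Facebook's mobile people-search pages: each response is parsed
    # with BeautifulSoup, user ids are pulled out of the profile links, and the
    # pagination link is followed until `max` ids have been collected.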
ask = raw_input('\nQuery name: ')
if ask.strip() == '':
exit("\n\033[0;91mRequired, can't empty.\033[0m")
try:
max = int(raw_input('How many? (ex: 100): '))
except ValueError:
exit("\n\033[0;91mStuppid.\033[0m")
if max == 0:
exit("\n\033[0;91mRequired, can't empty.\033[0m")
url_search = url+'/search/people/?q='+ask
statusStop = False
    output = 'dump/' + ask.strip().replace(' ', '_') + '.json'  # strip first so trailing spaces don't become underscores
id = []
print('')
while True:
try:
response = config.httpRequest(url_search, cookie).encode('utf-8')
html = parser(response, 'html.parser')
find = html.find_all('a')
for i in find:
name = i.find('div')
if '+' in str(name) or name == None:
continue
else:
full_name = str(name.text.encode('utf-8'))
if 'profile.php?id=' in str(i):
uid = re.findall(r'\?id=(.*?)&', str(i))
else:
uid = re.findall('/(.*?)\?refid=', str(i))
if len(uid) == 1:
id.append({'uid': uid[0], 'name': full_name})
                    sys.stdout.write("\r - %s \r\n[\033[0;96m%s\033[0m] [\033[0;91m%s\033[0m] Writing IDs, don't close."%(
full_name, datetime.now().strftime('%H:%M:%S'), len(id)
)); sys.stdout.flush()
if len(id) == max or len(id) > max:
statusStop = True
break
if statusStop == False:
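                # 'Lihat Hasil Selanjutnya' is Indonesian for "See next results" (the pagination link)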
if 'Lihat Hasil Selanjutnya' in str(html):
url_search = html.find('a', string='Lihat Hasil Selanjutnya')['href']
else: break
else: break
except KeyboardInterrupt:
print('\n\n\033[0;91mKeyInterrupt, stopped!!\033[0m')
break
try:
for filename in os.listdir('dump'):
os.remove('dump/'+filename)
except: pass
print('\n\nOutput: '+output)
save = open(output, 'w')
save.write(json.dumps(id))
save.close()
| [
"os.listdir",
"json.dumps",
"bs4.BeautifulSoup",
"datetime.datetime.now",
"sys.stdout.flush",
"os.remove"
] | [((2249, 2267), 'os.listdir', 'os.listdir', (['"""dump"""'], {}), "('dump')\n", (2259, 2267), False, 'import os, re, sys, json\n'), ((2382, 2396), 'json.dumps', 'json.dumps', (['id'], {}), '(id)\n', (2392, 2396), False, 'import os, re, sys, json\n'), ((1203, 1234), 'bs4.BeautifulSoup', 'parser', (['response', '"""html.parser"""'], {}), "(response, 'html.parser')\n", (1209, 1234), True, 'from bs4 import BeautifulSoup as parser\n'), ((2272, 2301), 'os.remove', 'os.remove', (["('dump/' + filename)"], {}), "('dump/' + filename)\n", (2281, 2301), False, 'import os, re, sys, json\n'), ((1855, 1873), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1871, 1873), False, 'import os, re, sys, json\n'), ((1801, 1815), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1813, 1815), False, 'from datetime import datetime\n')] |
#
# Copyright (c) 2021 Idiap Research Institute, https://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
""" Computes the proportion of novel bigrams in the summary. """
import numpy as np
import pandas as pd
from interface import Evaluation
from eval_utils import preprocess_article, preprocess_summary
class NovelBigrams(Evaluation):
def __init__(self, input_dir, sent_sep='<q>'):
super().__init__(input_dir)
self.name = 'novel_bigrams'
self.sent_sep = sent_sep
def run(self):
# read articles and summaries
articles = self.read_articles()
summaries = self.read_candidate_summaries()
assert len(articles) == len(summaries)
# compute novel bigrams for each article-summary pair
novel_bigrams = []
for article, summary in zip(articles, summaries):
article_words = preprocess_article(article)
summary_tokenized_sents = preprocess_summary(summary, self.sent_sep)
novel_bigrams.append(NovelBigrams.compute_novel_bigrams(article_words, summary_tokenized_sents))
novel_bigrams = [score for score in novel_bigrams if score is not None] # filter bad summaries
# write results
df = pd.DataFrame({'novel_bigrams': np.mean(novel_bigrams)}, index=[0])
df.to_csv(self.get_output_path(), index=False)
@staticmethod
def compute_novel_bigrams(article_words, summary_tokenized_sents):
""" Computes the proportion of novel bigrams in the summary. """
bigrams_article = set((article_words[i], article_words[i + 1]) for i in range(len(article_words) - 1))
bigrams_summary = set()
for sentence_words in summary_tokenized_sents:
bigrams_summary |= set((sentence_words[i], sentence_words[i + 1]) for i in range(len(sentence_words) - 1))
return len(bigrams_summary - bigrams_article) / len(bigrams_summary) if len(bigrams_summary) > 0 else None
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Computes the proportion of novel bigrams in the summary.')
parser.add_argument('--eval_dir', required=True, help='Evaluation directory')
args = parser.parse_args()
NovelBigrams(args.eval_dir).run()
| [
"numpy.mean",
"eval_utils.preprocess_summary",
"eval_utils.preprocess_article",
"argparse.ArgumentParser"
] | [((2008, 2108), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Computes the proportion of novel bigrams in the summary."""'}), "(description=\n 'Computes the proportion of novel bigrams in the summary.')\n", (2031, 2108), False, 'import argparse\n'), ((869, 896), 'eval_utils.preprocess_article', 'preprocess_article', (['article'], {}), '(article)\n', (887, 896), False, 'from eval_utils import preprocess_article, preprocess_summary\n'), ((935, 977), 'eval_utils.preprocess_summary', 'preprocess_summary', (['summary', 'self.sent_sep'], {}), '(summary, self.sent_sep)\n', (953, 977), False, 'from eval_utils import preprocess_article, preprocess_summary\n'), ((1260, 1282), 'numpy.mean', 'np.mean', (['novel_bigrams'], {}), '(novel_bigrams)\n', (1267, 1282), True, 'import numpy as np\n')] |
import argparse
import channel_access.common as ca
import channel_access.client as cac
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Read process values')
parser.add_argument('pvs', metavar='PV', type=str, nargs='+',
help='list of process values')
args = parser.parse_args()
with cac.Client() as client:
# Create list of PVs. They automatically connect in the background.
        # We don't need monitoring or automatic retrieval of data.
pvs = [ client.createPV(name, monitor=False, initialize=cac.InitData.NONE) for name in args.pvs ]
        # Asynchronous requests are queued. In order for all background
        # requests to be sent, we flush here so that while waiting for the
        # first PVs to connect the others can also connect in the
        # background.
client.flush()
for pv in pvs:
print(pv.name, end='\t')
try:
                # Make sure the PV is connected before calling other
# functions, this can block for up to one second.
pv.ensure_connected(timeout=1.0)
                # Retrieve the value; this can block for up to
                # one second. This also retrieves timestamp, status and severity.
if pv.get(block=1.0) is None:
raise RuntimeError('No value')
except RuntimeError:
# Something went wrong
print('NOT FOUND')
else:
# We need access to multiple attributes whose values
# should all be from the same request. For this we need
                # to access the data dictionary ourselves. ``pv.data``
# returns a copy of the data dictionary.
# Using ``pv.timestamp`` and ``pv.value`` does not guarantee that
# the timestamp belongs to the value.
data = pv.data
print("{timestamp}\t{value}".format(**data))
| [
"channel_access.client.Client",
"argparse.ArgumentParser"
] | [((131, 189), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Read process values"""'}), "(description='Read process values')\n", (154, 189), False, 'import argparse\n'), ((352, 364), 'channel_access.client.Client', 'cac.Client', ([], {}), '()\n', (362, 364), True, 'import channel_access.client as cac\n')] |
from scipy.special import factorial
from itertools import count
import numpy as np
from tmps.utils import pauli, fock
def get_boson_boson_dim(alpha, cutoff_coh):
"""
Find the cutoff for the local dimension (identical everywhere) from the chosen accuracy alpha for the impurity
coherent state.
"""
    # Coherent-state amplitude <n|alpha> = exp(-|alpha|^2 / 2) * alpha**n / sqrt(n!)
pop = lambda x: np.exp(-np.abs(alpha) ** 2 / 2) * alpha ** x / np.sqrt(factorial(x, exact=True))
cutoff_dim = 2
for n in count(cutoff_dim, 1):
if np.abs(pop(n))**2 < cutoff_coh:
cutoff_dim = n
break
return cutoff_dim
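# Illustrative call (argument values are hypothetical): get_boson_boson_dim(2.0, 1e-6)
# returns the first Fock index n whose coherent-state population |<n|alpha>|^2
# drops below cutoff_coh.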
def get_spin_boson_chain_hamiltonian(omega_0, c0, omega, t, bath_local_dim, finite_T=False):
"""
Returns local and coupling parts of the Spin-Boson model chain Hamiltonian
used in Sec. 4.4.1 and 4.4.2 of the thesis.
:param omega_0: Spin energy
:param c0: Spin-Bath coupling
:param omega: Bath energies
:param t: Bath-bath couplings
:param bath_local_dim: Local dimension of the bath
:param finite_T: If set True builds the Hamiltonian for Sec. 4.4.2. If False builds the Hamiltonian for Sec. 4.4.1
:returns: List of local Hamiltonians, List of coupling Hamiltonians
"""
if not finite_T:
# Local Hamiltonian of the System:
spin_loc = omega_0 / 2 * pauli.X
# Coupling between System and bath:
spin_coupl = pauli.Z
else:
# Local Hamiltonian of the System:
spin_loc = omega_0 / 2 * pauli.Z
# Coupling between System and bath:
spin_coupl = np.array([[0, 0], [1, 0]], dtype=np.complex128)
# Local Hamiltonian of the bath
fock_n = fock.n(bath_local_dim)
bath_loc = [energy * fock_n for energy in omega]
# Bath coupling
bath_coupling_op = np.kron(fock.a(bath_local_dim), fock.a_dag(bath_local_dim)) + \
np.kron(fock.a_dag(bath_local_dim), fock.a(bath_local_dim))
bath_bath_coupl = [coupling * bath_coupling_op for coupling in t]
# Spin-Bath coupling
spin_bath_coupl = c0 * (np.kron(spin_coupl, fock.a_dag(bath_local_dim)) +
np.kron(spin_coupl.conj().T, fock.a(bath_local_dim)))
return [spin_loc] + bath_loc, [spin_bath_coupl] + bath_bath_coupl
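# Note: len(omega) == N bath modes and len(t) == N - 1 hoppings give N + 1
# local terms (spin + bath) and N coupling terms (spin-bath + bath-bath).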
def get_spin_boson_star_hamiltonian(omega_0, system_index, gamma, xi, bath_local_dim, finite_T=False):
"""
Returns local and coupling parts of the Spin-Boson model star Hamiltonian
used in Sec. 4.4.1 and 4.4.2 of the thesis.
:param omega_0: Spin energy
:param system_index: Index of the system in the auxiliary chain
:param gamma: System-Bath couplings
:param xi: Bath energies
:param bath_local_dim: Local dimension of the bath
:param finite_T: If set True uses the Hamiltonian for Sec. 4.4.2. If False builds the Hamiltonian for Sec. 4.4.1
:returns: List of local Hamiltonians, List of coupling Hamiltonians
"""
if not finite_T:
# Local Hamiltonian of the System:
spin_loc = omega_0 / 2 * pauli.X
# Coupling between System and bath:
spin_coupl = pauli.Z
else:
# Local Hamiltonian of the System:
spin_loc = omega_0 / 2 * pauli.Z
# Coupling between System and bath:
spin_coupl = np.array([[0, 0], [1, 0]], dtype=np.complex128)
# Local Hamiltonian of the bath
fock_n = fock.n(bath_local_dim)
bath_loc = [energy * fock_n for energy in xi]
# Coupling operators for the bath to the left of the system
left_bath_coupling_op = np.kron(fock.a(bath_local_dim), spin_coupl.conj().T) + \
np.kron(fock.a_dag(bath_local_dim), spin_coupl)
left_bath_coupl = [coupling * left_bath_coupling_op for coupling in gamma[:system_index]]
# Coupling operators for the bath to the right of the system
right_bath_coupling_op = np.kron(spin_coupl.conj().T, fock.a(bath_local_dim)) + \
np.kron(spin_coupl, fock.a_dag(bath_local_dim))
right_bath_coupl = [coupling * right_bath_coupling_op for coupling in gamma[system_index:]]
return bath_loc[:system_index] + [spin_loc] + bath_loc[system_index:], left_bath_coupl + right_bath_coupl
def get_boson_boson_chain_hamiltonian(omega_0, c0, omega, t, cutoff_dim):
"""
Returns local and coupling parts of the Spin-Boson model chain Hamiltonian
used in Sec. 4.4.3 of the thesis.
:param omega_0: Spin energy
:param c0: Spin-Bath coupling
:param omega: Bath energies
:param t: Bath-bath couplings
:param cutoff_dim: Local dimension of the impurity and bath
:returns: List of local Hamiltonians, List of coupling Hamiltonians
"""
# Local Hamiltonian of the System:
sys_loc = omega_0 * fock.n(cutoff_dim)
# Coupling between System and bath:
sys_coupl = fock.a(cutoff_dim)
# Local Hamiltonian of the bath
fock_n = fock.n(cutoff_dim)
bath_loc = [energy * fock_n for energy in omega]
# Bath coupling
bath_coupling_op = np.kron(fock.a(cutoff_dim), fock.a_dag(cutoff_dim)) + \
np.kron(fock.a_dag(cutoff_dim), fock.a(cutoff_dim))
bath_bath_coupl = [coupling * bath_coupling_op for coupling in t]
# Spin-Bath coupling
spin_bath_coupl = c0 * (np.kron(sys_coupl, fock.a_dag(cutoff_dim)) +
np.kron(sys_coupl.conj().T, fock.a(cutoff_dim)))
return [sys_loc] + bath_loc, [spin_bath_coupl] + bath_bath_coupl
def get_boson_boson_star_hamiltonian(omega_0, system_index, gamma, xi, cutoff_dim):
"""
Returns local and coupling parts of the Spin-Boson model star Hamiltonian
used in Sec. 4.4.3 of the thesis.
:param omega_0: Spin energy
:param system_index: Index of the system in the auxiliary chain
:param gamma: System-Bath couplings
:param xi: Bath energies
:param cutoff_dim: Local dimension of the impurity and bath
:returns: List of local Hamiltonians, List of coupling Hamiltonians
"""
# Local Hamiltonian of the System:
sys_loc = omega_0 * fock.n(cutoff_dim)
# Coupling between System and bath:
sys_coupl = fock.a(cutoff_dim)
# Local Hamiltonian of the bath
fock_n = fock.n(cutoff_dim)
bath_loc = [energy * fock_n for energy in xi]
# Coupling operators for the bath to the left of the system
left_bath_coupling_op = np.kron(fock.a(cutoff_dim), sys_coupl.conj().T) + \
np.kron(fock.a_dag(cutoff_dim), sys_coupl)
left_bath_coupl = [coupling * left_bath_coupling_op for coupling in gamma[:system_index]]
# Coupling operators for the bath to the right of the system
right_bath_coupling_op = np.kron(sys_coupl.conj().T, fock.a(cutoff_dim)) + \
np.kron(sys_coupl, fock.a_dag(cutoff_dim))
right_bath_coupl = [coupling * right_bath_coupling_op for coupling in gamma[system_index:]]
return bath_loc[:system_index] + [sys_loc] + bath_loc[system_index:], left_bath_coupl + right_bath_coupl
| [
"tmps.utils.fock.a_dag",
"numpy.abs",
"scipy.special.factorial",
"numpy.array",
"tmps.utils.fock.n",
"tmps.utils.fock.a",
"itertools.count"
] | [((462, 482), 'itertools.count', 'count', (['cutoff_dim', '(1)'], {}), '(cutoff_dim, 1)\n', (467, 482), False, 'from itertools import count\n'), ((1654, 1676), 'tmps.utils.fock.n', 'fock.n', (['bath_local_dim'], {}), '(bath_local_dim)\n', (1660, 1676), False, 'from tmps.utils import pauli, fock\n'), ((3352, 3374), 'tmps.utils.fock.n', 'fock.n', (['bath_local_dim'], {}), '(bath_local_dim)\n', (3358, 3374), False, 'from tmps.utils import pauli, fock\n'), ((4803, 4821), 'tmps.utils.fock.a', 'fock.a', (['cutoff_dim'], {}), '(cutoff_dim)\n', (4809, 4821), False, 'from tmps.utils import pauli, fock\n'), ((4872, 4890), 'tmps.utils.fock.n', 'fock.n', (['cutoff_dim'], {}), '(cutoff_dim)\n', (4878, 4890), False, 'from tmps.utils import pauli, fock\n'), ((6104, 6122), 'tmps.utils.fock.a', 'fock.a', (['cutoff_dim'], {}), '(cutoff_dim)\n', (6110, 6122), False, 'from tmps.utils import pauli, fock\n'), ((6173, 6191), 'tmps.utils.fock.n', 'fock.n', (['cutoff_dim'], {}), '(cutoff_dim)\n', (6179, 6191), False, 'from tmps.utils import pauli, fock\n'), ((1557, 1604), 'numpy.array', 'np.array', (['[[0, 0], [1, 0]]'], {'dtype': 'np.complex128'}), '([[0, 0], [1, 0]], dtype=np.complex128)\n', (1565, 1604), True, 'import numpy as np\n'), ((3254, 3301), 'numpy.array', 'np.array', (['[[0, 0], [1, 0]]'], {'dtype': 'np.complex128'}), '([[0, 0], [1, 0]], dtype=np.complex128)\n', (3262, 3301), True, 'import numpy as np\n'), ((4727, 4745), 'tmps.utils.fock.n', 'fock.n', (['cutoff_dim'], {}), '(cutoff_dim)\n', (4733, 4745), False, 'from tmps.utils import pauli, fock\n'), ((6028, 6046), 'tmps.utils.fock.n', 'fock.n', (['cutoff_dim'], {}), '(cutoff_dim)\n', (6034, 6046), False, 'from tmps.utils import pauli, fock\n'), ((1782, 1804), 'tmps.utils.fock.a', 'fock.a', (['bath_local_dim'], {}), '(bath_local_dim)\n', (1788, 1804), False, 'from tmps.utils import pauli, fock\n'), ((1806, 1832), 'tmps.utils.fock.a_dag', 'fock.a_dag', (['bath_local_dim'], {}), '(bath_local_dim)\n', (1816, 1832), False, 'from tmps.utils import pauli, fock\n'), ((1869, 1895), 'tmps.utils.fock.a_dag', 'fock.a_dag', (['bath_local_dim'], {}), '(bath_local_dim)\n', (1879, 1895), False, 'from tmps.utils import pauli, fock\n'), ((1897, 1919), 'tmps.utils.fock.a', 'fock.a', (['bath_local_dim'], {}), '(bath_local_dim)\n', (1903, 1919), False, 'from tmps.utils import pauli, fock\n'), ((3526, 3548), 'tmps.utils.fock.a', 'fock.a', (['bath_local_dim'], {}), '(bath_local_dim)\n', (3532, 3548), False, 'from tmps.utils import pauli, fock\n'), ((3611, 3637), 'tmps.utils.fock.a_dag', 'fock.a_dag', (['bath_local_dim'], {}), '(bath_local_dim)\n', (3621, 3637), False, 'from tmps.utils import pauli, fock\n'), ((3868, 3890), 'tmps.utils.fock.a', 'fock.a', (['bath_local_dim'], {}), '(bath_local_dim)\n', (3874, 3890), False, 'from tmps.utils import pauli, fock\n'), ((3945, 3971), 'tmps.utils.fock.a_dag', 'fock.a_dag', (['bath_local_dim'], {}), '(bath_local_dim)\n', (3955, 3971), False, 'from tmps.utils import pauli, fock\n'), ((4996, 5014), 'tmps.utils.fock.a', 'fock.a', (['cutoff_dim'], {}), '(cutoff_dim)\n', (5002, 5014), False, 'from tmps.utils import pauli, fock\n'), ((5016, 5038), 'tmps.utils.fock.a_dag', 'fock.a_dag', (['cutoff_dim'], {}), '(cutoff_dim)\n', (5026, 5038), False, 'from tmps.utils import pauli, fock\n'), ((5075, 5097), 'tmps.utils.fock.a_dag', 'fock.a_dag', (['cutoff_dim'], {}), '(cutoff_dim)\n', (5085, 5097), False, 'from tmps.utils import pauli, fock\n'), ((5099, 5117), 'tmps.utils.fock.a', 'fock.a', (['cutoff_dim'], {}), '(cutoff_dim)\n', (5105, 5117), False, 'from tmps.utils import pauli, fock\n'), ((6343, 6361), 'tmps.utils.fock.a', 'fock.a', (['cutoff_dim'], {}), '(cutoff_dim)\n', (6349, 6361), False, 'from tmps.utils import pauli, fock\n'), ((6423, 6445), 'tmps.utils.fock.a_dag', 'fock.a_dag', (['cutoff_dim'], {}), '(cutoff_dim)\n', (6433, 6445), False, 'from tmps.utils import pauli, fock\n'), ((6674, 6692), 'tmps.utils.fock.a', 'fock.a', (['cutoff_dim'], {}), '(cutoff_dim)\n', (6680, 6692), False, 'from tmps.utils import pauli, fock\n'), ((6746, 6768), 'tmps.utils.fock.a_dag', 'fock.a_dag', (['cutoff_dim'], {}), '(cutoff_dim)\n', (6756, 6768), False, 'from tmps.utils import pauli, fock\n'), ((404, 428), 'scipy.special.factorial', 'factorial', (['x'], {'exact': '(True)'}), '(x, exact=True)\n', (413, 428), False, 'from scipy.special import factorial\n'), ((2065, 2091), 'tmps.utils.fock.a_dag', 'fock.a_dag', (['bath_local_dim'], {}), '(bath_local_dim)\n', (2075, 2091), False, 'from tmps.utils import pauli, fock\n'), ((2152, 2174), 'tmps.utils.fock.a', 'fock.a', (['bath_local_dim'], {}), '(bath_local_dim)\n', (2158, 2174), False, 'from tmps.utils import pauli, fock\n'), ((5262, 5284), 'tmps.utils.fock.a_dag', 'fock.a_dag', (['cutoff_dim'], {}), '(cutoff_dim)\n', (5272, 5284), False, 'from tmps.utils import pauli, fock\n'), ((5344, 5362), 'tmps.utils.fock.a', 'fock.a', (['cutoff_dim'], {}), '(cutoff_dim)\n', (5350, 5362), False, 'from tmps.utils import pauli, fock\n'), ((357, 370), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (363, 370), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import time
from xml.dom import minidom  # used by SaveFile below
class Logbook(object):
def __init__(self):
"""
Class initialization
"""
self._id = time.strftime("%d%m%Y_%H%M%S", time.gmtime()) #id of the log, it's the timestamp
self._trial = 0
self._pinv = 0.0
self._rinv = 0.0
self._pmult = 0.0
self._rmult = 0.0
self._gaze = False
self._pointing = False
self._timer = 0
self._mp3 = ""
#create the file
open(self._id + ".csv", 'a').close()
#Write the header as first line
try:
path_to_file = self._id + ".csv"
with open(path_to_file, "a") as f:
f.write( "trial," + "pinv," + "rinv," + "pmult," + "rmult," + "total," + "gaze," + "pointing," + "timer," + "audio" + '\n')
f.close()
except:
# log exception
print("* LOGBOOK: execpion creating the header.")
def AddTextLine(self, stringToAdd):
try:
path_to_file = self._id + ".csv"
with open(path_to_file, "a") as f:
f.write( stringToAdd + '\n')
f.close()
except:
# log exception
print("* LOGBOOK: execpion adding a text line to the file.")
def AddLine(self, trial, pinv, rinv, pmult, rmult, total, gaze, pointing, timer, mp3 ):
try:
self._trial = trial
self._pinv = pinv
self._rinv = rinv
self._pmult = pmult
self._rmult = rmult
self._gaze = gaze
self._pointing = pointing
self._mp3 = mp3
self._timer = timer
path_to_file = self._id + ".csv"
with open(path_to_file, "a") as f:
f.write( str(trial) + "," + str(pinv) + "," + str(rinv) + "," + str(pmult) + "," + str(rmult) + "," + str(total) + "," + str(gaze) + "," + str(pointing) + "," + str(timer) + "," + str(mp3) + '\n')
f.close()
except:
# log exception
print("* LOGBOOK: execpion adding a line to the file.")
def SaveFile(self, filePath):
print(filePath)
if os.path.isfile(filePath):
self._path = filePath
self._doc = minidom.parse(filePath)
print("PARSER: the XML file was correctly loaded.")
return True
else:
print("PARSER: Error the XML file does not exist, please check if the path is correct.")
return False
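# Minimal usage sketch (argument values are illustrative only):
#   log = Logbook()
#   log.AddLine(1, 0.5, 0.1, 0.9, 0.2, 1.7, True, False, 12, "cue.mp3")
#   log.AddTextLine("free-form note")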
| [
"os.path.isfile",
"time.gmtime"
] | [((2227, 2251), 'os.path.isfile', 'os.path.isfile', (['filePath'], {}), '(filePath)\n', (2241, 2251), False, 'import os\n'), ((197, 210), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (208, 210), False, 'import time\n')] |
from sys import stderr
from study.models import Result, ActiveTranslation
def update_score(learner, result, verified=False):
"""
Update the score after a phrase has been judged.
:param result: Result.CORRECT, Result.CLOSE or Result.INCORRECT
:return: Result instance
"""
if result == Result.CORRECT:
base = learner.reward_magnitude
elif result == Result.CLOSE:
base = - learner.reward_magnitude
elif result == Result.INCORRECT:
base = -2 * learner.reward_magnitude
else:
raise Exception('Scoring does not know how to deal with result = %s' % result)
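    # e.g. with reward_magnitude = 2: CORRECT -> +2, CLOSE -> -2, INCORRECT -> -4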
learner.study_active.score += base
#todo: take history into account: the same phrase correct 5 times in a row should increase score a lot (don't show again) [actually independent of new result: many correct before should amplify result]
#if learner.study_show_learn:
# judge_translation = learner.study_shown
#else:
# judge_translation = learner.study_hidden
#print judge_translation.language
#try:
# active = ActiveTranslation.objects.get(learner = learner, translation = judge_translation)
#except ActiveTranslation.DoesNotExist:
# raise ActiveTranslation.DoesNotExist('The ActiveTranslation with learner="%s" translation="%" (%s) was not found while assigning scores. This means it disappeared between asking the question and answering it (they shouldn\'t disappear) or that the algorithm doesn\'t recover it correctly')
#if not judge_translation.language == request.LEARN_LANG:
# stderr.write('the translation to which score will be assigned is not in the learning langauge')
result = Result(
learner = learner,
asked = learner.study_hidden,
known = learner.study_shown,
result = result,
        verified = verified  # pass the caller's flag through instead of hard-coding False
)
learner.study_active.save()
result.save()
return result
| [
"study.models.Result"
] | [((1576, 1690), 'study.models.Result', 'Result', ([], {'learner': 'learner', 'asked': 'learner.study_hidden', 'known': 'learner.study_shown', 'result': 'result', 'verified': '(False)'}), '(learner=learner, asked=learner.study_hidden, known=learner.\n study_shown, result=result, verified=False)\n', (1582, 1690), False, 'from study.models import Result, ActiveTranslation\n')] |
#!/usr/bin/env python
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import sys
from readmemaker import ReadmeMaker
PROJECT_NAME = "tabledata"
OUTPUT_DIR = ".."
def main():
maker = ReadmeMaker(
PROJECT_NAME,
OUTPUT_DIR,
is_make_toc=True,
project_url=f"https://github.com/thombashi/{PROJECT_NAME}",
)
maker.inc_indent_level()
maker.write_chapter("Summary")
maker.write_introduction_file("summary.txt")
maker.write_introduction_file("badges.txt")
maker.write_introduction_file("installation.rst")
maker.set_indent_level(0)
maker.write_chapter("Documentation")
maker.write_lines([f"https://{PROJECT_NAME:s}.rtfd.io/"])
return 0
if __name__ == "__main__":
sys.exit(main())
| [
"readmemaker.ReadmeMaker"
] | [((186, 306), 'readmemaker.ReadmeMaker', 'ReadmeMaker', (['PROJECT_NAME', 'OUTPUT_DIR'], {'is_make_toc': '(True)', 'project_url': 'f"""https://github.com/thombashi/{PROJECT_NAME}"""'}), "(PROJECT_NAME, OUTPUT_DIR, is_make_toc=True, project_url=\n f'https://github.com/thombashi/{PROJECT_NAME}')\n", (197, 306), False, 'from readmemaker import ReadmeMaker\n')] |
#! /usr/bin/env python
from expired_cert_finder.plugins.raw import RawParser
from expired_cert_finder.plugins.yaml import YamlParser
from expired_cert_finder.allowed_certs import AllowedCerts
rawParser = RawParser
yamlParser = YamlParser
# TODO: load these parser plugins dynamically instead of hard-coding them.
def scan_file_for_certificate(path, expired_only, debug):
results = []
try:
if path in AllowedCerts.instance().allowed_certs:
return results
file = open(path, "r")
file_contents = file.read()
run_default = True
certs = []
if path.endswith('.yaml') or path.endswith('.yml'):
try:
certs = yamlParser.process(path, file_contents, debug)
run_default = False
except Exception as ex:
if debug:
print("Error while using YAML Parser: " + path)
print(ex)
pass
if run_default:
certs = rawParser.process(path, file_contents, debug)
for cert in certs:
try:
cert_info = cert.getInfo()
                if cert_info is not None and (cert_info['is_expired']
                        or (cert_info['close_to_expiry'] and not expired_only)):
status = "EXPIRED" if cert_info['is_expired'] else "CLOSE_TO_EXPIRY"
cert_info['message'] = '%s, %s, %s: %s' % (cert.path, cert_info['subject'], status, cert_info['not_after'])
results.append(cert_info)
except Exception as e:
print(cert, e)
raise
except UnicodeDecodeError:
pass
except Exception as ex:
if debug:
print("File that caused exception: %s" % path)
            raise  # re-raise with the original traceback
else:
print("Error processing %s, %s" % (path, ex))
return results
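# Example usage (a sketch; the directory path is a placeholder):
#
#     import os
#     for root, _, files in os.walk("/etc/ssl"):
#         for name in files:
#             for info in scan_file_for_certificate(os.path.join(root, name),
#                                                    expired_only=True, debug=False):
#                 print(info['message'])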
| [
"expired_cert_finder.allowed_certs.AllowedCerts.instance"
] | [((372, 395), 'expired_cert_finder.allowed_certs.AllowedCerts.instance', 'AllowedCerts.instance', ([], {}), '()\n', (393, 395), False, 'from expired_cert_finder.allowed_certs import AllowedCerts\n')] |
import unittest
from mock import Mock
from foundations_spec.helpers.spec import Spec
from foundations_spec.helpers import let, let_mock, set_up
class TestLazyBucket(Spec):
@let
def lazy_bucket(self):
from foundations_contrib.lazy_bucket import LazyBucket
return LazyBucket(self.bucket_constructor)
@set_up
def set_up(self):
self.bucket_constructor.return_value = self.bucket
bucket_constructor = let_mock()
bucket = let_mock()
name = let_mock()
data = let_mock()
input_file = let_mock()
output_file = let_mock()
dummy = let_mock()
pathname = let_mock()
source = let_mock()
destination = let_mock()
def test_ensure_bucket_is_not_constructed(self):
self.lazy_bucket
self.bucket_constructor.assert_not_called()
def test_upload_from_string_calls_bucket(self):
self.bucket.upload_from_string.return_value = self.dummy
result = self.lazy_bucket.upload_from_string(self.name, self.data)
self.bucket.upload_from_string.assert_called_with(self.name, self.data)
self.assertEqual(self.dummy, result)
def test_upload_from_file_calls_bucket(self):
self.bucket.upload_from_file.return_value = self.dummy
result = self.lazy_bucket.upload_from_file(self.name, self.input_file)
self.bucket.upload_from_file.assert_called_with(self.name, self.input_file)
self.assertEqual(self.dummy, result)
def test_exists_calls_bucket(self):
self.bucket.exists.return_value = self.dummy
result = self.lazy_bucket.exists(self.name)
self.bucket.exists.assert_called_with(self.name)
self.assertEqual(self.dummy, result)
def test_download_as_string_calls_bucket(self):
self.bucket.download_as_string.return_value = self.dummy
result = self.lazy_bucket.download_as_string(self.name)
self.bucket.download_as_string.assert_called_with(self.name)
self.assertEqual(self.dummy, result)
def test_download_to_file_calls_bucket(self):
self.bucket.download_to_file.return_value = self.dummy
result = self.lazy_bucket.download_to_file(self.name, self.output_file)
self.bucket.download_to_file.assert_called_with(self.name, self.output_file)
self.assertEqual(self.dummy, result)
def test_list_files_calls_bucket(self):
self.bucket.list_files.return_value = self.dummy
result = self.lazy_bucket.list_files(self.pathname)
self.bucket.list_files.assert_called_with(self.pathname)
self.assertEqual(self.dummy, result)
def test_remove_calls_bucket(self):
self.bucket.remove.return_value = self.dummy
result = self.lazy_bucket.remove(self.name)
self.bucket.remove.assert_called_with(self.name)
self.assertEqual(self.dummy, result)
def test_move_calls_bucket(self):
self.bucket.move.return_value = self.dummy
result = self.lazy_bucket.move(self.source, self.destination)
self.bucket.move.assert_called_with(self.source, self.destination)
self.assertEqual(self.dummy, result)
| [
"foundations_contrib.lazy_bucket.LazyBucket",
"foundations_spec.helpers.let_mock"
] | [((446, 456), 'foundations_spec.helpers.let_mock', 'let_mock', ([], {}), '()\n', (454, 456), False, 'from foundations_spec.helpers import let, let_mock, set_up\n'), ((470, 480), 'foundations_spec.helpers.let_mock', 'let_mock', ([], {}), '()\n', (478, 480), False, 'from foundations_spec.helpers import let, let_mock, set_up\n'), ((496, 506), 'foundations_spec.helpers.let_mock', 'let_mock', ([], {}), '()\n', (504, 506), False, 'from foundations_spec.helpers import let, let_mock, set_up\n'), ((518, 528), 'foundations_spec.helpers.let_mock', 'let_mock', ([], {}), '()\n', (526, 528), False, 'from foundations_spec.helpers import let, let_mock, set_up\n'), ((546, 556), 'foundations_spec.helpers.let_mock', 'let_mock', ([], {}), '()\n', (554, 556), False, 'from foundations_spec.helpers import let, let_mock, set_up\n'), ((575, 585), 'foundations_spec.helpers.let_mock', 'let_mock', ([], {}), '()\n', (583, 585), False, 'from foundations_spec.helpers import let, let_mock, set_up\n'), ((598, 608), 'foundations_spec.helpers.let_mock', 'let_mock', ([], {}), '()\n', (606, 608), False, 'from foundations_spec.helpers import let, let_mock, set_up\n'), ((624, 634), 'foundations_spec.helpers.let_mock', 'let_mock', ([], {}), '()\n', (632, 634), False, 'from foundations_spec.helpers import let, let_mock, set_up\n'), ((648, 658), 'foundations_spec.helpers.let_mock', 'let_mock', ([], {}), '()\n', (656, 658), False, 'from foundations_spec.helpers import let, let_mock, set_up\n'), ((677, 687), 'foundations_spec.helpers.let_mock', 'let_mock', ([], {}), '()\n', (685, 687), False, 'from foundations_spec.helpers import let, let_mock, set_up\n'), ((290, 325), 'foundations_contrib.lazy_bucket.LazyBucket', 'LazyBucket', (['self.bucket_constructor'], {}), '(self.bucket_constructor)\n', (300, 325), False, 'from foundations_contrib.lazy_bucket import LazyBucket\n')] |
# Python Standard Libraries
import warnings
# grAdapt
# from .base import Equidistributed
from .MaximalMinDistance import MaximalMinDistance
class Mitchell(MaximalMinDistance):
"""
[Mitchell et al., 1991],
Spectrally optimal sampling for distribution ray tracing
"""
def __init__(self, m=3):
"""
Parameters
----------
m : integer
number of candidates = m * n
"""
        warnings.warn("Mitchell's best candidate has a time complexity of O(n^3) "
                      "and memory issues when dealing with higher sample numbers. "
                      "Use MaximalMinDistance instead, which is an improved version "
                      "with linear time complexity.", ResourceWarning)
super().__init__(n_candidates=m, window_size=0)
self.candidates_set = False
def sample(self, bounds, n, x_history=None):
"""Samples low discrepancy/equidistributed sequences according to Mitchell.
Method has to handle with new bounds and n.
Parameters
----------
bounds : list of tuples or list of grAdapt.space.datatype.base
Each tuple in the list defines the bounds for the corresponding variable
Example: [(1, 2), (2, 3), (-1, 4)...]
n : int
number of points to be sampled
x_history : array-like (2d)
History points. Consider those to prevent sampling in dense regions.
Returns
-------
array-like (n, len(bounds))
Returns a 2D array. dim is the dimension of a single point
Each row corresponds to a single point.
Each column corresponds to a dimension.
"""
        if not self.candidates_set:
self.n_candidates = self.n_candidates * n
self.candidates_set = True
return super().sample(bounds, n, x_history)
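# Example usage (a sketch; assumes a grAdapt package layout in which this class
# is importable from an equidistributed sampling subpackage -- adjust the import
# to your install):
#
#     sampler = Mitchell(m=3)
#     points = sampler.sample(bounds=[(0, 1), (-1, 1)], n=10)
#     print(points.shape)  # expected: (10, 2)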
| [
"warnings.warn"
] | [((446, 694), 'warnings.warn', 'warnings.warn', (['"""Mitchell\' best candidate has a time complexity of O(n^3) and memory issues when dealing with higher sample numbers. Use MaximalMinDistance instead which is an improved version with linear time complexity."""', 'ResourceWarning'], {}), '(\n "Mitchell\' best candidate has a time complexity of O(n^3) and memory issues when dealing with higher sample numbers. Use MaximalMinDistance instead which is an improved version with linear time complexity."\n , ResourceWarning)\n', (459, 694), False, 'import warnings\n')] |
import numpy as np
class Material:
"""send color as list of 3 floats in range of 0-1"""
def __init__(self, color, reflection=0, transparency=0, emission=np.array((0, 0, 0)), refraction_ind=1):
self.color = np.array(color)
self.emission = emission # only for light sources
self.reflection = reflection
self.transparency = transparency
self.refraction_ind = refraction_ind
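# Example usage (a sketch): a diffuse red surface, a glass-like material and a
# white light source.
#
#     red_wall = Material([0.9, 0.1, 0.1])
#     glass = Material([1.0, 1.0, 1.0], reflection=0.1, transparency=0.9,
#                      refraction_ind=1.5)
#     lamp = Material([1.0, 1.0, 1.0], emission=np.array((10.0, 10.0, 10.0)))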
| [
"numpy.array"
] | [((164, 183), 'numpy.array', 'np.array', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (172, 183), True, 'import numpy as np\n'), ((225, 240), 'numpy.array', 'np.array', (['color'], {}), '(color)\n', (233, 240), True, 'import numpy as np\n')] |
import pandas as pd
from torch.utils.data import DataLoader
import multiprocessing as mp
import argparse
from DataSet import Dataset
import torch
import torch.nn as nn
from os import path
import Infer
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
cpu = torch.device('cpu')
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--seq_len', type=int, default=256)
parser.add_argument('--root_dir')
parser.add_argument('--tr_start_year', type=int, help='Training Start year')
parser.add_argument('--tr_final_year', type=int, help='Training Final year')
parser.add_argument('--val_start_year', type=int, help='Validation Start year')
parser.add_argument('--val_final_year', type=int, help='Validation Final year')
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--loss', default='mse', help='Choose from qr_loss,mse')
parser.add_argument('--gamma_list', nargs='*', type=float, help='All gammas to be predicted by 1 model')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--model', default='ar_net', help='Choose From ar_net, trfrmr, cnn_lstm, lstm')
parser.add_argument('--ini_len', type=int, default=18, help='Number of Columns in Data<i>.csv')
parser.add_argument('--final_len', type=int, default=1, help='Number of numbers your model will predict.')
parser.add_argument('--steps', type=int, default=1, help='How many step ahead do you want to predict?')
parser.add_argument('--optimizer', default='Adam', help='Choose from Adam and RAdam.')
parser.add_argument('--param_file', help='Path to file to store weights.May not exist.')
args = parser.parse_args()
b_sz = args.batch_size
n_wrkrs = mp.cpu_count()
seq_len = args.seq_len
epochs = args.epochs
tr_csv_paths = [args.root_dir+'/Data'+str(i)+'.csv' for i in range(args.tr_start_year, args.tr_final_year+1)]
val_csv_paths = [args.root_dir+'/Data'+str(i)+'.csv' for i in range(args.val_start_year, args.val_final_year+1)]
if args.gamma_list is not None and len(args.gamma_list) > 1 and len(args.gamma_list) % 2 != 0 and args.loss == 'qr_loss':
    print('Invalid gamma list: qr_loss expects a single gamma or an even number of gammas.')
    exit(0)
dataset_final_len = args.final_len #if args.loss!='qr_loss' else 1 #or len(args.gamma_list)<=1 else int(args.final_len/2)
model_final_len = args.final_len*len(args.gamma_list) if args.gamma_list!=None else args.final_len
train_dataset = Dataset.SRdata(tr_csv_paths, seq_len, steps=args.steps, final_len=dataset_final_len)
train_data_loader = DataLoader(train_dataset, batch_size = b_sz, num_workers=n_wrkrs, drop_last = True)
test_dataset = Dataset.SRdata(val_csv_paths, seq_len, steps=args.steps, final_len=dataset_final_len)
test_data_loader = DataLoader(test_dataset, batch_size = b_sz, num_workers=n_wrkrs, drop_last=True)
if args.loss=='mse' :
lossfn = nn.MSELoss().to(device)
elif args.loss=='qr_loss' :
maximum = nn.ReLU()
gamma_list_len = len(args.gamma_list)
gammas = torch.tensor(args.gamma_list, dtype=torch.float64, device=device)
gammas = gammas.repeat_interleave(args.final_len)
def qr_loss(tar, pred) :
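        # Quantile ("pinball") loss as implemented here: under-prediction
        # (tar > pred) is weighted by (1 - gamma) and over-prediction by gamma,
        # then averaged over the batch. With several gammas the target is tiled
        # so one model predicts every requested quantile at once.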
if gamma_list_len!=1 :
tar = torch.cat([tar]*gamma_list_len,dim=1)
n = tar.shape[0]
m = tar.shape[1]
loss = (1-gammas)*maximum(tar-pred)+(gammas)*maximum(pred-tar)
return loss.sum()/(n*m)
lossfn = qr_loss
if args.model=='ar_net' :
from Models import AR_Net
t = AR_Net.ar_nt(seq_len = seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
elif args.model=='cnn_lstm' :
from Models import CNN_LSTM
t = CNN_LSTM.cnn_lstm(seq_len = seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
elif args.model=='trfrmr' :
from Models import Transformer
t = Transformer.trnsfrmr_nt(seq_len = seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
elif args.model=='lstm' :
from Models import LSTM
t = LSTM.lstm(seq_len = seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
elif args.model=='lstm_a' :
    from Models import LSTM
    t = LSTM.lstm_a(seq_len = seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
if path.exists(args.param_file) :
t.load_state_dict(torch.load(args.param_file))
if args.optimizer == 'RAdam' :
from optimizers import RAdam
optimizer = RAdam.RAdam(t.parameters(),lr=args.lr)
elif args.optimizer == 'Adam' :
optimizer = torch.optim.Adam(t.parameters(),lr=args.lr)
t = t.double()
train_mse = []
test_mse = [10000] # large sentinel so the first validation loss registers as the minimum
for ij in range(epochs) :
loss_list = []
for i, batch in enumerate(train_data_loader) :
optimizer.zero_grad()
in_batch = batch['in'].to(device)
out = t(in_batch)
loss = lossfn(batch['out'].to(device), out)
loss_list.append(loss)
loss.backward()
optimizer.step()
print('Avg. Training Loss in '+str(ij)+ 'th epoch :- ', sum(loss_list)/len(loss_list))
train_mse.append(sum(loss_list)/len(loss_list))
loss_list=[]
test_mse.append(Infer.evaluate(t, loss = args.loss, test_dataset=test_dataset, args_from_train=args))
if test_mse[-1]==min(test_mse) :
print('saving:- ', test_mse[-1])
torch.save(t.state_dict(),args.param_file)
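# Example invocation (a sketch; the script name, paths and years are
# placeholders for your setup):
#
#     python train.py --root_dir ./data --tr_start_year 2000 --tr_final_year 2010 \
#         --val_start_year 2011 --val_final_year 2012 --model lstm --loss qr_loss \
#         --gamma_list 0.1 0.9 --param_file weights.pt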
| [
"Models.AR_Net.ar_nt",
"os.path.exists",
"torch.nn.ReLU",
"Models.Transformer.trnsfrmr_nt",
"Infer.evaluate",
"argparse.ArgumentParser",
"torch.load",
"multiprocessing.cpu_count",
"torch.tensor",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"Models.CNN_LSTM.cnn_lstm",
"Models.LSTM.lstm_a",... | [((280, 299), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (292, 299), False, 'import torch\n'), ((310, 335), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (333, 335), False, 'import argparse\n'), ((1734, 1748), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1746, 1748), True, 'import multiprocessing as mp\n'), ((2421, 2510), 'DataSet.Dataset.SRdata', 'Dataset.SRdata', (['tr_csv_paths', 'seq_len'], {'steps': 'args.steps', 'final_len': 'dataset_final_len'}), '(tr_csv_paths, seq_len, steps=args.steps, final_len=\n dataset_final_len)\n', (2435, 2510), False, 'from DataSet import Dataset\n'), ((2526, 2605), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'b_sz', 'num_workers': 'n_wrkrs', 'drop_last': '(True)'}), '(train_dataset, batch_size=b_sz, num_workers=n_wrkrs, drop_last=True)\n', (2536, 2605), False, 'from torch.utils.data import DataLoader\n'), ((2626, 2716), 'DataSet.Dataset.SRdata', 'Dataset.SRdata', (['val_csv_paths', 'seq_len'], {'steps': 'args.steps', 'final_len': 'dataset_final_len'}), '(val_csv_paths, seq_len, steps=args.steps, final_len=\n dataset_final_len)\n', (2640, 2716), False, 'from DataSet import Dataset\n'), ((2731, 2809), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'b_sz', 'num_workers': 'n_wrkrs', 'drop_last': '(True)'}), '(test_dataset, batch_size=b_sz, num_workers=n_wrkrs, drop_last=True)\n', (2741, 2809), False, 'from torch.utils.data import DataLoader\n'), ((4225, 4253), 'os.path.exists', 'path.exists', (['args.param_file'], {}), '(args.param_file)\n', (4236, 4253), False, 'from os import path\n'), ((236, 261), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (259, 261), False, 'import torch\n'), ((2917, 2926), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2924, 2926), True, 'import torch.nn as nn\n'), ((2982, 3047), 'torch.tensor', 'torch.tensor', (['args.gamma_list'], {'dtype': 'torch.float64', 'device': 'device'}), '(args.gamma_list, dtype=torch.float64, device=device)\n', (2994, 3047), False, 'import torch\n'), ((4278, 4305), 'torch.load', 'torch.load', (['args.param_file'], {}), '(args.param_file)\n', (4288, 4305), False, 'import torch\n'), ((5076, 5162), 'Infer.evaluate', 'Infer.evaluate', (['t'], {'loss': 'args.loss', 'test_dataset': 'test_dataset', 'args_from_train': 'args'}), '(t, loss=args.loss, test_dataset=test_dataset,\n args_from_train=args)\n', (5090, 5162), False, 'import Infer\n'), ((2849, 2861), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2859, 2861), True, 'import torch.nn as nn\n'), ((3458, 3536), 'Models.AR_Net.ar_nt', 'AR_Net.ar_nt', ([], {'seq_len': 'seq_len', 'ini_len': 'args.ini_len', 'final_len': 'model_final_len'}), '(seq_len=seq_len, ini_len=args.ini_len, final_len=model_final_len)\n', (3470, 3536), False, 'from Models import AR_Net\n'), ((3180, 3220), 'torch.cat', 'torch.cat', (['([tar] * gamma_list_len)'], {'dim': '(1)'}), '([tar] * gamma_list_len, dim=1)\n', (3189, 3220), False, 'import torch\n'), ((3625, 3713), 'Models.CNN_LSTM.cnn_lstm', 'CNN_LSTM.cnn_lstm', ([], {'seq_len': 'seq_len', 'ini_len': 'args.ini_len', 'final_len': 'model_final_len'}), '(seq_len=seq_len, ini_len=args.ini_len, final_len=\n model_final_len)\n', (3642, 3713), False, 'from Models import CNN_LSTM\n'), ((3794, 3888), 'Models.Transformer.trnsfrmr_nt', 'Transformer.trnsfrmr_nt', ([], {'seq_len': 'seq_len', 'ini_len': 'args.ini_len', 'final_len': 'model_final_len'}), 
'(seq_len=seq_len, ini_len=args.ini_len, final_len=\n model_final_len)\n', (3817, 3888), False, 'from Models import Transformer\n'), ((3960, 4035), 'Models.LSTM.lstm', 'LSTM.lstm', ([], {'seq_len': 'seq_len', 'ini_len': 'args.ini_len', 'final_len': 'model_final_len'}), '(seq_len=seq_len, ini_len=args.ini_len, final_len=model_final_len)\n', (3969, 4035), False, 'from Models import LSTM\n'), ((4127, 4204), 'Models.LSTM.lstm_a', 'LSTM.lstm_a', ([], {'seq_len': 'seq_len', 'ini_len': 'args.ini_len', 'final_len': 'model_final_len'}), '(seq_len=seq_len, ini_len=args.ini_len, final_len=model_final_len)\n', (4138, 4204), False, 'from Models import LSTM\n')] |
#!/usr/bin/env python3
import re
import sys
import io
import os
import traceback
class TextPreprocessorError(Exception):
def __init__(self, file_id, line, msg):
super().__init__(f"{file_id}:{line}: {msg}")
def debug_print(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def preprocess(
file: io.TextIOWrapper,
definitions: set[str],
file_id: str = "<stdin>",
):
import parser
parser.parse_exec(file, definitions, file_id)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Text preprocessor")
parser.add_argument(
"-D", dest="definitions", action="append", help="Set definition"
)
parser.add_argument(
"--deffile",
dest="deffile",
help="Set definition file, the definition file is a python file that defines a definitions iterable that contains definitions",
)
parser.add_argument(
"file", nargs="?", default=None, help="Input file (stdin by default)"
)
args = parser.parse_args()
definitions = set()
if args.deffile:
import importlib
deffile = importlib.machinery.SourceFileLoader(
"deffile", os.path.realpath(args.deffile)
)
deffile = deffile.load_module()
definitions |= set(deffile.definitions)
if args.definitions:
definitions |= set(args.definitions)
file = sys.stdin if args.file is None else open(args.file, "r")
file_id = "<stdin>" if args.file is None else args.file
try:
preprocess(
file,
definitions,
file_id
)
    except Exception:
        traceback.print_exc()
        print("Failed to parse / execute file", file_id, file=sys.stderr)
        exit(3)
finally:
if args.file is not None:
file.close()
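# Example invocations (a sketch; file names are placeholders):
#
#     ./preprocess.py -D DEBUG -D LINUX input.txt
#     ./preprocess.py --deffile defs.py < input.txt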
| [
"parser.parse_args",
"argparse.ArgumentParser",
"os.path.realpath",
"parser.add_argument",
"parser.parse_exec",
"traceback.print_exc"
] | [((430, 475), 'parser.parse_exec', 'parser.parse_exec', (['file', 'definitions', 'file_id'], {}), '(file, definitions, file_id)\n', (447, 475), False, 'import parser\n'), ((539, 595), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Text preprocessor"""'}), "(description='Text preprocessor')\n", (562, 595), False, 'import argparse\n'), ((600, 690), 'parser.add_argument', 'parser.add_argument', (['"""-D"""'], {'dest': '"""definitions"""', 'action': '"""append"""', 'help': '"""Set definition"""'}), "('-D', dest='definitions', action='append', help=\n 'Set definition')\n", (619, 690), False, 'import parser\n'), ((704, 890), 'parser.add_argument', 'parser.add_argument', (['"""--deffile"""'], {'dest': '"""deffile"""', 'help': '"""Set definition file, the definition file is a python file that defines a definitions iterable that contains definitions"""'}), "('--deffile', dest='deffile', help=\n 'Set definition file, the definition file is a python file that defines a definitions iterable that contains definitions'\n )\n", (723, 890), False, 'import parser\n'), ((916, 1011), 'parser.add_argument', 'parser.add_argument', (['"""file"""'], {'nargs': '"""?"""', 'default': 'None', 'help': '"""Input file (stdin by default)"""'}), "('file', nargs='?', default=None, help=\n 'Input file (stdin by default)')\n", (935, 1011), False, 'import parser\n'), ((1033, 1052), 'parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (1050, 1052), False, 'import parser\n'), ((1205, 1235), 'os.path.realpath', 'os.path.realpath', (['args.deffile'], {}), '(args.deffile)\n', (1221, 1235), False, 'import os\n'), ((1656, 1677), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1675, 1677), False, 'import traceback\n')] |
from datetime import datetime, timezone
from io import StringIO
from unittest import mock
import freezegun
import pytest
from django.conf import settings
from django.core.management import call_command
from model_bakery import baker
from supportal.app.common.enums import CanvassResult
from supportal.app.models import EmailSend
CREATED_AT = datetime(2019, 10, 26, 1, tzinfo=timezone.utc)
CREATED_AT_EARLIER = datetime(2019, 10, 26, tzinfo=timezone.utc)
DAY_BEFORE_EXPIRE = datetime(2019, 11, 1, tzinfo=timezone.utc)
TWO_DAY_BEFORE_EXPIRE = datetime(2019, 10, 31, tzinfo=timezone.utc)
EXPIRED_AT = datetime(2019, 11, 2, 1, tzinfo=timezone.utc)
EXPIRED_EARLIER = datetime(2019, 11, 2, tzinfo=timezone.utc)
AFTER_EXPIRATION_DATE = datetime(2019, 11, 3, tzinfo=timezone.utc)
SIX_DAYS_BEFORE_EXPIRE = datetime(2019, 10, 27, tzinfo=timezone.utc)
def email_expiring_users(*args, **kwargs):
call_command("email_users_with_expiring_assignments", **kwargs)
@pytest.fixture
def first_cambridge_assignment(cambridge_leader_user, cambridge_prospect):
cambridge_assignment = baker.make(
"VolProspectAssignment", user=cambridge_leader_user, person=cambridge_prospect
)
cambridge_assignment.created_at = CREATED_AT
cambridge_assignment.save()
return cambridge_assignment
@pytest.fixture
def hayes_assignment(hayes_valley_leader_user, california_prospect):
hayes_valley_assignment = baker.make(
"VolProspectAssignment",
user=hayes_valley_leader_user,
person=california_prospect,
)
hayes_valley_assignment.created_at = CREATED_AT_EARLIER
hayes_valley_assignment.save()
return hayes_valley_assignment
@pytest.fixture
def hayes_cambridge_assignment(hayes_valley_leader_user, cambridge_prospect):
hayes_valley_assignment = baker.make(
"VolProspectAssignment",
user=hayes_valley_leader_user,
person=cambridge_prospect,
)
hayes_valley_assignment.created_at = CREATED_AT
hayes_valley_assignment.save()
return hayes_valley_assignment
@pytest.fixture
def second_cambridge_assignment(cambridge_leader_user, california_prospect):
cambridge_assignment = baker.make(
"VolProspectAssignment", user=cambridge_leader_user, person=california_prospect
)
cambridge_assignment.created_at = CREATED_AT
cambridge_assignment.save()
return cambridge_assignment
@pytest.fixture
def expired_assignment(cambridge_leader_user, somerville_prospect):
cambridge_assignment = baker.make(
"VolProspectAssignment", user=cambridge_leader_user, person=somerville_prospect
)
cambridge_assignment.created_at = CREATED_AT
cambridge_assignment.expired_at = EXPIRED_AT
cambridge_assignment.save()
return cambridge_assignment
DEFAULT_TEMPLATE_DATA = {
"assignment_count": "",
"email": "",
"expiration_date": "",
"switchboard_login_url": settings.SUPPORTAL_BASE_URL,
"first_name": "",
"last_name": "",
}
def make_payload(assignment_count, email, expiration, first_name, last_name):
return {
"assignment_count": assignment_count,
"email": email,
"expiration_date": expiration.strftime("%a %b %d, %Y"),
"switchboard_login_url": settings.SUPPORTAL_BASE_URL,
"first_name": first_name,
"last_name": last_name,
}
def check_email_sends(user, assignment_count, expiration, single_call_mock=None):
assert EmailSend.objects.filter(user=user).count() == 1
email_sent = EmailSend.objects.get(user=user)
assert email_sent.template_name == "expiring_contacts_email"
assert email_sent.payload == {
"assignment_count": assignment_count,
"email": user.email,
"expiration_date": expiration.strftime("%a %b %d, %Y"),
"switchboard_login_url": settings.SUPPORTAL_BASE_URL,
"first_name": user.first_name,
"last_name": user.last_name,
}
if single_call_mock:
single_call_mock.return_value.send_bulk_email.assert_called_once_with(
configuration_set_name="organizing_emails",
default_template_data=DEFAULT_TEMPLATE_DATA,
from_email=settings.FROM_EMAIL,
payload_array=[
make_payload(
assignment_count,
user.email,
expiration,
user.first_name,
user.last_name,
)
],
reply_to_email=settings.REPLY_TO_EMAIL,
template="expiring_contacts_email",
application_name="supportal",
)
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_uncontacted_assignments(
first_cambridge_assignment, expired_assignment
):
out = StringIO()
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
first_cambridge_assignment.refresh_from_db()
assert EmailSend.objects.all().count() == 1
check_email_sends(
first_cambridge_assignment.user, 1, EXPIRED_AT, email_service_mock
)
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_dryrun(first_cambridge_assignment, expired_assignment):
out = StringIO()
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out)
first_cambridge_assignment.refresh_from_db()
assert EmailSend.objects.all().count() == 0
assert first_cambridge_assignment.user.email in out.getvalue()
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(DAY_BEFORE_EXPIRE)
def test_dont_email_outside_of_two_days(first_cambridge_assignment, expired_assignment):
out = StringIO()
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 0
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
assert "Found 0 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_assignments(
first_cambridge_assignment, second_cambridge_assignment, expired_assignment
):
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 1
check_email_sends(
first_cambridge_assignment.user, 2, EXPIRED_AT, email_service_mock
)
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_users(
first_cambridge_assignment,
hayes_assignment,
    hayes_cambridge_assignment,
expired_assignment,
):
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 2
check_email_sends(first_cambridge_assignment.user, 1, EXPIRED_AT)
check_email_sends(hayes_assignment.user, 2, EXPIRED_EARLIER)
email_service_mock.return_value.send_bulk_email.assert_called_once_with(
configuration_set_name="organizing_emails",
default_template_data=DEFAULT_TEMPLATE_DATA,
from_email=settings.FROM_EMAIL,
payload_array=[
make_payload(
1,
first_cambridge_assignment.user.email,
EXPIRED_AT,
first_cambridge_assignment.user.first_name,
first_cambridge_assignment.user.last_name,
),
make_payload(
2,
hayes_assignment.user.email,
EXPIRED_EARLIER,
hayes_assignment.user.first_name,
hayes_assignment.user.last_name,
),
],
reply_to_email=settings.REPLY_TO_EMAIL,
template="expiring_contacts_email",
application_name="supportal",
)
assert "Found 2 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_users_send_all_to_flag(
first_cambridge_assignment,
hayes_assignment,
    hayes_cambridge_assignment,
expired_assignment,
):
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(
stdout=out, send=True, send_all_to="<EMAIL>"
)
assert EmailSend.objects.all().count() == 0
email_service_mock.return_value.send_bulk_email.assert_called_once_with(
configuration_set_name="organizing_emails",
default_template_data=DEFAULT_TEMPLATE_DATA,
from_email=settings.FROM_EMAIL,
payload_array=[
make_payload(
1,
"<EMAIL>",
EXPIRED_AT,
first_cambridge_assignment.user.first_name,
first_cambridge_assignment.user.last_name,
),
make_payload(
2,
"<EMAIL>",
EXPIRED_EARLIER,
hayes_assignment.user.first_name,
hayes_assignment.user.last_name,
),
],
reply_to_email=settings.REPLY_TO_EMAIL,
template="expiring_contacts_email",
application_name="supportal",
)
assert "Found 2 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_users_limit_flag(
first_cambridge_assignment,
hayes_assignment,
hayes_cambrdige_assignment,
expired_assignment,
):
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, limit=1, send=True)
assert EmailSend.objects.all().count() == 1
check_email_sends(first_cambridge_assignment.user, 1, EXPIRED_AT)
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_unsuccessfully_contacted_assignments(
first_cambridge_assignment, expired_assignment
):
first_cambridge_assignment.create_contact_event(
result=CanvassResult.UNAVAILABLE_LEFT_MESSAGE
)
first_cambridge_assignment.save()
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 1
check_email_sends(
first_cambridge_assignment.user, 1, EXPIRED_AT, email_service_mock
)
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_dont_email_unsubscribed_user(first_cambridge_assignment, expired_assignment):
first_cambridge_assignment.user.unsubscribed_at = datetime.now(tz=timezone.utc)
first_cambridge_assignment.user.save()
out = StringIO()
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 0
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
assert "Found 0 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_dont_email_user_who_was_emailed_recently(
first_cambridge_assignment, expired_assignment
):
EmailSend.objects.create(
user=first_cambridge_assignment.user,
template_name=EmailSend.EXPIRING_PROSPECTS,
payload={},
)
assert first_cambridge_assignment.user.unsubscribed_at is None
out = StringIO()
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 1
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 1
assert "Found 0 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_user_who_was_invited_recently(
first_cambridge_assignment, expired_assignment
):
EmailSend.objects.create(
user=first_cambridge_assignment.user,
template_name=EmailSend.INVITE_EMAIL,
payload={},
)
assert first_cambridge_assignment.user.unsubscribed_at is None
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 2
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 2
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_successfully_contacted_dont_email(
first_cambridge_assignment, expired_assignment
):
# Make sure that having a previous unsuccessful contact event doesn't cause
# the contact to get expired.
first_cambridge_assignment.create_contact_event(
result=CanvassResult.UNAVAILABLE_LEFT_MESSAGE
)
first_cambridge_assignment.create_contact_event(
result=CanvassResult.SUCCESSFUL_CANVASSED
)
first_cambridge_assignment.save()
out = StringIO()
email_expiring_users(stdout=out, send=True)
first_cambridge_assignment.refresh_from_db()
assert EmailSend.objects.all().count() == 0
assert "Found 0 users to email." in out.getvalue()
@pytest.mark.django_db
def test_expire_zero_assignments():
out = StringIO()
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 0
assert "Found 0 users to email." in out.getvalue()
| [
"datetime.datetime",
"model_bakery.baker.make",
"django.core.management.call_command",
"supportal.app.models.EmailSend.objects.filter",
"supportal.app.models.EmailSend.objects.create",
"io.StringIO",
"datetime.datetime.now",
"supportal.app.models.EmailSend.objects.get",
"supportal.app.models.EmailSe... | [((379, 425), 'datetime.datetime', 'datetime', (['(2019)', '(10)', '(26)', '(1)'], {'tzinfo': 'timezone.utc'}), '(2019, 10, 26, 1, tzinfo=timezone.utc)\n', (387, 425), False, 'from datetime import datetime, timezone\n'), ((447, 490), 'datetime.datetime', 'datetime', (['(2019)', '(10)', '(26)'], {'tzinfo': 'timezone.utc'}), '(2019, 10, 26, tzinfo=timezone.utc)\n', (455, 490), False, 'from datetime import datetime, timezone\n'), ((511, 553), 'datetime.datetime', 'datetime', (['(2019)', '(11)', '(1)'], {'tzinfo': 'timezone.utc'}), '(2019, 11, 1, tzinfo=timezone.utc)\n', (519, 553), False, 'from datetime import datetime, timezone\n'), ((578, 621), 'datetime.datetime', 'datetime', (['(2019)', '(10)', '(31)'], {'tzinfo': 'timezone.utc'}), '(2019, 10, 31, tzinfo=timezone.utc)\n', (586, 621), False, 'from datetime import datetime, timezone\n'), ((635, 680), 'datetime.datetime', 'datetime', (['(2019)', '(11)', '(2)', '(1)'], {'tzinfo': 'timezone.utc'}), '(2019, 11, 2, 1, tzinfo=timezone.utc)\n', (643, 680), False, 'from datetime import datetime, timezone\n'), ((699, 741), 'datetime.datetime', 'datetime', (['(2019)', '(11)', '(2)'], {'tzinfo': 'timezone.utc'}), '(2019, 11, 2, tzinfo=timezone.utc)\n', (707, 741), False, 'from datetime import datetime, timezone\n'), ((766, 808), 'datetime.datetime', 'datetime', (['(2019)', '(11)', '(3)'], {'tzinfo': 'timezone.utc'}), '(2019, 11, 3, tzinfo=timezone.utc)\n', (774, 808), False, 'from datetime import datetime, timezone\n'), ((834, 877), 'datetime.datetime', 'datetime', (['(2019)', '(10)', '(27)'], {'tzinfo': 'timezone.utc'}), '(2019, 10, 27, tzinfo=timezone.utc)\n', (842, 877), False, 'from datetime import datetime, timezone\n'), ((4643, 4687), 'freezegun.freeze_time', 'freezegun.freeze_time', (['TWO_DAY_BEFORE_EXPIRE'], {}), '(TWO_DAY_BEFORE_EXPIRE)\n', (4664, 4687), False, 'import freezegun\n'), ((5357, 5401), 'freezegun.freeze_time', 'freezegun.freeze_time', (['TWO_DAY_BEFORE_EXPIRE'], {}), '(TWO_DAY_BEFORE_EXPIRE)\n', (5378, 5401), False, 'import freezegun\n'), ((5990, 6030), 'freezegun.freeze_time', 'freezegun.freeze_time', (['DAY_BEFORE_EXPIRE'], {}), '(DAY_BEFORE_EXPIRE)\n', (6011, 6030), False, 'import freezegun\n'), ((6406, 6450), 'freezegun.freeze_time', 'freezegun.freeze_time', (['TWO_DAY_BEFORE_EXPIRE'], {}), '(TWO_DAY_BEFORE_EXPIRE)\n', (6427, 6450), False, 'import freezegun\n'), ((7004, 7048), 'freezegun.freeze_time', 'freezegun.freeze_time', (['TWO_DAY_BEFORE_EXPIRE'], {}), '(TWO_DAY_BEFORE_EXPIRE)\n', (7025, 7048), False, 'import freezegun\n'), ((8549, 8593), 'freezegun.freeze_time', 'freezegun.freeze_time', (['TWO_DAY_BEFORE_EXPIRE'], {}), '(TWO_DAY_BEFORE_EXPIRE)\n', (8570, 8593), False, 'import freezegun\n'), ((9975, 10019), 'freezegun.freeze_time', 'freezegun.freeze_time', (['TWO_DAY_BEFORE_EXPIRE'], {}), '(TWO_DAY_BEFORE_EXPIRE)\n', (9996, 10019), False, 'import freezegun\n'), ((10584, 10628), 'freezegun.freeze_time', 'freezegun.freeze_time', (['TWO_DAY_BEFORE_EXPIRE'], {}), '(TWO_DAY_BEFORE_EXPIRE)\n', (10605, 10628), False, 'import freezegun\n'), ((11321, 11365), 'freezegun.freeze_time', 'freezegun.freeze_time', (['TWO_DAY_BEFORE_EXPIRE'], {}), '(TWO_DAY_BEFORE_EXPIRE)\n', (11342, 11365), False, 'import freezegun\n'), ((11866, 11910), 'freezegun.freeze_time', 'freezegun.freeze_time', (['TWO_DAY_BEFORE_EXPIRE'], {}), '(TWO_DAY_BEFORE_EXPIRE)\n', (11887, 11910), False, 'import freezegun\n'), ((12524, 12568), 'freezegun.freeze_time', 'freezegun.freeze_time', (['TWO_DAY_BEFORE_EXPIRE'], {}), 
'(TWO_DAY_BEFORE_EXPIRE)\n', (12545, 12568), False, 'import freezegun\n'), ((13302, 13346), 'freezegun.freeze_time', 'freezegun.freeze_time', (['TWO_DAY_BEFORE_EXPIRE'], {}), '(TWO_DAY_BEFORE_EXPIRE)\n', (13323, 13346), False, 'import freezegun\n'), ((927, 990), 'django.core.management.call_command', 'call_command', (['"""email_users_with_expiring_assignments"""'], {}), "('email_users_with_expiring_assignments', **kwargs)\n", (939, 990), False, 'from django.core.management import call_command\n'), ((1111, 1206), 'model_bakery.baker.make', 'baker.make', (['"""VolProspectAssignment"""'], {'user': 'cambridge_leader_user', 'person': 'cambridge_prospect'}), "('VolProspectAssignment', user=cambridge_leader_user, person=\n cambridge_prospect)\n", (1121, 1206), False, 'from model_bakery import baker\n'), ((1446, 1545), 'model_bakery.baker.make', 'baker.make', (['"""VolProspectAssignment"""'], {'user': 'hayes_valley_leader_user', 'person': 'california_prospect'}), "('VolProspectAssignment', user=hayes_valley_leader_user, person=\n california_prospect)\n", (1456, 1545), False, 'from model_bakery import baker\n'), ((1828, 1926), 'model_bakery.baker.make', 'baker.make', (['"""VolProspectAssignment"""'], {'user': 'hayes_valley_leader_user', 'person': 'cambridge_prospect'}), "('VolProspectAssignment', user=hayes_valley_leader_user, person=\n cambridge_prospect)\n", (1838, 1926), False, 'from model_bakery import baker\n'), ((2197, 2293), 'model_bakery.baker.make', 'baker.make', (['"""VolProspectAssignment"""'], {'user': 'cambridge_leader_user', 'person': 'california_prospect'}), "('VolProspectAssignment', user=cambridge_leader_user, person=\n california_prospect)\n", (2207, 2293), False, 'from model_bakery import baker\n'), ((2529, 2625), 'model_bakery.baker.make', 'baker.make', (['"""VolProspectAssignment"""'], {'user': 'cambridge_leader_user', 'person': 'somerville_prospect'}), "('VolProspectAssignment', user=cambridge_leader_user, person=\n somerville_prospect)\n", (2539, 2625), False, 'from model_bakery import baker\n'), ((3522, 3554), 'supportal.app.models.EmailSend.objects.get', 'EmailSend.objects.get', ([], {'user': 'user'}), '(user=user)\n', (3543, 3554), False, 'from supportal.app.models import EmailSend\n'), ((4797, 4807), 'io.StringIO', 'StringIO', ([], {}), '()\n', (4805, 4807), False, 'from io import StringIO\n'), ((5477, 5487), 'io.StringIO', 'StringIO', ([], {}), '()\n', (5485, 5487), False, 'from io import StringIO\n'), ((6130, 6140), 'io.StringIO', 'StringIO', ([], {}), '()\n', (6138, 6140), False, 'from io import StringIO\n'), ((6581, 6591), 'io.StringIO', 'StringIO', ([], {}), '()\n', (6589, 6591), False, 'from io import StringIO\n'), ((7203, 7213), 'io.StringIO', 'StringIO', ([], {}), '()\n', (7211, 7213), False, 'from io import StringIO\n'), ((8765, 8775), 'io.StringIO', 'StringIO', ([], {}), '()\n', (8773, 8775), False, 'from io import StringIO\n'), ((10185, 10195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (10193, 10195), False, 'from io import StringIO\n'), ((10897, 10907), 'io.StringIO', 'StringIO', ([], {}), '()\n', (10905, 10907), False, 'from io import StringIO\n'), ((11507, 11536), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'timezone.utc'}), '(tz=timezone.utc)\n', (11519, 11536), False, 'from datetime import datetime, timezone\n'), ((11590, 11600), 'io.StringIO', 'StringIO', ([], {}), '()\n', (11598, 11600), False, 'from io import StringIO\n'), ((12020, 12142), 'supportal.app.models.EmailSend.objects.create', 'EmailSend.objects.create', ([], {'user': 
'first_cambridge_assignment.user', 'template_name': 'EmailSend.EXPIRING_PROSPECTS', 'payload': '{}'}), '(user=first_cambridge_assignment.user,\n template_name=EmailSend.EXPIRING_PROSPECTS, payload={})\n', (12044, 12142), False, 'from supportal.app.models import EmailSend\n'), ((12248, 12258), 'io.StringIO', 'StringIO', ([], {}), '()\n', (12256, 12258), False, 'from io import StringIO\n'), ((12673, 12789), 'supportal.app.models.EmailSend.objects.create', 'EmailSend.objects.create', ([], {'user': 'first_cambridge_assignment.user', 'template_name': 'EmailSend.INVITE_EMAIL', 'payload': '{}'}), '(user=first_cambridge_assignment.user,\n template_name=EmailSend.INVITE_EMAIL, payload={})\n', (12697, 12789), False, 'from supportal.app.models import EmailSend\n'), ((12895, 12905), 'io.StringIO', 'StringIO', ([], {}), '()\n', (12903, 12905), False, 'from io import StringIO\n'), ((13830, 13840), 'io.StringIO', 'StringIO', ([], {}), '()\n', (13838, 13840), False, 'from io import StringIO\n'), ((14113, 14123), 'io.StringIO', 'StringIO', ([], {}), '()\n', (14121, 14123), False, 'from io import StringIO\n'), ((4905, 4984), 'unittest.mock.patch', 'mock.patch', (['"""supportal.app.management.commands.base_email_command.EmailService"""'], {}), "('supportal.app.management.commands.base_email_command.EmailService')\n", (4915, 4984), False, 'from unittest import mock\n'), ((5585, 5664), 'unittest.mock.patch', 'mock.patch', (['"""supportal.app.management.commands.base_email_command.EmailService"""'], {}), "('supportal.app.management.commands.base_email_command.EmailService')\n", (5595, 5664), False, 'from unittest import mock\n'), ((6601, 6680), 'unittest.mock.patch', 'mock.patch', (['"""supportal.app.management.commands.base_email_command.EmailService"""'], {}), "('supportal.app.management.commands.base_email_command.EmailService')\n", (6611, 6680), False, 'from unittest import mock\n'), ((7223, 7302), 'unittest.mock.patch', 'mock.patch', (['"""supportal.app.management.commands.base_email_command.EmailService"""'], {}), "('supportal.app.management.commands.base_email_command.EmailService')\n", (7233, 7302), False, 'from unittest import mock\n'), ((8785, 8864), 'unittest.mock.patch', 'mock.patch', (['"""supportal.app.management.commands.base_email_command.EmailService"""'], {}), "('supportal.app.management.commands.base_email_command.EmailService')\n", (8795, 8864), False, 'from unittest import mock\n'), ((10205, 10284), 'unittest.mock.patch', 'mock.patch', (['"""supportal.app.management.commands.base_email_command.EmailService"""'], {}), "('supportal.app.management.commands.base_email_command.EmailService')\n", (10215, 10284), False, 'from unittest import mock\n'), ((10918, 10997), 'unittest.mock.patch', 'mock.patch', (['"""supportal.app.management.commands.base_email_command.EmailService"""'], {}), "('supportal.app.management.commands.base_email_command.EmailService')\n", (10928, 10997), False, 'from unittest import mock\n'), ((12916, 12995), 'unittest.mock.patch', 'mock.patch', (['"""supportal.app.management.commands.base_email_command.EmailService"""'], {}), "('supportal.app.management.commands.base_email_command.EmailService')\n", (12926, 12995), False, 'from unittest import mock\n'), ((3456, 3491), 'supportal.app.models.EmailSend.objects.filter', 'EmailSend.objects.filter', ([], {'user': 'user'}), '(user=user)\n', (3480, 3491), False, 'from supportal.app.models import EmailSend\n'), ((4819, 4881), 'supportal.app.models.EmailSend.objects.filter', 'EmailSend.objects.filter', ([], {'user': 
'first_cambridge_assignment.user'}), '(user=first_cambridge_assignment.user)\n', (4843, 4881), False, 'from supportal.app.models import EmailSend\n'), ((5135, 5158), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (5156, 5158), False, 'from supportal.app.models import EmailSend\n'), ((5499, 5561), 'supportal.app.models.EmailSend.objects.filter', 'EmailSend.objects.filter', ([], {'user': 'first_cambridge_assignment.user'}), '(user=first_cambridge_assignment.user)\n', (5523, 5561), False, 'from supportal.app.models import EmailSend\n'), ((5804, 5827), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (5825, 5827), False, 'from supportal.app.models import EmailSend\n'), ((6201, 6224), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (6222, 6224), False, 'from supportal.app.models import EmailSend\n'), ((6249, 6311), 'supportal.app.models.EmailSend.objects.filter', 'EmailSend.objects.filter', ([], {'user': 'first_cambridge_assignment.user'}), '(user=first_cambridge_assignment.user)\n', (6273, 6311), False, 'from supportal.app.models import EmailSend\n'), ((6782, 6805), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (6803, 6805), False, 'from supportal.app.models import EmailSend\n'), ((7404, 7427), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (7425, 7427), False, 'from supportal.app.models import EmailSend\n'), ((9011, 9034), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (9032, 9034), False, 'from supportal.app.models import EmailSend\n'), ((10395, 10418), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (10416, 10418), False, 'from supportal.app.models import EmailSend\n'), ((11099, 11122), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (11120, 11122), False, 'from supportal.app.models import EmailSend\n'), ((11661, 11684), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (11682, 11684), False, 'from supportal.app.models import EmailSend\n'), ((11709, 11771), 'supportal.app.models.EmailSend.objects.filter', 'EmailSend.objects.filter', ([], {'user': 'first_cambridge_assignment.user'}), '(user=first_cambridge_assignment.user)\n', (11733, 11771), False, 'from supportal.app.models import EmailSend\n'), ((12319, 12342), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (12340, 12342), False, 'from supportal.app.models import EmailSend\n'), ((12367, 12429), 'supportal.app.models.EmailSend.objects.filter', 'EmailSend.objects.filter', ([], {'user': 'first_cambridge_assignment.user'}), '(user=first_cambridge_assignment.user)\n', (12391, 12429), False, 'from supportal.app.models import EmailSend\n'), ((13097, 13120), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (13118, 13120), False, 'from supportal.app.models import EmailSend\n'), ((13145, 13207), 'supportal.app.models.EmailSend.objects.filter', 'EmailSend.objects.filter', ([], {'user': 'first_cambridge_assignment.user'}), '(user=first_cambridge_assignment.user)\n', (13169, 13207), False, 'from supportal.app.models import EmailSend\n'), ((13950, 13973), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (13971, 13973), False, 'from supportal.app.models import 
EmailSend\n'), ((14184, 14207), 'supportal.app.models.EmailSend.objects.all', 'EmailSend.objects.all', ([], {}), '()\n', (14205, 14207), False, 'from supportal.app.models import EmailSend\n')] |
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from eplus.envs import pyEp
import socket
from eplus.envs.socket_builder import socket_builder
import numpy as np
import os
class DataCenterEnv(gym.Env):
def __init__(self, config):
#timestep=12, days=1, eplus_path=None,
# weather_file = 'weather/SPtMasterTable_587017_2012_amy.epw'):
cur_dir = os.path.dirname(__file__)
#print("File directory: ", cur_dir)
# buildings/1ZoneDataCenter/1ZoneDataCenter.idf is the EnergyPlus file
# used for this environment. The 1ZoneDataCenter folder also contains
# variables.cfg which configures the external input and output
# variables
self.idf_file = cur_dir + '/buildings/1ZoneDataCenter/1ZoneDataCenter.idf'
# EnergyPlus weather file
if "weather_file" in config:
self.weather_file = cur_dir + '/' + config["weather_file"]
else:
self.weather_file = cur_dir + '/weather/SPtMasterTable_587017_2012_amy.epw'
#self.weather_file = cur_dir + '/weather/SPtMasterTable_587017_2012_amy.epw'
if "eplus_path" in config:
self.eplus_path = config["eplus_path"]
else:
# Using EnergyPlus version 8.80, path to the executable
# Assuming Mac
self.eplus_path = '/Applications/EnergyPlus-8-8-0/'
# EnergyPlus number of timesteps in an hour
if "timestep" in config:
self.epTimeStep = config["timestep"]
else:
self.epTimeStep = 12
# EnergyPlus number of simulation days
if "days" in config:
self.simDays = config["days"]
else:
self.simDays = 1
# Number of steps per day
self.DAYSTEPS = int(24 * self.epTimeStep)
# Total number of steps
self.MAXSTEPS = int(self.simDays * self.DAYSTEPS)
# Time difference between each step in seconds
self.deltaT = (60/self.epTimeStep)*60
# Outputs given by EnergyPlus, defined in variables.cfg
self.outputs = []
# Inputs expected by EnergyPlus, defined in variables.cfg
self.inputs = []
# Current step of the simulation
self.kStep = 0
# Instance of EnergyPlus simulation
self.ep = None
# state can be all the inputs required to make a control decision
# getting all the outputs coming from EnergyPlus for the time being
self.observation_space = spaces.Box(np.array([0, -50, 0]), #zone temp, outdoor drybulb temp, relative humidity
np.array([60, 70, 100]), dtype=np.float32)
# actions are all the control inputs
#self.action_space = spaces.Tuple(( #spaces.Box(low=22, high=27, shape=(1,),dtype=np.float32), #cooling setpoint
# spaces.Box(low=6, high=7, shape=(1,),dtype=np.float32), #chiller setpoint
# spaces.Box(low=0, high=1, shape=(1,),dtype=np.float32) #lighting setpoint
# ))
        self.clg_min = 20 #cooling setpoint min in Celsius
        self.clg_max = 35 #cooling setpoint max in Celsius
        self.htg_min = 5 #heating setpoint min in Celsius
        self.htg_max = 20 #heating setpoint max in Celsius
#self.action_space = spaces.Box(np.array([self.clg_min,self.htg_min]),
# np.array([self.clg_max, self.htg_max]), dtype=np.float32)
# Normalized action space
self.action_space = spaces.Box(np.array([0,0]),
np.array([1,1]), dtype=np.float32)
def step(self, action):
# while(self.kStep < self.MAXSTEPS):
# current time from start of simulation
time = self.kStep * self.deltaT
# current time from start of day
dayTime = time % 86400
if dayTime == 0:
print("Day: ", int(self.kStep/self.DAYSTEPS)+1)
#inputs should be same as actions
#bring the actions in the correct range
#For Ray: assuming mean 0 and std dev 1 by ray
#action[0] = action[0]*(self.clg_max - self.clg_min)+(self.clg_min+self.clg_max)/2.0
#action[1] = action[1]*(self.htg_max - self.htg_min)+(self.htg_min+self.htg_max)/2.0
#For Coach: input is 0 to 1 range
action[0] = action[0]*(self.clg_max - self.clg_min)+(self.clg_min)
action[1] = action[1]*(self.htg_max - self.htg_min)+(self.htg_min)
#force action to be within limits
cooling_setpoint = np.clip(action, self.clg_min, self.clg_max)[0]
heating_setpoint = np.clip(action, self.htg_min, self.htg_max)[1]
self.inputs = [cooling_setpoint, heating_setpoint]
input_packet = self.ep.encode_packet_simple(self.inputs, time)
self.ep.write(input_packet)
#after EnergyPlus runs the simulation step, it returns the outputs
output_packet = self.ep.read()
self.outputs = self.ep.decode_packet_simple(output_packet)
#print("Outputs:", self.outputs)
if not self.outputs:
print("Outputs:", self.outputs)
print("Actions:", action)
next_state = self.reset()
return next_state, 0, False, {}
# reward needs to be a combination of energy and comfort requirement
energy_coeff = -0.00001
heating_coeff = -100
cooling_coeff = -100
energy = self.outputs[0]
zone_temperature = self.outputs[1] #taking mid-zone 2 as an example
        heating_setpoint = 15 #fixed lower limit in Celsius
        cooling_setpoint = 30 #fixed upper limit in Celsius
heating_penalty = max(heating_setpoint - zone_temperature, 0)
cooling_penalty = max(zone_temperature - cooling_setpoint, 0)
# punish if action is out of limits
action_penalty_coeff = -100
max_penalty = max(self.clg_min - action[0], 0)
min_penalty = max(action[0] - self.clg_max, 0)
action_penalty = action_penalty_coeff * (max_penalty + min_penalty)
max_penalty = max(self.htg_min - action[1], 0)
min_penalty = max(action[1] - self.htg_max, 0)
action_penalty += action_penalty_coeff * (max_penalty + min_penalty)
# final reward
reward = energy_coeff * energy \
+ heating_coeff * heating_penalty \
+ cooling_coeff * cooling_penalty \
+ action_penalty
# state can be all the inputs required to make a control decision
# zone temp, outside drybulb temp, outside wetbulb temp, relative humidity
next_state = np.array([self.outputs[1], self.outputs[2], self.outputs[4]])
# fake state space
#next_state = np.array([3, 2, 1, 0])
#print("energy: %.2f, reward: %.2f, action: %.2f, %.2f" \
# % (energy, reward, action[0], action[1]))
#print("zone temp: %.2f, drybulb: %.2f, humidity: %.2f"\
# %tuple(next_state))
# increment simulation step count
self.kStep += 1
# done when number of steps of simulation reaches its maximum (e.g. 1 day)
done = False
if self.kStep >= (self.MAXSTEPS):
#requires one more step to close the simulation
input_packet = self.ep.encode_packet_simple(self.inputs, time)
self.ep.write(input_packet)
#output is empty in the final step
#but it is required to read this output for termination
output_packet = self.ep.read()
last_output = self.ep.decode_packet_simple(output_packet)
print("Finished simulation")
print("Last action: ", action)
print("Last reward: ", reward)
done = True
self.ep.close()
self.ep = None
# extra information we want to pass
info = {}
# print("State:", next_state, "Reward:", reward)
return next_state, reward, done, info
def reset(self):
# stop existing energyplus simulation
if self.ep:
print("Closing the old simulation and socket.")
self.ep.close() #needs testing: check if it stops the simulation
self.ep = None
# start new simulation
print("Starting a new simulation..")
self.kStep = 0
idf_dir = os.path.dirname(self.idf_file)
builder = socket_builder(idf_dir)
configs = builder.build()
self.ep = pyEp.ep_process('localhost', configs[0], self.idf_file, self.weather_file, self.eplus_path)
# read the initial outputs from EnergyPlus
# these outputs are from warmup phase, so this does not count as a simulation step
self.outputs = self.ep.decode_packet_simple(self.ep.read())
return np.array([self.outputs[1], self.outputs[2], self.outputs[4]])
#return np.array([3,2,1,0])
def render(self, mode='human', close=False):
pass
| [
"eplus.envs.pyEp.ep_process",
"numpy.clip",
"eplus.envs.socket_builder.socket_builder",
"os.path.dirname",
"numpy.array"
] | [((425, 450), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (440, 450), False, 'import os\n'), ((6761, 6822), 'numpy.array', 'np.array', (['[self.outputs[1], self.outputs[2], self.outputs[4]]'], {}), '([self.outputs[1], self.outputs[2], self.outputs[4]])\n', (6769, 6822), True, 'import numpy as np\n'), ((8513, 8543), 'os.path.dirname', 'os.path.dirname', (['self.idf_file'], {}), '(self.idf_file)\n', (8528, 8543), False, 'import os\n'), ((8562, 8585), 'eplus.envs.socket_builder.socket_builder', 'socket_builder', (['idf_dir'], {}), '(idf_dir)\n', (8576, 8585), False, 'from eplus.envs.socket_builder import socket_builder\n'), ((8640, 8735), 'eplus.envs.pyEp.ep_process', 'pyEp.ep_process', (['"""localhost"""', 'configs[0]', 'self.idf_file', 'self.weather_file', 'self.eplus_path'], {}), "('localhost', configs[0], self.idf_file, self.weather_file,\n self.eplus_path)\n", (8655, 8735), False, 'from eplus.envs import pyEp\n'), ((8958, 9019), 'numpy.array', 'np.array', (['[self.outputs[1], self.outputs[2], self.outputs[4]]'], {}), '([self.outputs[1], self.outputs[2], self.outputs[4]])\n', (8966, 9019), True, 'import numpy as np\n'), ((2563, 2584), 'numpy.array', 'np.array', (['[0, -50, 0]'], {}), '([0, -50, 0])\n', (2571, 2584), True, 'import numpy as np\n'), ((2682, 2705), 'numpy.array', 'np.array', (['[60, 70, 100]'], {}), '([60, 70, 100])\n', (2690, 2705), True, 'import numpy as np\n'), ((3657, 3673), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (3665, 3673), True, 'import numpy as np\n'), ((3709, 3725), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (3717, 3725), True, 'import numpy as np\n'), ((4671, 4714), 'numpy.clip', 'np.clip', (['action', 'self.clg_min', 'self.clg_max'], {}), '(action, self.clg_min, self.clg_max)\n', (4678, 4714), True, 'import numpy as np\n'), ((4745, 4788), 'numpy.clip', 'np.clip', (['action', 'self.htg_min', 'self.htg_max'], {}), '(action, self.htg_min, self.htg_max)\n', (4752, 4788), True, 'import numpy as np\n')] |
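A minimal sketch of how this step/reset interface would typically be driven. The environment's class name is not visible in this fragment, so the env handle, the episode length, and the random policy below are all illustrative assumptions:
import numpy as np

def run_episode(env, max_steps=96):
    # env: an instance of the EnergyPlus environment above (hypothetical handle)
    state = env.reset()
    total_reward = 0.0
    for _ in range(max_steps):
        # step() rescales each component from [0, 1] to the physical setpoint range
        action = np.random.uniform(0.0, 1.0, size=2)
        state, reward, done, _info = env.step(action)
        total_reward += reward
        if done:
            break
    return total_reward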
"""
Copyright (c) 2018 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import json
from msfbe.webmodel import BaseHandler, service_handler
import psycopg2
class CountiesColumns:
COUNTY_ID = 0
NAME = 1
AREA = 2
PERIMETER = 3
CACOA = 4
CACOA_ID = 5
DSSLV = 6
CONUM = 7
class SimpleResult(object):
def __init__(self, result):
self.result = result
def toJson(self):
return json.dumps(self.result)
@service_handler
class CountiesHandlerImpl(BaseHandler):
name = "Counties Service"
path = "/counties"
description = ""
params = {}
singleton = True
def __init__(self):
BaseHandler.__init__(self)
def __query(self, config, maxLat, maxLon, minLat, minLon):
sql = """
select
county_id,
name,
area,
perimeter,
cacoa,
cacoa_id,
dsslv,
conum
from
counties as c
where
ST_Intersects(c.county_shape, ST_MakeEnvelope(%s, %s, %s, %s, 4326));
"""
conn = psycopg2.connect(dbname=config.get("database", "db.database"),
user=config.get("database", "db.username"),
                     password=config.get("database", "db.password"),
host=config.get("database", "db.endpoint"),
port=config.get("database", "db.port"))
cur = conn.cursor()
# Query
cur.execute(sql,
(
minLon,
minLat,
maxLon,
maxLat
)
)
results = cur.fetchall()
cur.close()
conn.close()
return results
def __format_results(self, rows):
results = []
for row in rows:
results.append({
"county_id": row[CountiesColumns.COUNTY_ID],
"name": row[CountiesColumns.NAME],
"area": row[CountiesColumns.AREA],
"perimeter": row[CountiesColumns.PERIMETER],
"cacoa": row[CountiesColumns.CACOA],
"cacoa_id": row[CountiesColumns.CACOA_ID],
"dsslv": row[CountiesColumns.DSSLV],
"conum": row[CountiesColumns.CONUM],
})
return results
def handle(self, computeOptions, **args):
maxLat = computeOptions.get_decimal_arg("maxLat", 90)
maxLon = computeOptions.get_decimal_arg("maxLon", 180)
minLat = computeOptions.get_decimal_arg("minLat", -90)
minLon = computeOptions.get_decimal_arg("minLon", -180)
rows = self.__query(args["webconfig"], maxLat, maxLon, minLat, minLon)
results = self.__format_results(rows)
return SimpleResult(results)
| [
"json.dumps",
"msfbe.webmodel.BaseHandler.__init__"
] | [((486, 509), 'json.dumps', 'json.dumps', (['self.result'], {}), '(self.result)\n', (496, 509), False, 'import json\n'), ((712, 738), 'msfbe.webmodel.BaseHandler.__init__', 'BaseHandler.__init__', (['self'], {}), '(self)\n', (732, 738), False, 'from msfbe.webmodel import BaseHandler, service_handler\n')] |
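The core pattern in __query — a parameterized PostGIS bounding-box predicate — can be sketched standalone. This assumes the same counties table and the PostGIS extension are available; the commented-out connection settings are placeholders, not values from the handler's config:
import psycopg2

def counties_in_bbox(conn, min_lon, min_lat, max_lon, max_lat):
    sql = """
        SELECT county_id, name
        FROM counties AS c
        WHERE ST_Intersects(c.county_shape,
                            ST_MakeEnvelope(%s, %s, %s, %s, 4326));
    """
    with conn.cursor() as cur:
        # psycopg2 binds %s parameters server-side; never format SQL strings manually
        cur.execute(sql, (min_lon, min_lat, max_lon, max_lat))
        return cur.fetchall()

# conn = psycopg2.connect(dbname="gis", user="reader")      # placeholder credentials
# rows = counties_in_bbox(conn, -125.0, 32.0, -114.0, 42.0)  # roughly California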
import time
def tic():
 # Homemade version of MATLAB's tic and toc functions
global startTime_for_tictoc
startTime_for_tictoc = time.time()
def toc():
if 'startTime_for_tictoc' in globals():
print("Elapsed time is " + str(time.time() - startTime_for_tictoc) + " seconds.")
else:
print("Toc: start time not set") | [
"time.time"
] | [((138, 149), 'time.time', 'time.time', ([], {}), '()\n', (147, 149), False, 'import time\n'), ((246, 257), 'time.time', 'time.time', ([], {}), '()\n', (255, 257), False, 'import time\n')] |
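Example usage of the tic/toc pair above; the timed expression is arbitrary:
tic()
total = sum(i * i for i in range(1_000_000))
toc()  # prints something like "Elapsed time is 0.07... seconds."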
'''
* This Software is under the MIT License
* Refer to LICENSE or https://opensource.org/licenses/MIT for more information
* Written by ©<NAME> 2020
'''
from skimage import img_as_float
from skimage import io, color, morphology
import matplotlib.pyplot as plt
def get_skeleton_and_thin(input_image):
image = img_as_float(color.rgb2gray(io.imread(input_image)))
#image = img_as_float(color.rgb2gray(input_image))
image_binary = image < 0.5
out_skeletonize = morphology.skeletonize(image_binary)
out_thin = morphology.thin(image_binary)
 f, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(10, 3))
 ax0.imshow(image, cmap='gray')
 ax1.imshow(out_skeletonize, cmap='gray')
 ax2.imshow(out_thin, cmap='gray')
 # subtracting the boolean masks from 255 inverts them: dark strokes on white
 plt.imsave('thinned_output.png', 255 - out_thin, cmap='gray')
 plt.imsave('skeletonized_output.png', 255 - out_skeletonize, cmap='gray')
 return out_skeletonize, out_thin
| [
"matplotlib.pyplot.imsave",
"skimage.morphology.thin",
"skimage.io.imread",
"matplotlib.pyplot.subplots",
"skimage.morphology.skeletonize"
] | [((466, 502), 'skimage.morphology.skeletonize', 'morphology.skeletonize', (['image_binary'], {}), '(image_binary)\n', (488, 502), False, 'from skimage import io, color, morphology\n'), ((515, 544), 'skimage.morphology.thin', 'morphology.thin', (['image_binary'], {}), '(image_binary)\n', (530, 544), False, 'from skimage import io, color, morphology\n'), ((569, 604), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(10, 3)'}), '(1, 3, figsize=(10, 3))\n', (581, 604), True, 'import matplotlib.pyplot as plt\n'), ((607, 668), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""thinned_output.png"""', '(255 - out_thin)'], {'cmap': '"""gray"""'}), "('thinned_output.png', 255 - out_thin, cmap='gray')\n", (617, 668), True, 'import matplotlib.pyplot as plt\n'), ((667, 740), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""skeletonized_output.png"""', '(255 - out_skeletonize)'], {'cmap': '"""gray"""'}), "('skeletonized_output.png', 255 - out_skeletonize, cmap='gray')\n", (677, 740), True, 'import matplotlib.pyplot as plt\n'), ((342, 364), 'skimage.io.imread', 'io.imread', (['input_image'], {}), '(input_image)\n', (351, 364), False, 'from skimage import io, color, morphology\n')] |
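A possible call site for the function above; the file name is a placeholder for any dark-on-light image:
skeleton, thinned = get_skeleton_and_thin('glyph.png')  # hypothetical input path
print(skeleton.shape, skeleton.dtype)  # boolean masks the same shape as the input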
from csv import DictReader
from xml.etree import ElementTree as ET
def csv2html_robust(txt, header=True, attr=None):
# Use DictReader because, despite what the docs say, reader() doesn't
# return an object with .fieldnames
# (DictReader expects an iterable that returns lines, so split on \n)
 attr = attr or {}  # guard: the default attr=None would otherwise crash on attr.get()
 reader = DictReader(txt.split('\n'))
table = ET.Element("TABLE", **attr.get('TABLE', {}))
thead_tr = ET.SubElement(
ET.SubElement(table, "THEAD", **attr.get('THEAD', {})),
"TR")
tbody = ET.SubElement(table, "TBODY", **attr.get('TBODY', {}))
if header:
for name in reader.fieldnames:
ET.SubElement(thead_tr, "TD").text = name
for row in reader:
tr_elem = ET.SubElement(tbody, "TR", **attr.get('TR', {}))
# Use reader.fieldnames to query `row` in the correct order.
# (`row` isn't an OrderedDict prior to Python 3.6)
for field in reader.fieldnames:
td_elem = ET.SubElement(tr_elem, "TD", **attr.get('TD', {}))
td_elem.text = row[field]
return ET.tostring(table, method='html')
csvtxt = "item,qty\nspam,42\neggs,7"  # placeholder sample input so the call below runs
htmltxt = csv2html_robust(csvtxt, True, {
'TABLE': {'border': "1", 'summary': "csv2html extra program output"},
'THEAD': {'bgcolor': "yellow"},
'TBODY': {'bgcolor': "orange"}
})
print(htmltxt.decode('utf8'))
| [
"xml.etree.ElementTree.tostring",
"xml.etree.ElementTree.SubElement"
] | [((1072, 1105), 'xml.etree.ElementTree.tostring', 'ET.tostring', (['table'], {'method': '"""html"""'}), "(table, method='html')\n", (1083, 1105), True, 'from xml.etree import ElementTree as ET\n'), ((647, 676), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['thead_tr', '"""TD"""'], {}), "(thead_tr, 'TD')\n", (660, 676), True, 'from xml.etree import ElementTree as ET\n')] |
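One reason to build the table through ElementTree rather than string concatenation is that markup inside cell values gets escaped instead of interpreted. A quick check with a made-up input:
print(csv2html_robust("note\n<b>&amp;</b>", True, {}).decode('utf8'))
# the cell comes out as escaped text, e.g. <TD>&lt;b&gt;&amp;amp;&lt;/b&gt;</TD>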
# -*- coding: utf-8 -*-
from naomi import testutils
from . import snr_vad
class TestSNR_VADPlugin(testutils.Test_VADPlugin):
def setUp(self):
super(TestSNR_VADPlugin, self).setUp()
self.plugin = testutils.get_plugin_instance(
snr_vad.SNRPlugin,
self._test_input
)
# prime by running through one wav file
self.map_file()
| [
"naomi.testutils.get_plugin_instance"
] | [((218, 284), 'naomi.testutils.get_plugin_instance', 'testutils.get_plugin_instance', (['snr_vad.SNRPlugin', 'self._test_input'], {}), '(snr_vad.SNRPlugin, self._test_input)\n', (247, 284), False, 'from naomi import testutils\n')] |