| id | content |
|---|---|
124730
|
import numpy as np
a_soll = np.zeros((1000,20), dtype=np.complex64)
for ind in range(a_soll.shape[0]):
for jnd in range(a_soll.shape[1]):
i = ind + 1
j = jnd + 1
a_soll[ind,jnd] = - i * 0.3 + 1j*( j*j + 0.4)
b_soll = np.zeros(1200, dtype=np.complex64)
for ind in range(b_soll.shape[0]):
i = ind + 1
b_soll[ind] = - i * 0.3 + 1j*( i + 0.4)
a = np.load("mtx.npy")
b = np.load("vec.npy")
print("A: ")
print(np.max(np.abs(a - a_soll) / np.abs(a_soll)))
print("B: ")
print(np.max(np.abs(b - b_soll) / np.abs(b_soll)))
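# Added sketch (not part of the original check): the reference matrix can also be built
# without explicit loops by broadcasting the 1-based row/column indices.
i_idx = np.arange(1, a_soll.shape[0] + 1)[:, None]
j_idx = np.arange(1, a_soll.shape[1] + 1)[None, :]
a_ref = (-i_idx * 0.3 + 1j * (j_idx * j_idx + 0.4)).astype(np.complex64)
assert np.allclose(a_ref, a_soll)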
|
124740
|
hidden_dim = 128
dilation = [1,2,4,8,16,32,64,128,256,512]
sample_rate = 16000
timestep = 6080
is_training = True
use_mulaw = True
batch_size = 1
num_epochs = 10000
save_dir = './logdir'
test_data = 'test.wav'
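# Added note (assumption: these constants configure a WaveNet-style stack of dilated causal
# convolutions with kernel size 2): the dilation list implies a receptive field of
# sum(dilation) + 1 = 1024 samples, i.e. 1024 / sample_rate = 0.064 s of audio at 16 kHz.
receptive_field = sum(dilation) + 1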
|
124784
|
import argparse
import os
import json
import copy
import pickle
import sys
import multiprocessing as mp
from pathlib import Path
from typing import Dict, List
from tqdm import tqdm
from collections import defaultdict, Counter
import logging
logger = logging.getLogger("root") # pylint: disable=invalid-name
logger.setLevel(logging.DEBUG)
from allennlp.data.dataset_readers.semantic_parsing.wikitables import util as wikitables_util
from allennlp.data.tokenizers.token import Token
from allennlp.semparse.domain_languages import ParsingError, ExecutionError
from weaksp.reader.util import load_jsonl, load_jsonl_table
from weaksp.reader.reader import WTReader
from weaksp.sempar.action_walker import ActionSpaceWalker
from weaksp.sempar.context.table_question_context import TableQuestionContext
from weaksp.sempar.domain_languages.wikitable_language import WikiTablesLanguage
from weaksp.sempar.domain_languages.wikitable_abstract_language import WikiTableAbstractLanguage
def prepare_corenlp(sqa_path, table_file, corenlp_input_output_list, corenlp_input_output):
table_dict = load_jsonl_table(table_file)
sqa_data = {}
train_examples = []
with open(os.path.join(sqa_path, 'train.tsv')) as infile:
for line in infile:
train_examples.append(line.strip().split('\t'))
test_examples = []
with open(os.path.join(sqa_path, 'test.tsv')) as infile:
for line in infile:
test_examples.append(line.strip().split('\t'))
examples = train_examples[1:] + test_examples[1:]
print('Number of SQA examples:', len(examples))
table_id_list = []
for example in examples:
qid = example[0]
annotator = example[1]
position = example[2]
question_id = f"{qid}_ann{annotator}_pos{position}"
question = example[3]
table_id = 't_'+example[4].split('/')[1].split('.')[0]
table_id_list.append(table_id)
table_lines = table_dict[table_id]["raw_lines"]
tokenized_question = [Token(token) for token in question.split()]
table_context = TableQuestionContext.read_from_lines(table_lines, tokenized_question)
answer_cell = eval(example[5])
answer_cell = sorted([eval(cell) for cell in answer_cell])
answer = []
try:
for cell in answer_cell:
row_index = cell[0]
column_index = cell[1]
column_name = table_context.column_index_to_name[column_index]
column_name = f"string_column:{column_name}"
cell_value = table_context.table_data[row_index][column_name]
answer.append(cell_value)
except Exception:
print(question_id, "cannot extract answers by answer cell; using answer string")
answer = eval(example[6].strip('"').replace('""','"')) # a list
if qid not in sqa_data:
sqa_data[qid] = {}
if annotator not in sqa_data[qid]:
sqa_data[qid][annotator] = []
sqa_data[qid][annotator].append((position, question, answer))
print('Number of Unique Tables:', len(set(table_id_list)))
print('Creating inputs for CoreNLP...', end='')
f_list = open(corenlp_input_output_list, 'w')
for qid in sqa_data:
for annotator in sqa_data[qid]:
sqa_data[qid][annotator] = sorted(sqa_data[qid][annotator], key=lambda x: x[0]) # sort by position
for position in range(len(sqa_data[qid][annotator])):
questions_position = [q[1] for q in sqa_data[qid][annotator][:position+1]]
questions_position.reverse()
question_merge = ' <s> '.join(questions_position)
fname = os.path.join(os.path.abspath(corenlp_input_output), '{}_ann{}_pos{}_question.txt'.format(qid, annotator, position))
f_list.write(fname+'\n')
with open(fname, 'w') as f:
f.write(question_merge+'\n')
answer = sqa_data[qid][annotator][position][2]
fname = os.path.join(os.path.abspath(corenlp_input_output), '{}_ann{}_pos{}_answer.txt'.format(qid, annotator, position))
with open(fname, 'w') as f:
f.write('\n'.join(answer)+'\n')
f_list.close()
print('Done')
return sqa_data
def create_raw_input_split(qid2tableid, split, corenlp_input_output, split_tagged, fraction=None):
# read split
sqa_data = {}
examples = []
with open(split) as infile:
cnt = 0
for line in infile:
cnt += 1
if cnt == 1:
continue
examples.append(line.strip().split('\t'))
for example in examples:
qid = example[0]
annotator = example[1]
position = example[2]
if qid not in sqa_data:
sqa_data[qid] = {}
if annotator not in sqa_data[qid]:
sqa_data[qid][annotator] = {}
sqa_data[qid][annotator][position] = {}
qid_list = list(sqa_data.keys())
if fraction:
import random
random.shuffle(qid_list)
fraction_num = int(fraction*len(qid_list))
qid_list = qid_list[:fraction_num]
for qid in qid_list:
for annotator in sqa_data[qid]:
for position in sqa_data[qid][annotator]:
# read corenlp_input_output
corenlp_input = os.path.join(corenlp_input_output, '{}_ann{}_pos{}_question.txt'.format(qid, annotator, position))
corenlp_output = os.path.join(corenlp_input_output, '{}_ann{}_pos{}_question.txt.json'.format(qid, annotator, position))
with open(corenlp_input) as infile:
questions = infile.read().strip()
with open(corenlp_output) as infile:
data = json.load(infile)
tokens = []
lemmaTokens = []
posTags = []
nerTags = []
nerValues = []
for sentence in data['sentences']:
for token in sentence['tokens']:
tokens.append(token['word'])
lemmaTokens.append(token['lemma'])
posTags.append(token['pos'])
nerTags.append(token['ner'])
if 'normalizedNER' in token:
nerValues.append(token['normalizedNER'])
else:
nerValues.append('')
corenlp_input = os.path.join(corenlp_input_output, '{}_ann{}_pos{}_answer.txt'.format(qid, annotator, position))
with open(corenlp_input) as infile:
answer_list = [line.strip() for line in infile.readlines()]
sqa_data[qid][annotator][position]['utterance'] = questions
sqa_data[qid][annotator][position]['context'] = qid2tableid[qid]
sqa_data[qid][annotator][position]['targetValue'] = '|'.join(answer_list)
sqa_data[qid][annotator][position]['tokens'] = '|'.join(tokens)
sqa_data[qid][annotator][position]['lemmaTokens'] = '|'.join(lemmaTokens)
sqa_data[qid][annotator][position]['posTags'] = '|'.join(posTags)
sqa_data[qid][annotator][position]['nerTags'] = '|'.join(nerTags)
sqa_data[qid][annotator][position]['nerValues'] = '|'.join(nerValues)
sqa_data[qid][annotator][position]['targetCanon'] = sqa_data[qid][annotator][position]['targetValue']
sqa_data[qid][annotator][position]['targetCanonType'] = 'undefined'
# output
with open(split_tagged, 'w') as f:
f.write('\t'.join(['id', 'utterance', 'context', 'targetValue', 'tokens',
'lemmaTokens', 'posTags', 'nerTags', 'nerValues', 'targetCanon', 'targetCanonType']))
f.write('\n')
for qid in qid_list:
for annotator in sqa_data[qid]:
for position in sqa_data[qid][annotator]:
f.write('\t'.join(['{}_ann{}_pos{}'.format(qid, annotator, position),
sqa_data[qid][annotator][position]['utterance'],
sqa_data[qid][annotator][position]['context'],
sqa_data[qid][annotator][position]['targetValue'],
sqa_data[qid][annotator][position]['tokens'],
sqa_data[qid][annotator][position]['lemmaTokens'],
sqa_data[qid][annotator][position]['posTags'],
sqa_data[qid][annotator][position]['nerTags'],
sqa_data[qid][annotator][position]['nerValues'],
sqa_data[qid][annotator][position]['targetCanon'],
sqa_data[qid][annotator][position]['targetCanonType']]))
f.write('\n')
def prepare_raw_input(sqa_path, corenlp_input_output, raw_input):
examples = []
with open(os.path.join(sqa_path, 'train.tsv')) as infile:
cnt = 0
for line in infile:
cnt += 1
if cnt == 1:
continue
examples.append(line.strip().split('\t'))
with open(os.path.join(sqa_path, 'test.tsv')) as infile:
cnt = 0
for line in infile:
cnt += 1
if cnt == 1:
continue
examples.append(line.strip().split('\t'))
qid2tableid = {}
for example in examples:
qid = example[0]
annotator = example[1]
position = example[2]
table_id = example[4].split('/')[1].split('.')[0].split('_')
qid2tableid[qid] = 'csv/{}-csv/{}.csv'.format(table_id[0],table_id[1])
train_split = os.path.join(sqa_path, 'random-split-1-train.tsv')
dev_split = os.path.join(sqa_path, 'random-split-1-dev.tsv')
test_split = os.path.join(sqa_path, 'test.tsv')
if not os.path.exists(os.path.join(raw_input, 'data')):
os.makedirs(os.path.join(raw_input, 'data'))
train_split_tagged = os.path.join(raw_input, 'data/training.tagged')
dev_split_tagged = os.path.join(raw_input, 'data/dev.tagged')
test_split_tagged = os.path.join(raw_input, 'data/test.tagged')
create_raw_input_split(qid2tableid, train_split, corenlp_input_output, train_split_tagged)
create_raw_input_split(qid2tableid, dev_split, corenlp_input_output, dev_split_tagged)
create_raw_input_split(qid2tableid, test_split, corenlp_input_output, test_split_tagged)
return
def cache_data(table_file, train_file, dev_file, test_file, embed_file, output_file):
tables = load_jsonl_table(table_file)
train_examples = load_jsonl(train_file)
dev_examples = load_jsonl(dev_file)
test_examples = load_jsonl(test_file)
wt_reader = WTReader(tables, train_examples, dev_examples, test_examples, embed_file)
wt_reader.gen_vocab()
wt_reader.gen_glove()
wt_reader.check()
with open(output_file, "wb") as f:
pickle.dump(wt_reader, f)
return
def coverage_example(example: Dict, table_lines: Dict, max_sketch_length: int) -> int :
question_id = example["id"]
output_file_pointer = open(os.path.join('data/processed_sqa/mp/', question_id), "w")
utterance = example["question"]
sketch_candidates = []
table_id = example["context"]
target_value, target_can = example["answer"]  # (targetValue, targetCan)
tokens = []
assert len(example["tokens"]) == len(example["processed_tokens"])
for t,p_t in zip(example["tokens"], example["processed_tokens"]):
if t in ['<START>', '<DECODE>']:
tokens.append(p_t)
else:
tokens.append(t)
example["tokens"] = tokens
tokenized_question = [ Token(token) for token in example["tokens"]]
context = TableQuestionContext.read_from_lines(table_lines, tokenized_question)
context.take_corenlp_entities(example["entities"])
world = WikiTableAbstractLanguage(context)
walker = ActionSpaceWalker(world)
print(f"{question_id} {utterance}", file=output_file_pointer)
print(f"Table: {table_id}", file=output_file_pointer)
sketch2lf = defaultdict(list)
all_logical_forms = walker.get_logical_forms_by_sketches(max_sketch_length, None)
# output the correct logical form
for sketch, logical_form in all_logical_forms:
sketch = world.action_sequence_to_logical_form(sketch)
if world.evaluate_logical_form(logical_form, target_value, target_can):
sketch2lf[sketch].append(logical_form)
question_id = example["id"]
utterance = example["question"]
if len(sketch2lf) == 0:
print("NO LOGICAL FORMS FOUND!", file=output_file_pointer)
coverage_counter = 0
else:
coverage_counter = 1
for sketch in sketch2lf:
print("Sketch:", sketch, file=output_file_pointer)
for lf in sketch2lf[sketch]:
print("\t", lf, file=output_file_pointer)
print(file=output_file_pointer)
print(file=output_file_pointer)
output_file_pointer.close()
return coverage_counter
def coverage(examples: Dict,
max_sketch_length: int,
table_dict: Dict,
output_path: str) -> None :
coverage_counter = 0
examples_filter = []
table_lines_filter = []
for example in examples:
table_id = example["context"]
examples_filter.append(example)
table_lines_filter.append(table_dict[table_id]["raw_lines"])
pool = mp.Pool(processes=10)
results = [pool.apply_async(coverage_example, args=(example, table_lines, max_sketch_length,)) for example, table_lines in zip(examples_filter, table_lines_filter)]
output = [p.get() for p in results]
coverage_counter = sum(output)
for example in examples_filter:
question_id = example["id"]
output_file_pointer = os.path.join('data/processed_sqa/mp/', question_id)
command = f"cat {output_file_pointer} >> {output_path}"
os.system(command)
print(f"Coverage: {coverage_counter}/{len(examples)}")
return
def search_program(exp_id, max_sketch_length, table_file, train_file, dev_file, test_file):
print(f"Exp id: {exp_id}")
# load examples
train_examples = load_jsonl(train_file)
dev_examples = load_jsonl(dev_file)
test_examples = load_jsonl(test_file)
tables = load_jsonl_table(table_file)
train_examples = train_examples + dev_examples
wt_reader = WTReader(tables, train_examples, [], test_examples, None)
wt_reader.check()
# evaluate the sketches
output_path = f"data/processed_sqa/{exp_id}.train.programs"
coverage(wt_reader.train_examples, max_sketch_length, wt_reader.table_dict, output_path)
output_path = f"data/processed_sqa/{exp_id}.test.programs"
coverage(wt_reader.test_examples, max_sketch_length, wt_reader.table_dict, output_path)
return
def cache_program(program_file_name, section, output_filename):
_dc_sketch_list = []
id2question = []
question2program = dict()
program_counter = 0
coverage = dict()
with open(program_file_name, "r") as f:
for line in f:
line = line[:-1]
if section == "test":
flag = ["nu-"]
else:
flag = ["nt-", "ns-"]
# if line.startswith(flag):
if line[:3] in flag:
q_id = line.split()[0]
line = next(f)[:-1]
lh, rh = line.split()
assert lh == "Table:"
q_t_pair = (q_id, rh)
id2question.append(q_t_pair)
qid, annotator, position = q_id.split('_')
annotator = annotator[3:]
position = position[3:]
# TODO
q_t_pair = (f"{qid}_ann{annotator}_pos{position}", rh)
if qid not in coverage:
coverage[qid] = {}
if annotator not in coverage[qid]:
coverage[qid][annotator] = {}
coverage[qid][annotator][position] = 0
line = next(f)[:-1]
if line == "NO LOGICAL FORMS FOUND!":
line = next(f) #blank line
continue
coverage[qid][annotator][position] = 1
_sketch2program = defaultdict(list)
while line:
assert line.startswith("Sketch: ")
_dc_sketch_list.append(line)
sketch = line.strip()[8:]
line = next(f)[:-1]
while line.startswith("\t"):
program_counter += 1
line = line.strip()
_sketch2program[sketch].append(line)
line = next(f)[:-1]
question2program[q_t_pair] = _sketch2program
print(f"Raw number of sketches (from programs): {len(set(_dc_sketch_list))}")
print(f"Raw number of programs: {program_counter}")
# double check
counter = 0
with open(program_file_name, "r") as f:
for line in f:
if line.startswith("\t"):
counter += 1
print(f"double check # of programs: {counter}")
with open(output_filename, 'wb') as f:
pickle.dump(question2program, f)
# print coverage stats
all_cnt = 0
all_cnt_correct = 0
seq_cnt = 0
seq_cnt_correct = 0
q1_cnt = 0
q1_cnt_correct = 0
q2_cnt = 0
q2_cnt_correct = 0
q3_cnt = 0
q3_cnt_correct = 0
last_cnt_correct = 0
f = open('fail_list.txt', 'w')
for qid in coverage:
for annotator in coverage[qid]:
seq_cnt += 1
seq_correct = True
for position in coverage[qid][annotator]:
all_cnt += 1
result_i = coverage[qid][annotator][position]
if result_i == 0:
f.write(f"{qid}_ann{annotator}_pos{position}"+'\n')
all_cnt_correct += result_i
if result_i == 0:
seq_correct = False
if position == '0':
q1_cnt += 1
q1_cnt_correct += result_i
if position == '1':
q2_cnt += 1
q2_cnt_correct += result_i
if position == '2':
q3_cnt += 1
q3_cnt_correct += result_i
if int(position) == len(coverage[qid][annotator])-1:
last_cnt_correct += result_i
if seq_correct:
seq_cnt_correct += 1
f.close()
all_accuracy = 100 * float(all_cnt_correct) / all_cnt
seq_accuracy = 100 * float(seq_cnt_correct) / seq_cnt
q1_accuracy = 100 * float(q1_cnt_correct) / q1_cnt
q2_accuracy = 100 * float(q2_cnt_correct) / q2_cnt
q3_accuracy = 100 * float(q3_cnt_correct) / q3_cnt
print('ALL: ', all_cnt_correct, all_cnt)
print('SEQ: ', seq_cnt_correct, seq_cnt)
print('Q1: ', q1_cnt_correct, q1_cnt)
print('Q2: ', q2_cnt_correct, q2_cnt)
print('Q3: ', q3_cnt_correct, q3_cnt)
print('Last: ', last_cnt_correct)
print('ALL SEQ Q1 Q2 Q3')
print("{:.1f} {:.1f} {:.1f} {:.1f} {:.1f}".format(all_accuracy, seq_accuracy, q1_accuracy, q2_accuracy, q3_accuracy))
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Preprocess SQA")
parser.add_argument("--step", choices=["prepare_corenlp", "prepare_raw_input", "cache_data", "search_program", "cache_program"], help="step in preprocessing")
# prepare_corenlp
parser.add_argument("--sqa_path", help="")
parser.add_argument("--table_file", help="")
parser.add_argument("--corenlp_input_output_list", help="")
parser.add_argument("--corenlp_input_output", help="")
# prepare_raw_input
parser.add_argument("--raw_input", help="")
# cache_data
parser.add_argument("--train_file", help="")
parser.add_argument("--dev_file", help="")
parser.add_argument("--test_file", help="")
parser.add_argument("--embed_file", help="")
parser.add_argument("--output_file", help="")
# search_program
parser.add_argument("--exp_id", help="")
parser.add_argument("--max_sketch_length", type=int, help="")
# cache_program
parser.add_argument("--program_file_name", help="")
parser.add_argument("--section", help="")
parser.add_argument("--output_filename", help="")
args = parser.parse_args()
if args.step == "prepare_corenlp":
if not os.path.exists(args.corenlp_input_output):
os.makedirs(args.corenlp_input_output)
prepare_corenlp(args.sqa_path, args.table_file, args.corenlp_input_output_list, args.corenlp_input_output)
elif args.step == "prepare_raw_input":
prepare_raw_input(args.sqa_path, args.corenlp_input_output, args.raw_input)
elif args.step == "cache_data":
cache_data(args.table_file, args.train_file, args.dev_file, args.test_file, args.embed_file, args.output_file)
elif args.step == "search_program":
log_path = "log/eval_coverage_debug.log"
if not os.path.exists('log/'):
os.makedirs('log/')
fh = logging.FileHandler(log_path)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
if not os.path.exists('data/processed_sqa/mp'):
os.makedirs('data/processed_sqa/mp')
search_program(args.exp_id, args.max_sketch_length, args.table_file, args.train_file, args.dev_file, args.test_file)
elif args.step == "cache_program":
cache_program(args.program_file_name, args.section, args.output_filename)
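# Example invocations (hypothetical script name and paths; the flags mirror the argparse
# options defined above):
#   python preprocess_sqa.py --step prepare_corenlp --sqa_path data/SQA \
#       --table_file data/tables.jsonl --corenlp_input_output_list corenlp_list.txt \
#       --corenlp_input_output corenlp_io/
#   python preprocess_sqa.py --step search_program --exp_id exp1 --max_sketch_length 10 \
#       --table_file data/tables.jsonl --train_file train.jsonl --dev_file dev.jsonl \
#       --test_file test.jsonl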
|
124800
|
from collections import OrderedDict
from itertools import islice
from typing import List
from app.master.build import Build
from app.util.exceptions import ItemNotFoundError
class BuildStore:
"""
Build storage service that stores and handles all builds.
"""
_all_builds_by_id = OrderedDict()
@classmethod
def get(cls, build_id: int) -> Build:
"""
Returns a build by id
:param build_id: The id for the build whose status we are getting
"""
build = cls._all_builds_by_id.get(build_id)
if build is None:
raise ItemNotFoundError('Invalid build id: {}.'.format(build_id))
return build
@classmethod
def get_range(cls, start: int, end: int) -> List[Build]:
"""
Returns a list of all builds.
:param start: The starting index of the requested build
:param end: One past the index of the last requested build; if it exceeds the number of
builds available, the returned list will be shorter than (end - start)
"""
requested_builds = islice(cls._all_builds_by_id, start, end)
return [cls._all_builds_by_id[key] for key in requested_builds]
@classmethod
def add(cls, build: Build):
"""
Add new build to collection
:param build: The build to add to the store
"""
cls._all_builds_by_id[build.build_id()] = build
@classmethod
def size(cls) -> int:
"""
Return the number of builds in the store
"""
return len(cls._all_builds_by_id)
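# Minimal usage sketch (assumes a Build instance exposing build_id(), as used by add() above):
#   BuildStore.add(build)               # register the build
#   BuildStore.get(build.build_id())    # look it up; raises ItemNotFoundError if absent
#   BuildStore.get_range(0, 10)         # first ten builds in insertion order
#   BuildStore.size()                   # number of stored builds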
|
124807
|
import tensorflow as tf
from base_model import BaseModel
class LSTMNN(BaseModel):
"""
LSTM neural network class, inherits from BaseModel
"""
def train(self):
"""
Fit the LSTM neural network to the data
"""
raise NotImplementedError()
|
124848
|
class Solution:
def mySqrt(self, x: int) -> int:
left, right = 0, x
while left <= right:
mid = left + (right - left) // 2
square = mid ** 2
if square <= x:
left = mid + 1
elif square > x :
right = mid -1
return left-1
# n : the value of the input
## Time Complexity: O( log n )
#
# The dominant cost is the binary search over [0, n], which is O( log n ).
## Space Complexity: O( 1 )
#
# Only a constant number of variables is used, so the space overhead is O( 1 ).
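# Worked example for x = 48: the interval [left, right] shrinks
# 0..48 -> 0..23 -> 0..10 -> 6..10 -> 6..7 -> 7..7 -> 7..6,
# at which point left = 7 > right = 6, so the method returns left - 1 = 6 = floor(sqrt(48)).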
def test_bench():
test_data = [0, 1, 80, 63, 48 ]
# expected output:
'''
0
1
8
7
6
'''
for n in test_data:
print( Solution().mySqrt(n) )
return
if __name__ == '__main__':
test_bench()
|
124923
|
collect_ignore = []
try:
import sklearn
except ImportError:
collect_ignore.append('compat/sklearn.py')
collect_ignore.append('compat/test_sklearn.py')
try:
import sqlalchemy
except ImportError:
collect_ignore.append('stream/iter_sql.py')
collect_ignore.append('stream/test_sql.py')
try:
import surprise
except ImportError:
collect_ignore.append('reco/surprise.py')
try:
import torch
except ImportError:
collect_ignore.append('compat/pytorch.py')
try:
import vaex
except ImportError:
collect_ignore.append('stream/iter_vaex.py')
|
125002
|
import pytest
from stock_indicators import indicators
class TestVortex:
def test_standard(self, quotes):
results = indicators.get_vortex(quotes, 14)
assert 502 == len(results)
assert 488 == len(list(filter(lambda x: x.pvi is not None, results)))
r = results[13]
assert r.pvi is None
assert r.nvi is None
r = results[14]
assert 1.0460 == round(float(r.pvi), 4)
assert 0.8119 == round(float(r.nvi), 4)
r = results[29]
assert 1.1300 == round(float(r.pvi), 4)
assert 0.7393 == round(float(r.nvi), 4)
r = results[249]
assert 1.1558 == round(float(r.pvi), 4)
assert 0.6634 == round(float(r.nvi), 4)
r = results[501]
assert 0.8712 == round(float(r.pvi), 4)
assert 1.1163 == round(float(r.nvi), 4)
def test_bad_data(self, bad_quotes):
r = indicators.get_vortex(bad_quotes, 20)
assert 502 == len(r)
def test_no_quotes(self, quotes):
r = indicators.get_vortex([], 5)
assert 0 == len(r)
r = indicators.get_vortex(quotes[:1], 5)
assert 1 == len(r)
def test_removed(self, quotes):
results = indicators.get_vortex(quotes, 14)
results = results.remove_warmup_periods()
assert 502 - 14 == len(results)
last = results.pop()
assert 0.8712 == round(float(last.pvi), 4)
assert 1.1163 == round(float(last.nvi), 4)
def test_exceptions(self, quotes):
from System import ArgumentOutOfRangeException
with pytest.raises(ArgumentOutOfRangeException):
indicators.get_vortex(quotes, 1)
|
125056
|
import numpy as np
import os, pickle
from tqdm import tqdm
def get_word_emb(word2coef_dict, word, default_value):
return word2coef_dict.get(word, default_value)
def get_phrase_emb(word2coef_dict, phrase, default_value):
words = phrase.split(' ')
embs = [ get_word_emb(word2coef_dict, word, default_value) for word in words ]
return np.mean(embs, axis=0)
def init_glove_data(glove_fname, glove_outname):
print(f'Constructing glove dictionary from {glove_fname} ...... ')
word2coef_dict = {}
running_sum = np.zeros((300,))
with open(os.path.join(glove_fname), 'r') as f:
for idx, line in enumerate(tqdm( list(f) )):
values = line.split()
word = ''.join(values[0:-300])
coefs = np.asarray(values[-300:], dtype='float32')
running_sum += coefs
word2coef_dict[word] = coefs
average_emb = running_sum / (idx + 1)
with open( os.path.join(glove_outname, 'glove_dict.pkl'), 'wb') as f:
pickle.dump( word2coef_dict, f)
np.save( os.path.join(glove_outname, 'default_value'), average_emb)
print(f'Glove dictionary saved at {glove_outname}')
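# Example usage (hypothetical file names; assumes a 300-dimensional GloVe text file):
#   init_glove_data('glove.840B.300d.txt', 'glove_cache/')
#   with open('glove_cache/glove_dict.pkl', 'rb') as fin:
#       word2coef = pickle.load(fin)
#   default_value = np.load('glove_cache/default_value.npy')
#   vec = get_phrase_emb(word2coef, 'new york city', default_value)  # mean vector, shape (300,)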
|
125092
|
import chardet
import csv
from dateutil.parser import parse
def get_encoding(ds_path: str) -> str:
""" Returns the encoding of the file """
test_str = b''
number_of_lines_to_read = 500
count = 0
with open(ds_path, 'rb') as f:
line = f.readline()
while line and count < number_of_lines_to_read:
test_str = test_str + line
count += 1
line = f.readline()
result = chardet.detect(test_str)
if result['encoding'] == 'ascii':
return 'utf-8'
else:
return result['encoding']
def get_delimiter(ds_path: str) -> str:
""" Returns the delimiter of the csv file """
with open(ds_path) as f:
first_line = f.readline()
s = csv.Sniffer()
return str(s.sniff(first_line).delimiter)
def is_date(string, fuzzy=False):
"""
Return whether the string can be interpreted as a date.
:param string: str, string to check for date
:param fuzzy: bool, ignore unknown tokens in string if True
"""
try:
parse(str(string), fuzzy=fuzzy)
return True
except Exception:
return False
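# Example usage (hypothetical file): sniff encoding and delimiter before parsing an unknown CSV.
#   enc = get_encoding('dataset.csv')
#   delim = get_delimiter('dataset.csv')
#   with open('dataset.csv', encoding=enc, newline='') as f:
#       rows = list(csv.reader(f, delimiter=delim))
#   is_date('2021-03-05')  # True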
|
125136
|
from django.shortcuts import get_object_or_404
from django.views.generic import RedirectView
from common.models import Allegation, AllegationCategory
from common.utils.mobile_url_hash_util import MobileUrlHashUtil
from share.models import Session
from url_mediator.services.session_builder import Builder, AllegationCrid, FilterTags, AllegationType
class AllegationView(RedirectView):
def get_redirect_url(self, crid=None, category_slug=None, cat_hash=None):
allegation = get_object_or_404(Allegation, crid=crid)
cat_id = MobileUrlHashUtil().decode(cat_hash)
category = get_object_or_404(AllegationCategory, pk=cat_id)
session_query = Builder(
FilterTags(
AllegationCrid(crids=[allegation.crid]),
AllegationType(categories=[(category.id, category.category)])
)
).build()
session = Session(query=session_query)
session.save()
return session.get_absolute_url()
|
125155
|
from gym.envs.registration import register
register(
id="Pusher-v1",
entry_point="micoenv.mico_robot_env:MicoEnv",
kwargs={
"randomize_arm": True,
"randomize_camera": True,
"randomize_textures": True,
"randomize_objects": True,
"normal_textures": True,
"done_after": 300,
'target_in_the_air': False,
"has_object": True,
"reward_type": "positive",
"observation_type": "pixels",
}
)
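# Minimal usage sketch (assumes gym and the micoenv package are installed; importing this
# module runs register(), after which the id can be passed to gym.make):
#   import gym
#   env = gym.make("Pusher-v1")
#   obs = env.reset()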
|
125197
|
from docopt import docopt
from abbr import __main__
from abbr.core import main
_mocked_html = """
<html>
<table class="no-margin">
<tbody>
<tr>
<dir>
<span class="sf" />
<span class="sf" />
</dir>
<p class="desc">term1</p>
<td>
<a>abbr1</a>
</td>
<p class="path">
<a>category1</a>
</p>
</tr>
<tr>
<dir>
<span class="sf" />
<span class="sf" />
<span class="sf" />
</dir>
<p class="desc">TERM2</p>
<td>
<a>ABBR2</a>
</td>
<p class="path">
<a>category2</a>
</p>
</tr>
<tr>
<dir>
<span class="sf" />
<span class="sf" />
<span class="sf" />
<span class="sf" />
</dir>
<p class="desc">Term3</p>
<td>
<a>Abbr3</a>
</td>
<p class="path">
<a>category3</a>
</p>
</tr>
</tbody>
</table>
</html>
"""
def abbr_cli(args: str):
args = docopt(__main__.__doc__, args.split())
return main(args)
|
125215
|
from pubnub.endpoints.file_operations.file_based_endpoint import FileOperationEndpoint
from pubnub.enums import HttpMethod, PNOperationType
from pubnub.crypto import PubNubFileCrypto
from pubnub.models.consumer.file import PNDownloadFileResult
from pubnub.request_handlers.requests_handler import RequestsRequestHandler
from pubnub.endpoints.file_operations.get_file_url import GetFileDownloadUrl
class DownloadFileNative(FileOperationEndpoint):
def __init__(self, pubnub):
FileOperationEndpoint.__init__(self, pubnub)
self._file_id = None
self._file_name = None
self._pubnub = pubnub
self._download_data = None
self._cipher_key = None
def cipher_key(self, cipher_key):
self._cipher_key = cipher_key
return self
def build_path(self):
return self._download_data.result.file_url
def http_method(self):
return HttpMethod.GET
def is_auth_required(self):
return False
def custom_params(self):
return {}
def file_id(self, file_id):
self._file_id = file_id
return self
def file_name(self, file_name):
self._file_name = file_name
return self
def decrypt_payload(self, data):
return PubNubFileCrypto(self._pubnub.config).decrypt(
self._cipher_key or self._pubnub.config.cipher_key,
data
)
def validate_params(self):
self.validate_subscribe_key()
self.validate_channel()
self.validate_file_name()
self.validate_file_id()
def create_response(self, envelope):
if self._cipher_key or self._pubnub.config.cipher_key:
return PNDownloadFileResult(self.decrypt_payload(envelope.content))
else:
return PNDownloadFileResult(envelope.content)
def non_json_response(self):
return True
def operation_type(self):
return PNOperationType.PNDownloadFileAction
def use_base_path(self):
return False
def build_params_callback(self):
return lambda a: {}
def name(self):
return "Downloading file"
def sync(self):
self._download_data = GetFileDownloadUrl(self._pubnub)\
.channel(self._channel)\
.file_name(self._file_name)\
.file_id(self._file_id)\
.sync()
return super(DownloadFileNative, self).sync()
def pn_async(self, callback):
return RequestsRequestHandler(self._pubnub).async_file_based_operation(self.sync, callback, "File Download")
|
125233
|
import unittest
import io
from sievelib.factory import FiltersSet
from .. import parser
class FactoryTestCase(unittest.TestCase):
def setUp(self):
self.fs = FiltersSet("test")
def test_get_filter_conditions(self):
"""Test get_filter_conditions method."""
orig_conditions = [('Sender', ":is", '<EMAIL>')]
self.fs.addfilter(
"ruleX",
orig_conditions,
[("fileinto", ":copy", "Toto"), ])
conditions = self.fs.get_filter_conditions("ruleX")
self.assertEqual(orig_conditions, conditions)
orig_conditions = [("exists", "list-help", "list-unsubscribe",
"list-subscribe", "list-owner")]
self.fs.addfilter(
"ruleY",
orig_conditions,
[("fileinto", 'List')]
)
conditions = self.fs.get_filter_conditions("ruleY")
self.assertEqual(orig_conditions, conditions)
orig_conditions = [('Sender', ":notis", '<EMAIL>')]
self.fs.addfilter(
"ruleZ",
orig_conditions,
[("fileinto", ":copy", "Toto"), ])
conditions = self.fs.get_filter_conditions("ruleZ")
self.assertEqual(orig_conditions, conditions)
orig_conditions = [("notexists", "list-help", "list-unsubscribe",
"list-subscribe", "list-owner")]
self.fs.addfilter(
"ruleA",
orig_conditions,
[("fileinto", 'List')]
)
conditions = self.fs.get_filter_conditions("ruleA")
self.assertEqual(orig_conditions, conditions)
orig_conditions = [("envelope", ":is", ["From"], ["hello"])]
self.fs.addfilter(
"ruleB",
orig_conditions,
[("fileinto", "INBOX")]
)
conditions = self.fs.get_filter_conditions("ruleB")
self.assertEqual(orig_conditions, conditions)
orig_conditions = [("body", ":raw", ":notcontains", "matteo")]
self.fs.addfilter(
"ruleC",
orig_conditions,
[("fileinto", "INBOX")]
)
conditions = self.fs.get_filter_conditions("ruleC")
self.assertEqual(orig_conditions, conditions)
orig_conditions = [(
"currentdate", ":zone", "+0100", ":notis", "date", "2019-02-26"
)]
self.fs.addfilter(
"ruleD",
orig_conditions,
[("fileinto", "INBOX")]
)
conditions = self.fs.get_filter_conditions("ruleD")
self.assertEqual(orig_conditions, conditions)
orig_conditions = [(
"currentdate", ":zone", "+0100", ":value", "gt", "date",
"2019-02-26"
)]
self.fs.addfilter(
"ruleE",
orig_conditions,
[("fileinto", "INBOX")]
)
conditions = self.fs.get_filter_conditions("ruleE")
self.assertEqual(orig_conditions, conditions)
def test_get_filter_conditions_from_parser_result(self):
res = """require ["fileinto"];
# rule:[test]
if anyof (exists ["Subject"]) {
fileinto "INBOX";
}
"""
p = parser.Parser()
p.parse(res)
fs = FiltersSet("test", '# rule:')
fs.from_parser_result(p)
c = fs.get_filter_conditions('[test]')
self.assertEqual(c, [("exists", "Subject")])
res = """require ["date", "fileinto"];
# rule:aaa
if anyof (currentdate :zone "+0100" :is "date" ["2019-03-27"]) {
fileinto "INBOX";
}
"""
p = parser.Parser()
p.parse(res)
fs = FiltersSet("aaa", "# rule:")
fs.from_parser_result(p)
c = fs.get_filter_conditions('aaa')
self.assertEqual(
c, [('currentdate', ':zone', '+0100', ':is', 'date', '2019-03-27')]
)
res = """require ["envelope", "fileinto"];
# rule:[aaa]
if anyof (envelope :contains ["To"] ["<EMAIL>"]) {
fileinto "INBOX";
}
"""
p = parser.Parser()
p.parse(res)
fs = FiltersSet("aaa", "# rule:")
fs.from_parser_result(p)
c = fs.get_filter_conditions('[aaa]')
self.assertEqual(
c, [('envelope', ':contains', ['To'], ['<EMAIL>'])]
)
def test_get_filter_matchtype(self):
"""Test get_filter_matchtype method."""
self.fs.addfilter(
"ruleX",
[('Sender', ":is", '<EMAIL>'), ],
[("fileinto", ":copy", "Toto"), ])
match_type = self.fs.get_filter_matchtype("ruleX")
self.assertEqual(match_type, "anyof")
def test_get_filter_actions(self):
"""Test get_filter_actions method."""
self.fs.addfilter(
"ruleX",
[('Sender', ":is", '<EMAIL>'), ],
[("fileinto", ":copy", "Toto"), ])
actions = self.fs.get_filter_actions("ruleX")
self.assertIn("fileinto", actions[0])
self.assertIn(":copy", actions[0])
self.assertIn("Toto", actions[0])
self.fs.addfilter(
"ruleY",
[("Subject", ":contains", "aaa")],
[("stop",)]
)
actions = self.fs.get_filter_actions("ruleY")
self.assertIn("stop", actions[0])
def test_add_header_filter(self):
output = io.StringIO()
self.fs.addfilter(
"rule1",
[('Sender', ":is", '<EMAIL>'), ],
[("fileinto", ":copy", "Toto"), ])
self.assertIsNot(self.fs.getfilter("rule1"), None)
self.fs.tosieve(output)
self.assertEqual(output.getvalue(), """require ["fileinto", "copy"];
# Filter: rule1
if anyof (header :is "Sender" "<EMAIL>") {
fileinto :copy "Toto";
}
""")
output.close()
def test_use_action_with_tag(self):
output = io.StringIO()
self.fs.addfilter(
"rule1",
[('Sender', ":is", '<EMAIL>'), ],
[("redirect", ":copy", "<EMAIL>"), ])
self.assertIsNot(self.fs.getfilter("rule1"), None)
self.fs.tosieve(output)
self.assertEqual(output.getvalue(), """require ["copy"];
# Filter: rule1
if anyof (header :is "Sender" "<EMAIL>") {
redirect :copy "<EMAIL>";
}
""")
output.close()
def test_add_header_filter_with_not(self):
output = io.StringIO()
self.fs.addfilter(
"rule1",
[('Sender', ":notcontains", '<EMAIL>')],
[("fileinto", 'Toto')])
self.assertIsNot(self.fs.getfilter("rule1"), None)
self.fs.tosieve(output)
self.assertEqual(output.getvalue(), """require ["fileinto"];
# Filter: rule1
if anyof (not header :contains "Sender" "<EMAIL>") {
fileinto "Toto";
}
""")
def test_add_exists_filter(self):
output = io.StringIO()
self.fs.addfilter(
"rule1",
[('exists', "list-help", "list-unsubscribe",
"list-subscribe", "list-owner")],
[("fileinto", 'Toto')]
)
self.assertIsNot(self.fs.getfilter("rule1"), None)
self.fs.tosieve(output)
self.assertEqual(output.getvalue(), """require ["fileinto"];
# Filter: rule1
if anyof (exists ["list-help","list-unsubscribe","list-subscribe","list-owner"]) {
fileinto "Toto";
}
""")
def test_add_exists_filter_with_not(self):
output = io.StringIO()
self.fs.addfilter(
"rule1",
[('notexists', "list-help", "list-unsubscribe",
"list-subscribe", "list-owner")],
[("fileinto", 'Toto')]
)
self.assertIsNot(self.fs.getfilter("rule1"), None)
self.fs.tosieve(output)
self.assertEqual(output.getvalue(), """require ["fileinto"];
# Filter: rule1
if anyof (not exists ["list-help","list-unsubscribe","list-subscribe","list-owner"]) {
fileinto "Toto";
}
""")
def test_add_size_filter(self):
output = io.StringIO()
self.fs.addfilter(
"rule1",
[('size', ":over", "100k")],
[("fileinto", 'Totoéé')]
)
self.assertIsNot(self.fs.getfilter("rule1"), None)
self.fs.tosieve(output)
self.assertEqual(output.getvalue(), """require ["fileinto"];
# Filter: rule1
if anyof (size :over 100k) {
fileinto "Totoéé";
}
""")
def test_remove_filter(self):
self.fs.addfilter("rule1",
[('Sender', ":is", '<EMAIL>')],
[("fileinto", 'Toto')])
self.assertIsNot(self.fs.getfilter("rule1"), None)
self.assertEqual(self.fs.removefilter("rule1"), True)
self.assertIs(self.fs.getfilter("rule1"), None)
def test_disablefilter(self):
"""
FIXME: Extra spaces are written between if and anyof, why?!
"""
self.fs.addfilter("rule1",
[('Sender', ":is", '<EMAIL>')],
[("fileinto", 'Toto')])
self.assertIsNot(self.fs.getfilter("rule1"), None)
self.assertEqual(self.fs.disablefilter("rule1"), True)
output = io.StringIO()
self.fs.tosieve(output)
self.assertEqual(output.getvalue(), """require ["fileinto"];
# Filter: rule1
if false {
if anyof (header :is "Sender" "<EMAIL>") {
fileinto "Toto";
}
}
""")
output.close()
self.assertEqual(self.fs.is_filter_disabled("rule1"), True)
def test_add_filter_unicode(self):
"""Add a filter containing unicode data."""
name = u"Test\xe9".encode("utf-8")
self.fs.addfilter(
name,
[('Sender', ":is", '<EMAIL>'), ],
[("fileinto", 'Toto'), ])
self.assertIsNot(self.fs.getfilter("Testé"), None)
self.assertEqual("{}".format(self.fs), """require ["fileinto"];
# Filter: Testé
if anyof (header :is "Sender" "<EMAIL>") {
fileinto "Toto";
}
""")
def test_add_body_filter(self):
"""Add a body filter."""
self.fs.addfilter(
"test",
[("body", ":raw", ":contains", "matteo")],
[("fileinto", "Toto")]
)
self.assertEqual("{}".format(self.fs), """require ["body", "fileinto"];
# Filter: test
if anyof (body :contains :raw ["matteo"]) {
fileinto "Toto";
}
""")
def test_add_notbody_filter(self):
"""Add a not body filter."""
self.fs.addfilter(
"test",
[("body", ":raw", ":notcontains", "matteo")],
[("fileinto", "Toto")]
)
self.assertEqual("{}".format(self.fs), """require ["body", "fileinto"];
# Filter: test
if anyof (not body :contains :raw ["matteo"]) {
fileinto "Toto";
}
""")
def test_add_envelope_filter(self):
"""Add a envelope filter."""
self.fs.addfilter(
"test",
[("envelope", ":is", ["From"], ["hello"])],
[("fileinto", "INBOX")]
)
self.assertEqual("{}".format(self.fs), """require ["envelope", "fileinto"];
# Filter: test
if anyof (envelope :is ["From"] ["hello"]) {
fileinto "INBOX";
}
""")
def test_add_notenvelope_filter(self):
"""Add a not envelope filter."""
self.fs.addfilter(
"test",
[("envelope", ":notis", ["From"], ["hello"])],
[("fileinto", "INBOX")]
)
self.assertEqual("{}".format(self.fs), """require ["envelope", "fileinto"];
# Filter: test
if anyof (not envelope :is ["From"] ["hello"]) {
fileinto "INBOX";
}
""")
def test_add_currentdate_filter(self):
"""Add a currentdate filter."""
self.fs.addfilter(
"test",
[("currentdate", ":zone", "+0100", ":is", "date", "2019-02-26")],
[("fileinto", "INBOX")]
)
self.assertEqual("{}".format(self.fs), """require ["date", "fileinto"];
# Filter: test
if anyof (currentdate :zone "+0100" :is "date" ["2019-02-26"]) {
fileinto "INBOX";
}
""")
self.fs.removefilter("test")
self.fs.addfilter(
"test",
[("currentdate", ":zone", "+0100", ":value", "gt", "date",
"2019-02-26")],
[("fileinto", "INBOX")]
)
self.assertEqual("{}".format(self.fs), """require ["date", "fileinto", "relational"];
# Filter: test
if anyof (currentdate :zone "+0100" :value "gt" "date" ["2019-02-26"]) {
fileinto "INBOX";
}
""")
if __name__ == "__main__":
unittest.main()
|
125265
|
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import registry
reg: registry = registry()
Base = declarative_base()
class SomeAbstract(Base):
__abstract__ = True
class HasUpdatedAt:
updated_at = Column(Integer)
@reg.mapped
class Foo(SomeAbstract):
__tablename__ = "foo"
id: int = Column(Integer(), primary_key=True)
name: str = Column(String)
class Bar(HasUpdatedAt, Base):
__tablename__ = "bar"
id = Column(Integer(), primary_key=True)
num = Column(Integer)
Bar.__mapper__
# EXPECTED_MYPY: "Type[HasUpdatedAt]" has no attribute "__mapper__"
HasUpdatedAt.__mapper__
# EXPECTED_MYPY: "Type[SomeAbstract]" has no attribute "__mapper__"
SomeAbstract.__mapper__
|
125347
|
import re
from .git2_types import Git2Type
from .git2_type_common import (
Git2TypeConstObject,
Git2TypeOutObject,
PAT1_STR,
PAT2_STR,
PAT3_STR,
)
class Git2TypeConstRebaseOptions(Git2TypeConstObject):
PAT = re.compile(PAT1_STR + "(?P<obj_name>rebase_options)" + PAT2_STR)
class Git2TypeOutRebaseOptions(Git2TypeOutObject):
PAT = re.compile(PAT1_STR + "(?P<obj_name>rebase_options)" + PAT3_STR)
class Git2TypeConstRebase(Git2TypeConstObject):
PAT = re.compile(PAT1_STR + "(?P<obj_name>rebase)" + PAT2_STR)
class Git2TypeOutRebase(Git2TypeOutObject):
PAT = re.compile(PAT1_STR + "(?P<obj_name>rebase)" + PAT3_STR)
class Git2TypeConstRebaseOperation(Git2TypeConstObject):
PAT = re.compile(PAT1_STR + "(?P<obj_name>rebase_operation)" + PAT2_STR)
class Git2TypeOutRebaseOperation(Git2TypeOutObject):
PAT = re.compile(PAT1_STR + "(?P<obj_name>rebase_operation)" + PAT3_STR)
|
125368
|
del_items(0x80122A40)
SetType(0x80122A40, "struct Creds CreditsTitle[6]")
del_items(0x80122BE8)
SetType(0x80122BE8, "struct Creds CreditsSubTitle[28]")
del_items(0x80123084)
SetType(0x80123084, "struct Creds CreditsText[35]")
del_items(0x8012319C)
SetType(0x8012319C, "int CreditsTable[224]")
del_items(0x801243BC)
SetType(0x801243BC, "struct DIRENTRY card_dir[16][2]")
del_items(0x801248BC)
SetType(0x801248BC, "struct file_header card_header[16][2]")
del_items(0x801242E0)
SetType(0x801242E0, "struct sjis sjis_table[37]")
del_items(0x801297BC)
SetType(0x801297BC, "unsigned char save_buffer[106496]")
del_items(0x80129724)
SetType(0x80129724, "struct FeTable McLoadGameMenu")
del_items(0x80129704)
SetType(0x80129704, "char *CharFileList[5]")
del_items(0x80129718)
SetType(0x80129718, "char *Classes[3]")
del_items(0x80129740)
SetType(0x80129740, "struct FeTable McLoadCharMenu")
del_items(0x8012975C)
SetType(0x8012975C, "struct FeTable McLoadCard1Menu")
del_items(0x80129778)
SetType(0x80129778, "struct FeTable McLoadCard2Menu")
|
125399
|
import traceback
import services # pylint: disable=import-error
from interactions.base.immediate_interaction import ImmediateSuperInteraction # pylint: disable=import-error,no-name-in-module
from singletons import DEFAULT # pylint: disable=import-error
from event_testing.results import TestResult # pylint: disable=import-error
from sims4.utils import flexmethod # pylint: disable=import-error
from control_any_sim.services.selection_group import SelectionGroupService
from control_any_sim.util.logger import Logger
class SimMakeSelectableInteraction(ImmediateSuperInteraction):
# pylint: disable=too-few-public-methods
@flexmethod
def test(cls, inst, *args, target=DEFAULT, context=None, **kwargs) -> TestResult: # pylint: disable=no-self-argument
try:
inst_or_cls = inst if inst is not None else cls
Logger.log("testing SimMakeSelectableInteraction, context: {} {}"
.format(args, kwargs))
if target:
info_target = target.sim_info
Logger.log('info_target: {}'.format(info_target))
if context is not None and context.target_sim_id is not None:
target_id = context.target_sim_id
info_target = services.sim_info_manager().get(target_id)
Logger.log('info_target: {}'.format(info_target))
sim_is_selectable = (SelectionGroupService
.get(0).is_selectable(info_target.id))
Logger.log("sim_is_selectable: {}".format(sim_is_selectable))
if sim_is_selectable:
fail = TestResult(False, "sim is already selectable", inst)
Logger.log('fail result: {}'.format(repr(fail)))
return fail
if target is None or target.sim_info.id != info_target.id:
return TestResult.TRUE
return (super(SimMakeSelectableInteraction, inst_or_cls)
.test(*args, target=target, context=context, **kwargs))
except BaseException:
Logger.log(traceback.format_exc())
def _run_interaction_gen(self, timeline):
Logger.log("running make selectable interaction...")
try:
super()._run_interaction_gen(timeline)
sim_info = self.target.sim_info
if self.context.target_sim_id is not None:
sim_info = (services.sim_info_manager()
.get(self.context.target_sim_id))
Logger.log("got sim info {} {}"
.format(sim_info.first_name, sim_info.last_name))
SelectionGroupService \
.get(services.active_household_id()) \
.make_sim_selectable(sim_info)
Logger.log("sim is now selectable!")
services.get_first_client().set_active_sim_by_id(sim_info.id)
Logger.log("sim is now active!")
return True
except BaseException:
Logger.log(traceback.format_exc())
class SimMakeNotSelectableInteraction(ImmediateSuperInteraction):
# pylint: disable=too-few-public-methods
@flexmethod
def test(cls, inst, *args, target=DEFAULT, context=None, **kwargs) -> TestResult: # pylint: disable=no-self-argument
inst_or_cls = inst if inst is not None else cls
Logger.log("testing SimMakeNotSelectableInteraction, context: {} {}"
.format(args, kwargs))
if target:
info_target = target.sim_info
Logger.log('info_target: {}'.format(info_target))
if context is not None and context.target_sim_id is not None:
target_id = context.target_sim_id
info_target = services.sim_info_manager().get(target_id)
Logger.log('info_target: {}'.format(info_target))
if cls.must_be_selectable(info_target):
return TestResult(False, "sim is in active household and has to be selectable")
sim_is_selectable = (SelectionGroupService
.get(0).is_selectable(info_target.id))
Logger.log("sim_is_selectable: {}".format(sim_is_selectable))
if not sim_is_selectable:
return TestResult(False, "sim is not selectable", inst)
if target is None or target.sim_info.id != info_target.id:
return TestResult.TRUE
return (super(SimMakeNotSelectableInteraction, inst_or_cls)
.test(*args, target=target, context=context, **kwargs))
def _run_interaction_gen(self, timeline):
Logger.log("running make not selectable interaction...")
try:
super()._run_interaction_gen(timeline)
sim_info = self.target.sim_info
if self.context.target_sim_id is not None:
sim_info = (services.sim_info_manager()
.get(self.context.target_sim_id))
Logger.log("got sim info {} {}"
.format(sim_info.first_name, sim_info.last_name))
SelectionGroupService \
.get(services.active_household_id()) \
.remove_sim(sim_info)
Logger.log("sim is now not selectable anymore!")
return True
except BaseException:
Logger.log(traceback.format_exc())
@classmethod
def must_be_selectable(cls, sim_info):
return services.active_household_id() == sim_info.household_id
class SimAddRoomMateInteraction(ImmediateSuperInteraction):
@flexmethod
def test(cls, inst, *args, target=DEFAULT, context=None, **kwargs) -> TestResult: # pylint: disable=no-self-argument
try:
Logger.log("testing SimAddRoomMateInteraction, context: {} {}".format(args, kwargs))
inst_or_cls = inst if inst is not None else cls
roommate_service = services.get_roommate_service()
household_id = context.sim.sim_info.household_id
if roommate_service is None:
return TestResult.NONE
if target:
info_target = target.sim_info
if context.target_sim_id is not None:
target_id = context.target_sim_id
info_target = services.sim_info_manager().get(target_id)
Logger.log('info_target: {}'.format(info_target))
if context.sim.sim_info.id == info_target.id:
return TestResult(False, "sim cannot be its own roommate", inst)
if roommate_service.is_sim_info_roommate(info_target, household_id):
return TestResult(False, "sim is already roommate of this household")
return (super(SimAddRoomMateInteraction, inst_or_cls)
.test(*args, target=target, context=context, **kwargs))
except BaseException:
Logger.log(traceback.format_exc())
def _run_interaction_gen(self, timeline):
try:
Logger.log("running turn into roommate interaction...")
super()._run_interaction_gen(timeline)
sim_info = self.target.sim_info
home_zone_id = self.get_sim_info_home_zone_id(self.context.sim.sim_info)
if self.context.target_sim_id is not None:
sim_info = (services.sim_info_manager()
.get(self.context.target_sim_id))
Logger.log("got sim info {} {}"
.format(sim_info.first_name, sim_info.last_name))
services.get_roommate_service().add_roommate(sim_info, home_zone_id)
Logger.log("sim is now a roommate!")
return True
except BaseException:
Logger.log(traceback.format_exc())
@staticmethod
def get_sim_info_home_zone_id(sim_info):
if sim_info.household is None:
return 0
home_zone_id = sim_info.household.home_zone_id
if not home_zone_id:
return sim_info.roommate_zone_id
return home_zone_id
class SimRemoveRoomMateInteraction(ImmediateSuperInteraction):
@flexmethod
def test(cls, inst, *args, target=DEFAULT, context=None, **kwargs) -> TestResult: # pylint: disable=no-self-argument
try:
inst_or_cls = inst if inst is not None else cls
roommate_service = services.get_roommate_service()
if roommate_service is None:
return TestResult.NONE
Logger.log("testing SimRemoveRoomMateInteraction, context: {} {}"
.format(args, kwargs))
if target:
info_target = target.sim_info
if context.target_sim_id is not None:
target_id = context.target_sim_id
info_target = services.sim_info_manager().get(target_id)
household_id = context.sim.sim_info.household_id
Logger.log('info_target: {}'.format(info_target))
if context.sim.sim_info.id == info_target.id:
return TestResult(False, "sim cannot be its own roommate", inst)
if not roommate_service.is_sim_info_roommate(info_target, household_id):
return TestResult(False, "sim is not a roommate of current household", inst)
return (super(SimRemoveRoomMateInteraction, inst_or_cls)
.test(*args, target=target, context=context, **kwargs))
except BaseException:
Logger.log(traceback.format_exc())
def _run_interaction_gen(self, timeline):
try:
Logger.log("running remove roommate interaction...")
super()._run_interaction_gen(timeline)
sim_info = self.target.sim_info
if self.context.target_sim_id is not None:
sim_info = (services.sim_info_manager()
.get(self.context.target_sim_id))
Logger.log("got sim info {} {}"
.format(sim_info.first_name, sim_info.last_name))
services.get_roommate_service().remove_roommate(sim_info)
Logger.log("sim is now not a roommate anymore!")
return True
except BaseException:
Logger.log(traceback.format_exc())
class SimHouseholdNpcOnInteraction(ImmediateSuperInteraction):
@flexmethod
def test(cls, inst, *args, target=DEFAULT, context=None, **kwargs) -> TestResult: # pylint: disable=no-self-argument
try:
inst_or_cls = inst if inst is not None else cls
selection_group = SelectionGroupService.get(services.active_household_id())
Logger.log("testing SimHouseholdNpcOnInteraction, context: {} {}"
.format(args, kwargs))
if target:
info_target = target.sim_info
if context.target_sim_id is not None:
target_id = context.target_sim_id
info_target = services.sim_info_manager().get(target_id)
Logger.log('info_target: {}'.format(info_target))
if selection_group.is_household_npc(info_target):
return TestResult(False, "sim is already a household npc", inst)
if info_target.household_id != services.active_household_id():
return TestResult(False, "sim is not a member of the active household", inst)
return (super(SimHouseholdNpcOnInteraction, inst_or_cls)
.test(*args, target=target, context=context, **kwargs))
except BaseException:
Logger.log(traceback.format_exc())
def _run_interaction_gen(self, timeline):
try:
Logger.log("running household npc on interaction...")
super()._run_interaction_gen(timeline)
sim_info = self.target.sim_info
if self.context.target_sim_id is not None:
sim_info = (services.sim_info_manager()
.get(self.context.target_sim_id))
Logger.log("got sim info {} {}"
.format(sim_info.first_name, sim_info.last_name))
selection_group = SelectionGroupService.get(services.active_household_id())
selection_group.add_household_npc(sim_info)
Logger.log("sim is now a household npc!")
return True
except BaseException:
Logger.log(traceback.format_exc())
class SimHouseholdNpcOffInteraction(ImmediateSuperInteraction):
@flexmethod
def test(cls, inst, *args, target=DEFAULT, context=None, **kwargs) -> TestResult: # pylint: disable=no-self-argument
try:
inst_or_cls = inst if inst is not None else cls
selection_group = SelectionGroupService.get(services.active_household_id())
Logger.log("testing SimHouseholdNpcOffInteraction, context: {} {}"
.format(args, kwargs))
if target:
info_target = target.sim_info
if context.target_sim_id is not None:
target_id = context.target_sim_id
info_target = services.sim_info_manager().get(target_id)
Logger.log('info_target: {}'.format(info_target))
if not selection_group.is_household_npc(info_target):
return TestResult(False, "sim is not a household npc", inst)
if info_target.household_id != services.active_household_id():
return TestResult(False, "sim is not a member of the active household", inst)
return (super(SimHouseholdNpcOffInteraction, inst_or_cls)
.test(*args, target=target, context=context, **kwargs))
except BaseException:
Logger.log(traceback.format_exc())
def _run_interaction_gen(self, timeline):
try:
Logger.log("running household npc off interaction...")
super()._run_interaction_gen(timeline)
sim_info = self.target.sim_info
if self.context.target_sim_id is not None:
sim_info = (services.sim_info_manager()
.get(self.context.target_sim_id))
Logger.log("got sim info {} {}"
.format(sim_info.first_name, sim_info.last_name))
selection_group = SelectionGroupService.get(services.active_household_id())
selection_group.remove_household_npc(sim_info)
Logger.log("sim is now a normal household member!")
return True
except BaseException:
Logger.log(traceback.format_exc())
|
125420
|
import os,glob
filenames = [os.path.splitext(os.path.basename(f))[0] for f in glob.glob(os.path.dirname(__file__)+"/*.py")]
filenames.remove('__init__')
__all__ = filenames
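# Effect: __all__ lists every sibling .py module (except __init__), so a wildcard import of
# this package pulls in each of those modules by name.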
|
125490
|
from simple_settings import settings
from simple_settings.utils import settings_stub
# Stub examples
with settings_stub(SOME_SETTING='foo'):
assert settings.SOME_SETTING == 'foo'
assert settings.SOME_SETTING == 'bar'
@settings_stub(SOME_SETTING='foo')
def get_some_setting():
return settings.SOME_SETTING
assert get_some_setting() == 'foo'
assert settings.SOME_SETTING == 'bar'
|
125570
|
class Solution:
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
start, end = 0, len(nums)- 1
while start <= end:
mid = start + (end - start) // 2
if nums[mid] < target:
start = mid + 1
elif nums[mid] > target:
end = mid - 1
else:
return mid
return -1
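# Example: Solution().search([-1, 0, 3, 5, 9, 12], 9) returns 4; a target not in nums
# (e.g. 2) returns -1.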
|
125579
|
import torch
from abc import abstractmethod
from numpy import inf
import numpy as np
class BaseTrainer:
"""
Base class for all trainers
"""
def __init__(self, model, criterion, metric_ftns, optimizer, config, fold_id):
self.config = config
self.logger = config.get_logger('trainer', config['trainer']['verbosity'])
# setup GPU device if available, move model into configured device
self.device, device_ids = self._prepare_device(config['n_gpu'])
self.model = model.to(self.device)
if len(device_ids) > 1:
self.model = torch.nn.DataParallel(model, device_ids=device_ids)
self.criterion = criterion
self.metric_ftns = metric_ftns
self.optimizer = optimizer
cfg_trainer = config['trainer']
self.epochs = cfg_trainer['epochs']
self.save_period = cfg_trainer['save_period']
self.monitor = cfg_trainer.get('monitor', 'off')
self.fold_id = fold_id
# configuration to monitor model performance and save best
if self.monitor == 'off':
self.mnt_mode = 'off'
self.mnt_best = 0
else:
self.mnt_mode, self.mnt_metric = self.monitor.split()
assert self.mnt_mode in ['min', 'max']
self.mnt_best = inf if self.mnt_mode == 'min' else -inf
self.early_stop = cfg_trainer.get('early_stop', inf)
self.start_epoch = 1
self.checkpoint_dir = config.save_dir
if config.resume is not None:
self._resume_checkpoint(config.resume)
@abstractmethod
def _train_epoch(self, epoch, total_epochs):
"""
Training logic for an epoch
:param epoch: Current epoch number
"""
raise NotImplementedError
def train(self):
"""
Full training logic
"""
not_improved_count = 0
all_outs = []
all_trgs = []
for epoch in range(self.start_epoch, self.epochs + 1):
result, epoch_outs, epoch_trgs = self._train_epoch(epoch, self.epochs)
# save logged informations into log dict
log = {'epoch': epoch}
log.update(result)
all_outs.extend(epoch_outs)
all_trgs.extend(epoch_trgs)
# print logged informations to the screen
for key, value in log.items():
self.logger.info(' {:15s}: {}'.format(str(key), value))
# evaluate model performance according to configured metric, save best checkpoint as model_best
best = False
if self.mnt_mode != 'off':
try:
# check whether model performance improved or not, according to specified metric(mnt_metric)
improved = (self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best) or \
(self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best)
except KeyError:
self.logger.warning("Warning: Metric '{}' is not found. "
"Model performance monitoring is disabled.".format(self.mnt_metric))
self.mnt_mode = 'off'
improved = False
if improved:
self.mnt_best = log[self.mnt_metric]
not_improved_count = 0
best = True
else:
not_improved_count += 1
if not_improved_count > self.early_stop:
self.logger.info("Validation performance didn\'t improve for {} epochs. "
"Training stops.".format(self.early_stop))
break
if epoch % self.save_period == 0:
self._save_checkpoint(epoch, save_best=best)
outs_name = "outs_" + str(self.fold_id)
trgs_name = "trgs_" + str(self.fold_id)
np.save(self.config._save_dir / outs_name, all_outs)
np.save(self.config._save_dir / trgs_name, all_trgs)
if self.fold_id == self.config["data_loader"]["args"]["num_folds"] - 1:
self._calc_metrics()
def _prepare_device(self, n_gpu_use):
"""
setup GPU device if available, move model into configured device
"""
n_gpu = torch.cuda.device_count()
if n_gpu_use > 0 and n_gpu == 0:
self.logger.warning("Warning: There's no GPU available on this machine, "
"training will be performed on CPU.")
n_gpu_use = 0
if n_gpu_use > n_gpu:
self.logger.warning("Warning: The number of GPU\'s configured to use is {}, but only {} are available "
"on this machine.".format(n_gpu_use, n_gpu))
n_gpu_use = n_gpu
device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
list_ids = list(range(n_gpu_use))
return device, list_ids
def _save_checkpoint(self, epoch, save_best=True):
"""
Saving checkpoints
:param epoch: current epoch number
:param log: logging information of the epoch
:param save_best: if True, rename the saved checkpoint to 'model_best.pth'
"""
arch = type(self.model).__name__
state = {
'arch': arch,
'epoch': epoch,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'monitor_best': self.mnt_best,
'config': self.config
}
filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))
torch.save(state, filename)
self.logger.info("Saving checkpoint: {} ...".format(filename))
if save_best:
best_path = str(self.checkpoint_dir / 'model_best.pth')
torch.save(state, best_path)
self.logger.info("Saving current best: model_best.pth ...")
def _resume_checkpoint(self, resume_path):
"""
Resume from saved checkpoints
:param resume_path: Checkpoint path to be resumed
"""
resume_path = str(resume_path)
self.logger.info("Loading checkpoint: {} ...".format(resume_path))
checkpoint = torch.load(resume_path)
self.start_epoch = checkpoint['epoch'] + 1
self.mnt_best = checkpoint['monitor_best']
# load architecture params from checkpoint.
if checkpoint['config']['arch'] != self.config['arch']:
self.logger.warning("Warning: Architecture configuration given in config file is different from that of "
"checkpoint. This may yield an exception while state_dict is being loaded.")
self.model.load_state_dict(checkpoint['state_dict'])
# load optimizer state from checkpoint only when optimizer type is not changed.
if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:
self.logger.warning("Warning: Optimizer type given in config file is different from that of checkpoint. "
"Optimizer parameters not being resumed.")
else:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.logger.info("Checkpoint loaded. Resume training from epoch {}".format(self.start_epoch))
def _calc_metrics(self):
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import pandas as pd
import os
from os import walk
n_folds = self.config["data_loader"]["args"]["num_folds"]
all_outs = []
all_trgs = []
outs_list = []
trgs_list = []
save_dir = os.path.abspath(os.path.join(self.checkpoint_dir, os.pardir))
for root, dirs, files in os.walk(save_dir):
for file in files:
if "outs" in file:
outs_list.append(os.path.join(root, file))
if "trgs" in file:
trgs_list.append(os.path.join(root, file))
if len(outs_list)==self.config["data_loader"]["args"]["num_folds"]:
for i in range(len(outs_list)):
outs = np.load(outs_list[i])
trgs = np.load(trgs_list[i])
all_outs.extend(outs)
all_trgs.extend(trgs)
all_trgs = np.array(all_trgs).astype(int)
all_outs = np.array(all_outs).astype(int)
r = classification_report(all_trgs, all_outs, digits=6, output_dict=True)
cm = confusion_matrix(all_trgs, all_outs)
df = pd.DataFrame(r)
df["cohen"] = cohen_kappa_score(all_trgs, all_outs)
df["accuracy"] = accuracy_score(all_trgs, all_outs)
df = df * 100
file_name = self.config["name"] + "_classification_report.xlsx"
report_Save_path = os.path.join(save_dir, file_name)
df.to_excel(report_Save_path)
cm_file_name = self.config["name"] + "_confusion_matrix.torch"
cm_Save_path = os.path.join(save_dir, cm_file_name)
torch.save(cm, cm_Save_path)
        # Uncomment if you want to copy some of the important files into the experiment folder
# from shutil import copyfile
# copyfile("model/model.py", os.path.join(self.checkpoint_dir, "model.py"))
# copyfile("model/loss.py", os.path.join(self.checkpoint_dir, "loss.py"))
# copyfile("trainer/trainer.py", os.path.join(self.checkpoint_dir, "trainer.py"))
# copyfile("train_Kfold_CV.py", os.path.join(self.checkpoint_dir, "train_Kfold_CV.py"))
# copyfile("config.json", os.path.join(self.checkpoint_dir, "config.json"))
# copyfile("data_loader/data_loaders.py", os.path.join(self.checkpoint_dir, "data_loaders.py"))
|
125591
|
from functools import reduce
from re import compile, finditer
REGEX = compile(r'\{\{([a-zA-Z]+)\}\}')
REPLS = ('{{', '{'), ('}}', '}')
def create_template(s):
def my_template(**kwargs):
keys = {a.group(1): '' for a in finditer(REGEX, s)}
keys.update(kwargs)
return reduce(lambda a, kv: a.replace(*kv), REPLS, s).format(**keys)
return my_template
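# Hypothetical usage sketch (not part of the original snippet):
if __name__ == "__main__":
    greet = create_template("Hello, {{name}}!")
    print(greet(name="World"))  # -> Hello, World!
    print(greet())              # missing keys default to '' -> Hello, !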
|
125640
|
from pylayers.antprop.antenna import *
from pylayers.antprop.antvsh import *
import matplotlib.pylab as plt
from numpy import *
import pdb
"""
This test :
1 : loads a measured antenna
2 : applies an electrical delay obtained from data with getdelay method
3 : evaluate the antenna vsh coefficient with a downsampling factor of 2
4 : display the 16 first
"""
filename = 'S1R1.mat'
A = Antenna(filename,directory='ant/UWBAN/Matfile')
#plot(freq,angle(A.Ftheta[:,maxPowerInd[1],maxPowerInd[2]]*exp(2j*pi*freq.reshape(len(freq))*electricalDelay)))
freq = A.fa.reshape(104,1,1)
delayCandidates = arange(-10,10,0.001)
electricalDelay = A.getdelay(freq,delayCandidates)
disp('Electrical Delay = ' + str(electricalDelay)+' ns')
A.Ftheta = A.Ftheta*exp(2*1j*pi*freq*electricalDelay)
A.Fphi = A.Fphi*exp(2*1j*pi*freq*electricalDelay)
dsf = 2
#
# Calculate Vector Spherical Harmonics
#
A = vsh(A,dsf)
A.C.s1tos2(15)
EBr,EBi,ECr,ECi= A.Fsynth2s()
plt.figure()
plt.subplot(221)
plt.plot(EBr)
plt.subplot(222)
plt.plot(EBi)
plt.subplot(223)
plt.plot(ECr)
plt.subplot(224)
plt.plot(ECi)
|
125682
|
from typing import List
class Solution:
def findJudge(self, N: int, trust: List[List[int]]) -> int:
E = len(trust)
if E < N - 1:
return -1
trustScore = [0] * N
for a, b in trust:
trustScore[a - 1] -= 1
trustScore[b - 1] += 1
for index, t in enumerate(trustScore, 1):
if t == N - 1:
return index
return -1
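# Hypothetical usage sketch (not part of the original snippet):
if __name__ == "__main__":
    print(Solution().findJudge(3, [[1, 3], [2, 3]]))          # -> 3
    print(Solution().findJudge(3, [[1, 3], [2, 3], [3, 1]]))  # -> -1 (person 3 trusts someone)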
|
125724
|
from javax.swing.event import ListSelectionListener
class IssueListener(ListSelectionListener):
def __init__(self, view, table, scanner_pane, issue_name, issue_param):
self.view = view
self.table = table
self.scanner_pane = scanner_pane
self.issue_name = issue_name
self.issue_param = issue_param
def valueChanged(self, e):
row = self.table.getSelectedRow()
issue_param = self.table.getModel().getValueAt(row, 1)
hostname = self.table.getModel().getValueAt(row, 2)
path = self.table.getModel().getValueAt(row, 3)
scanner_issue_id = self.table.getModel().getValueAt(row, 4)
self.view.set_tabbed_pane(self.scanner_pane, self.table, hostname, path, self.issue_name, issue_param, scanner_issue_id)
|
125739
|
import collections
import numbers
import torch
import torch.nn.functional as F
from types import SimpleNamespace as nm
from .bioes import entities_jie_bioes
from .viterbi import decode_bioes_logits, INFTY
EPSILON = 1.e-8
def token_and_record_accuracy(logits, labels):
'''Computes accuracy metric from logits and true labels.
Takes care not to count padding for variable-length records.
Input:
logits - float tensor [B, S, L]
labels - integer tensor [B, S]
Where:
B - batch size (> 0)
S - max sequence length (> 0). Labels are padded with zeroes.
L - size of label vocabulary
Returns:
a dict with the following keys:
records - number of records (aka batch size)
correct_records - how many records were correct (a
record is correct when all its tokens were predicted correctly)
tokens - number of tokens in this batch
correct_tokens - how many tokens were correctly predicted in this batch
'''
assert logits.size(0) == labels.size(0)
batch_size = logits.size(0)
mask = (labels > 0).long()
seqlen = mask.sum(dim=1)
pred = torch.argmax(logits, dim=2)
correct_toks = ((pred==labels).long() * mask).sum(dim=1)
numtokens = seqlen.sum().item()
return dict(
records = batch_size,
correct_records = (correct_toks==seqlen).long().sum().item(),
tokens = numtokens,
correct_tokens = correct_toks.sum().item(),
)
def massage_bert_logits_and_labels(logits, labels):
'''Fixes BERT logits and labels.
BERT uses leading and trailing dummy tokens ([CLS] and [SEP]), and therefore
its logits and labels can not be directly used to compute performance metric.
This function removes dummy tokens.
'''
assert logits.size(0) == labels.size(0)
assert logits.size(1) == labels.size(1)
# skip leading [CLS] and trailing [SEP] tokens
mask = (labels > 0).long()[:,2:]
logits = logits[:,1:-1,:]
labels = labels[:,1:-1] * mask
return logits, labels
def entity_scores(logits, labels, labels_vocab, entity_decoder='fast', bioes_labels=None):
'''Computes entity performance metric.
Input:
logits - a float tensor of shape [B, S, L]
labels - an int tensor of shape [B, S], labels are zero-padded
labels_vocab - Vocab object to decode labels
entity_decoder = one of 'fast' or 'viterbi'. Dictates how to decode logits:
'fast' - use argmax on logits and then use heuristic to resolve invalid label sequences
'viterbi' - use Viterbi decoder to find optimal valid label sequence
bioes_labels - set of labels (before I-,B-,E-,S- prefixes are added).
Only needed for Viterbi decoder.
Returns:
a dict with the following keys:
tp - number of true positive entities
fp - number of false positive entities
fn - number of false negative entities
records - number of records processed (aka batch size)
correct_records - number of records where true and predicted
entities matched exactly
'''
if entity_decoder not in ('fast', 'viterbi'):
raise ValueError('Invalid value for "entity_decoder". Expect "fast" or "viterbi". Got: ' + entity_decoder)
if entity_decoder == 'viterbi' and bioes_labels is None:
raise ValueError('Parameter "bioes_labels" is required when "entity_decoder" is "viterbi"')
batch_size = logits.size(0)
pred = torch.argmax(logits, dim=2)
mask = (labels > 0).long()
seqlen = mask.sum(dim=1)
def logits_factory_factory(logits, labels_vocab):
def logits_factory(t, label):
if label not in labels_vocab:
return -INFTY
return logits[t, labels_vocab.encode(label)]
return logits_factory
tp = 0
fp = 0
fn = 0
records = 0
correct_records = 0
for i in range(batch_size):
true_labels = labels[i, :seqlen[i]].tolist()
true_labels = [labels_vocab.decode(i) for i in true_labels]
true_entities = set(entities_jie_bioes(true_labels))
if entity_decoder == 'viterbi':
_, pred_labels = decode_bioes_logits(
seqlen[i].item(),
logits_factory=logits_factory_factory(logits[i], labels_vocab=labels_vocab),
labels=bioes_labels
)
else:
pred_labels = pred[i,:seqlen[i]].tolist()
pred_labels = [labels_vocab.decode(i) for i in pred_labels]
pred_entities = set(entities_jie_bioes(pred_labels))
tpx = len(pred_entities & true_entities)
fpx = len(pred_entities - true_entities)
fnx = len(true_entities - pred_entities)
if fpx == 0 and fnx == 0:
correct_records += 1
records += 1
tp += tpx
fn += fnx
fp += fpx
return dict(
tp=tp,
fp=fp,
fn=fn,
records=records,
correct_records=correct_records,
)
def get_bioes_labels(labels):
'''Computes set of core labels from their BIOES expansion'''
labels = list(x for x in labels if x not in ('O', '<pad>', '<unk>'))
assert all(x[:2] in ('B-', 'I-', 'E-', 'S-') for x in labels)
return set(x[2:] for x in labels)
class Mean:
'''Mean aggregator'''
def __init__(self, iterable=None):
self._acc = collections.defaultdict(float)
self._num = collections.defaultdict(int)
if iterable:
self.update(iterable)
def reset(self):
self._acc.clear()
self._num.clear()
return self
def update(self, iterable):
for val in iterable:
self += val
def __iadd__(self, other):
for key, value in other.items():
self._acc[key] += value
self._num[key] += 1
return self
@property
def value(self):
return {
key: val / self._num[key]
for key, val in self._acc.items()
}
def accumulator(self, name):
return self._acc[name]
def count(self, name):
return self._num[name]
class Ema:
'''EMA aggregator'''
def __init__(self, tau=0.1):
self._value = collections.defaultdict(float)
self.tau = tau
def reset(self):
        # reset to a fresh defaultdict; assigning None here would break __iadd__
        self._value = collections.defaultdict(float)
        return self
def __iadd__(self, other):
for key, val in other.items():
if key not in self._value:
self._value[key] = val
else:
self._value[key] += (val - self._value[key]) * self.tau
return self
@property
def value(self):
return dict(self._value)
class Metric:
def __init__(self, acc=None):
if acc is None:
acc = Mean() # by default, compute mean statistics
self.acc = acc
def reset(self):
self.acc.reset()
return self
def append(self, output, target, **losses):
raise NotImplementedError()
def update(self, iterable):
for output, target in iterable:
self.append(output, target)
@property
def summary(self):
return self.acc.value
def __repr__(self):
return repr(self.summary)
class TokenAndRecordAccuracy(Metric):
def append(self, output, target, **losses):
self.acc += token_and_record_accuracy(output, target)
self.acc += losses
return self
@property
def summary(self):
summ = self.acc.value
tokens = summ.pop('tokens')
correct_tokens = summ.pop('correct_tokens')
records = summ.pop('records')
correct_records = summ.pop('correct_records')
summ['racc'] = correct_records / (records + EPSILON)
summ['tacc'] = correct_tokens / (tokens + EPSILON)
return summ
class TokenAndRecordAccuracyBert(TokenAndRecordAccuracy):
def append(self, output, target, **losses):
output, target = massage_bert_logits_and_labels(output, target)
return super().append(output, target, **losses)
class F1Score(Metric):
def __init__(self, labels_vocab, entity_decoder='fast', acc=None):
Metric.__init__(self, acc)
if entity_decoder not in ('fast', 'viterbi'):
raise ValueError('Invalid value for "entity_decoder", accept only "fast" and "viterbi": ' + entity_decoder)
self._labels_vocab = labels_vocab
self._entity_decoder = entity_decoder
self._bioes_labels = get_bioes_labels(labels_vocab.values)
def append(self, output, target, **losses):
self.acc += entity_scores(output, target,
labels_vocab=self._labels_vocab, entity_decoder=self._entity_decoder, bioes_labels=self._bioes_labels)
self.acc += losses
return self
@property
def summary(self):
summ = self.acc.value
tp, fp, fn = summ.pop('tp'), summ.pop('fp'), summ.pop('fn')
records = summ.pop('records')
correct_records = summ.pop('correct_records')
prec = tp / (tp + fp + EPSILON)
recall = tp / (tp + fn + EPSILON)
f1 = 2 * prec * recall / (prec + recall + EPSILON)
summ['prec'] = prec
summ['recall'] = recall
summ['f1'] = f1
summ['racc'] = correct_records / (records + EPSILON)
return summ
class F1ScoreBert(F1Score):
def append(self, output, target, **losses):
output, target = massage_bert_logits_and_labels(output, target)
return super().append(output, target, **losses)
class CrossEntropyLoss(Metric):
def append(self, output, target, **losses):
output = output.transpose(1, 2)
loss = F.cross_entropy(output, target, ignore_index=0, reduction='mean')
self.acc += {'loss': loss.item()}
return self
class MetricSet(Metric):
def __init__(self, *configs):
self._configs = configs
def reset(self):
for config in self._configs:
for metric in config.values():
metric.reset()
return self
def append(self, output, target, **losses):
for config in self._configs:
for metric in config.values():
metric.append(output, target, **losses)
@property
def summary(self):
out = {}
for config in self._configs:
for key, m in config.items():
for subkey, value in m.summary.items():
out[key+'.'+subkey] = value
return out
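# Hypothetical usage sketch (not part of the original module): the Mean
# aggregator averages every key it has seen across updates.
if __name__ == "__main__":
    m = Mean()
    m += {"loss": 1.0, "acc": 0.5}
    m += {"loss": 3.0}
    print(m.value)  # -> {'loss': 2.0, 'acc': 0.5}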
|
125793
|
import os
import sys
__all__ = ['ENVS_AND_VALS']
# Exercises both namespaced and simple-named settings variables
ENVS_AND_VALS = [("TAPISPY_PAGE_SIZE", 9000),
("TAPISPY_LOG_LEVEL", "CRITICAL"),
("TENANT_DNS_DOMAIN", "tacc.dev"),
("TACC_PROJECT_NAME", "TACO_SUPERPOWERS")]
|
125808
|
expected_normal_output = """Title Release Year Estimated Budget
Shawshank Redemption 1994 $25 000 000
The Godfather 1972 $6 000 000
The Godfather: Part II 1974 $13 000 000
The Dark Knight 2008 $185 000 000
12 Angry Men 1957 $350 000"""
expected_markdown_output = """Title | Release Year | Estimated Budget
:----------------------|:-------------|:----------------
Shawshank Redemption | 1994 | $25 000 000
The Godfather | 1972 | $6 000 000
The Godfather: Part II | 1974 | $13 000 000
The Dark Knight | 2008 | $185 000 000
12 Angry Men | 1957 | $350 000"""
expected_right_justified_output = """ Title Release Year Estimated Budget
Shawshank Redemption 1994 $25 000 000
The Godfather 1972 $6 000 000
The Godfather: Part II 1974 $13 000 000
The Dark Knight 2008 $185 000 000
12 Angry Men 1957 $350 000"""
expected_tab_output = """Some parameter Other parameter Last parameter
CONST 123456 12.45"""
expected_header_output = """------------------------------------------------------
Title Release Year Estimated Budget
------------------------------------------------------
Shawshank Redemption 1994 $25 000 000
The Godfather 1972 $6 000 000
The Godfather: Part II 1974 $13 000 000
The Dark Knight 2008 $185 000 000
12 Angry Men 1957 $350 000"""
expected_short_output = """Title Release Year Estimated Budget
Shawshank Redemption 1994 $25 000 000
The Godfather 1972 $6 000 000"""
expected_oneline_output = """Title Release Year Estimated Budget"""
expected_one_column_output = """Estimated Budget
$25 000 000
$6 000 000
$13 000 000
$185 000 000
$350 000"""
expected_justified_markdown_output = """Title | Release Year | Estimated Budget
:----------------------|-------------:|----------------:
Shawshank Redemption | 1994 | $25 000 000
The Godfather | 1972 | $6 000 000
The Godfather: Part II | 1974 | $13 000 000
The Dark Knight | 2008 | $185 000 000
12 Angry Men | 1957 | $350 000"""
expected_header_with_decorator_output = """----------------------- o -------------- o -----------------
Title o Release Year o Estimated Budget
----------------------- o -------------- o -----------------
Shawshank Redemption o 1994 o $25 000 000
The Godfather o 1972 o $6 000 000
The Godfather: Part II o 1974 o $13 000 000
The Dark Knight o 2008 o $185 000 000
12 Angry Men o 1957 o $350 000"""
expected_latex_output = r"""\begin{tabular}{lll}
Title & Release Year & Estimated Budget \\
Shawshank Redemption & 1994 & 25 000 000 \\
The Godfather & 1972 & 6 000 000 \\
The Godfather: Part II & 1974 & 13 000 000 \\
The Dark Knight & 2008 & 185 000 000 \\
12 Angry Men & 1957 & 350 000 \\
\end{tabular}"""
expected_latex_with_justification_output = r"""\begin{tabular}{lrr}
Title & Release Year & Estimated Budget \\
Shawshank Redemption & 1994 & 25 000 000 \\
The Godfather & 1972 & 6 000 000 \\
The Godfather: Part II & 1974 & 13 000 000 \\
The Dark Knight & 2008 & 185 000 000 \\
12 Angry Men & 1957 & 350 000 \\
\end{tabular}"""
NORMAL_FILENAME = 'examples/imdb.csv'
test_cases = [
([NORMAL_FILENAME], expected_normal_output),
([NORMAL_FILENAME, '--markdown'], expected_markdown_output),
([NORMAL_FILENAME, '-a', 'l'], expected_normal_output),
([NORMAL_FILENAME, '-a', 'r'], expected_right_justified_output),
(['examples/small.tsv', '-s', 'tab'], expected_tab_output),
([NORMAL_FILENAME, '-s', 'comma'], expected_normal_output),
([NORMAL_FILENAME, '--header'], expected_header_output),
([NORMAL_FILENAME, '-n', '3'], expected_short_output),
([NORMAL_FILENAME, '-n', '1'], expected_oneline_output),
([NORMAL_FILENAME, '--markdown', '-a', 'l', 'r', 'r'], expected_justified_markdown_output),
([NORMAL_FILENAME, '--header', '-d', ' o '], expected_header_with_decorator_output),
(['examples/imdb-latex.csv', '--latex'], expected_latex_output),
(['examples/imdb-latex.csv', '--latex', '-a', 'l', 'r', 'r'], expected_latex_with_justification_output),
([NORMAL_FILENAME, '--c', '3'], expected_one_column_output)
]
|
125813
|
import os
# src_dir = "/usr/lib/x86_64-linux-gnu"
src_dir = "/mnt/drive_c/datasets/kaju/opencv_libs"
# dst_dir = None
dst_dir = None
libname = "opencv"
# libversion = "1.58.0"
# leading . needed
src_libversion = ""
dst_libversion = ".4.0.0"
dry_run = True
if not dst_dir:
dst_dir = src_dir
files = os.listdir(src_dir)
for file in files:
if file.startswith("lib" + libname) and file.endswith(".so" + src_libversion):
print("Creating symlink for file " + file)
# src = os.path.join(dir, file + libversion)
file_split = file.split(".")
filename = file_split[0] + "." + file_split[1]
src_file = os.path.join(src_dir, filename + src_libversion)
dst_file = os.path.join(dst_dir, filename + dst_libversion)
print("Src file: " + src_file)
print("Dst file: " + dst_file)
if not dry_run:
try:
os.symlink(src_file, dst_file)
except OSError:
os.remove(dst_file)
os.symlink(src_file, dst_file)
|
125869
|
import pytest
import base64
from mock import MagicMock
from volttrontesting.utils.utils import AgentMock
from volttron.platform.vip.agent import Agent
from volttroncentral.platforms import PlatformHandler, Platforms
from volttroncentral.agent import VolttronCentralAgent
@pytest.fixture
def mock_vc():
VolttronCentralAgent.__bases__ = (AgentMock.imitate(Agent, VolttronCentralAgent()),)
vc = VolttronCentralAgent()
vc._configure("test_config", "NEW", {})
yield vc
def test_when_platform_added_disconnected(mock_vc):
platforms = Platforms(vc=mock_vc)
assert platforms
assert len(platforms.get_platform_vip_identities()) == 0
assert len(platforms.get_platform_list(None, None)) == 0
new_platform_vip = "vcp-test_platform"
platforms.add_platform(new_platform_vip)
assert len(platforms.get_platform_vip_identities()) == 1
assert len(platforms.get_platform_list(None, None)) == 1
encoded_vip = base64.b64encode(new_platform_vip.encode('utf-8')).decode('utf-8')
platform = platforms.get_platform(encoded_vip)
assert isinstance(platform, PlatformHandler)
assert platform.vip_identity == new_platform_vip
platforms.disconnect_platform(new_platform_vip)
assert len(platforms.get_platform_list(None, None)) == 0
assert len(platforms.get_platform_vip_identities()) == 0
def test_platform_added_during_handle_platform_connection():
scaneventmock = MagicMock()
platformsmock = MagicMock()
vc = VolttronCentralAgent()
vc._platform_scan_event = scaneventmock
vc._platforms = platformsmock
vip_id = "vcp-platform1"
vc._handle_platform_connection(vip_id)
assert platformsmock.add_platform.called
def test_platform_scan():
vipmock = MagicMock()
peerlistmock = MagicMock()
peerlistmock.return_value.get.return_value = ["vcp-1", "vcp-2"]
vipmock.peerlist = peerlistmock
coremock = MagicMock()
vc = VolttronCentralAgent()
vc.vip = vipmock
vc.core = coremock
# scanning of platform test starts here.
vc._scan_platform_connect_disconnect()
assert len(vc._platforms.get_platform_vip_identities()) == 2
assert "vcp-1" in vc._platforms.get_platform_vip_identities()
assert "vcp-2" in vc._platforms.get_platform_vip_identities()
assert len(vc._platforms.get_platform_list(None, None)) == 2
|
125954
|
import pandas as pd
import flexmatcher
# Let's assume that the mediated schema has three attributes
# movie_name, movie_year, movie_rating
# creating one sample DataFrame where the schema is (year, Movie, imdb_rating)
vals1 = [['year', 'Movie', 'imdb_rating'],
['2001', 'Lord of the Rings', '8.8'],
['2010', 'Inception', '8.7'],
['1999', 'The Matrix', '8.7']]
header = vals1.pop(0)
data1 = pd.DataFrame(vals1, columns=header)
# specifying mapping between schema of the dataframe and the mediated schema
data1_mapping = {'year': 'movie_year', 'imdb_rating': 'movie_rating',
'Movie': 'movie_name'}
# creating another sample DataFrame where the schema is
# (title, produced, popularity)
vals2 = [['title', 'produced', 'popularity'],
['The Godfather', '1972', '9.2'],
['Silver Linings Playbook', '2012', '7.8'],
['The Big Short', '2015', '7.8']]
header = vals2.pop(0)
data2 = pd.DataFrame(vals2, columns=header)
# specifying mapping between schema of the dataframe and the mediated schema
data2_mapping = {'popularity': 'movie_rating', 'produced': 'movie_year',
'title': 'movie_name'}
# creating a list of dataframes and their mappings
schema_list = [data1, data2]
mapping_list = [data1_mapping, data2_mapping]
# creating the third dataset (which is our test dataset)
# we assume that we don't know the mapping and we want FlexMatcher to find it.
vals3 = [['rt', 'id', 'yr'],
['8.5', 'The Pianist', '2002'],
['7.7', 'The Social Network', '2010']]
header = vals3.pop(0)
data3 = pd.DataFrame(vals3, columns=header)
# Using Flexmatcher
fm = flexmatcher.FlexMatcher(schema_list, mapping_list, sample_size=100)
fm.train() # train flexmatcher
predicted_mapping = fm.make_prediction(data3)
# printing the predictions
print ('FlexMatcher predicted that "rt" should be mapped to ' +
predicted_mapping['rt'])
print ('FlexMatcher predicted that "yr" should be mapped to ' +
predicted_mapping['yr'])
print ('FlexMatcher predicted that "id" should be mapped to ' +
predicted_mapping['id'])
|
125963
|
import os
import numpy as np
import pandas as pd
from collections import defaultdict
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
def tabulate_events(dir_path):
summary_iterators = [EventAccumulator(os.path.join(dir_path, dname)).Reload() for dname in os.listdir(dir_path)]
tags = summary_iterators[0].Tags()['scalars']
for it in summary_iterators:
assert it.Tags()['scalars'] == tags
out = defaultdict(list)
steps = []
for tag in tags:
steps = [e.step for e in summary_iterators[0].Scalars(tag)]
wall_times = [e.wall_time for e in summary_iterators[0].Scalars(tag)]
for events in zip(*[acc.Scalars(tag) for acc in summary_iterators]):
assert len(set(e.step for e in events)) == 1
out[tag].append([e.value for e in events])
return out, steps, wall_times
def to_csv(log_dir_path, csv_dir_path):
dirs = os.listdir(log_dir_path)
d, steps, wall_times = tabulate_events(log_dir_path)
tags, values = zip(*d.items())
np_values = np.array(values)
csv_columns = ['step', 'wall_time']
csv_columns.extend(dirs)
    # list.extend mutates csv_columns in place and returns None, so print the built list
    print('csv_columns', csv_columns)
for index, tag in enumerate(tags):
# df = pd.DataFrame(np_values[index], index=steps, columns=dirs)
df = pd.DataFrame(np.vstack((steps, wall_times, np_values[index].T)).T, columns=csv_columns)
df.to_csv(get_csv_file_path(csv_dir_path, tag), index=False)
def get_csv_file_path(csv_dir_path, tag):
file_name = tag.replace("/", "_") + '.csv'
folder_path = os.path.join(csv_dir_path, 'csv')
if not os.path.exists(folder_path):
os.makedirs(folder_path)
return os.path.join(folder_path, file_name)
if __name__ == '__main__':
# example
train_id = 'SR_1D_CNN_SAMPLE-TRAIN'
log_dir_path = "/var/tensorflow/tsp/sample/logs/{}/".format(train_id)
csv_dir_path = "/var/tensorflow/tsp/sample/history/{}/".format(train_id)
to_csv(log_dir_path, csv_dir_path)
|
125993
|
import re
from uuid import UUID
from typing import Union
class BTUUID(UUID):
"""An extension of the built-in UUID class with some utility functions for converting Bluetooth UUID16s to and from UUID128s."""
_UUID16_UUID128_FMT = "0000{0}-0000-1000-8000-00805F9B34FB"
_UUID16_UUID128_RE = re.compile(
"^0000([0-9A-F]{4})-0000-1000-8000-00805F9B34FB$", re.IGNORECASE
)
_UUID16_RE = re.compile("^(?:0x)?([0-9A-F]{4})$", re.IGNORECASE)
@classmethod
def from_uuid16(cls, id: Union[str, int]) -> "BTUUID":
"""Converts an integer or 4 digit hex string to a Bluetooth compatible UUID16.
Args:
id (Union[str, int]): The UUID representation to convert.
Raises:
ValueError: Raised if the supplied UUID16 is not valid.
Returns:
BTUUID: The resulting UUID.
"""
hex = "0000"
if type(id) is str:
match = cls._UUID16_RE.search(id)
if not match:
raise ValueError("id is not a valid UUID16")
hex = match.group(1)
elif type(id) is int:
if id > 65535 or id < 0:
raise ValueError("id is out of range")
hex = "{:04X}".format(id)
return cls(cls._UUID16_UUID128_FMT.format(hex))
@classmethod
def from_uuid16_128(cls, id: str) -> "BTUUID":
"""Converts a 4 or 32 digit hex string to a bluetooth compatible UUID16.
Raises:
ValueError: Raised if the supplied string is not a valid UUID.
Returns:
BTUUID: The resulting UUID.
"""
if len(id) == 4:
return cls.from_uuid16(id)
else:
uuid = cls(id)
try:
                # If the result won't convert to a uuid16 then it must be invalid.
_ = uuid.uuid16
except ValueError:
raise ValueError("id is not a valid uuid16")
return uuid
@property
def uuid16(self) -> str:
"""Converts the UUID16 to a 4 digit string representation.
Raises:
ValueError: Raised if this UUID is not a valid UUID16.
Returns:
str: The UUID representation.
"""
match = self._UUID16_UUID128_RE.search(str(self))
if not match:
raise ValueError("self is not a uuid16")
return match.group(1)
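# Hypothetical usage sketch (not part of the original module):
if __name__ == "__main__":
    heart_rate = BTUUID.from_uuid16(0x180D)
    print(heart_rate)         # -> 0000180d-0000-1000-8000-00805f9b34fb
    print(heart_rate.uuid16)  # -> 180d
    print(BTUUID.from_uuid16("0x2A37").uuid16)  # -> 2a37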
|
126065
|
from torch.nn.modules.loss import _Loss
import torch
from enum import Enum
from typing import Union
class Mode(Enum):
BINARY = "binary"
MULTICLASS = "multiclass"
MULTILABEL = "multilabel"
class Reduction(Enum):
SUM = "sum"
MEAN = "mean"
NONE = "none"
SAMPLE_SUM = "sample_sum" # mean by sample dim + sum by batch dim
def _reduce(x: torch.Tensor, reduction: Union[str, Reduction]="mean") -> torch.Tensor:
r"""Reduce input in batch dimension if needed.
Args:
x: Tensor with shape (N, *).
reduction: Specifies the reduction type:
``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
"""
reduction = Reduction(reduction)
if reduction == Reduction.NONE:
return x
elif reduction == Reduction.MEAN:
return x.mean()
elif reduction == Reduction.SUM:
return x.sum()
else:
raise ValueError("Uknown reduction. Expected one of {'none', 'mean', 'sum'}")
class Loss(_Loss):
"""Loss which supports addition and multiplication"""
def __add__(self, other):
if isinstance(other, Loss):
return SumOfLosses(self, other)
else:
raise ValueError("Loss should be inherited from `Loss` class")
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, value):
if isinstance(value, (int, float)):
return WeightedLoss(self, value)
else:
raise ValueError("Loss should be multiplied by int or float")
def __rmul__(self, other):
return self.__mul__(other)
class WeightedLoss(Loss):
"""
Wrapper class around loss function that applies weighted with fixed factor.
This class helps to balance multiple losses if they have different scales
"""
def __init__(self, loss, weight=1.0):
super().__init__()
self.loss = loss
self.register_buffer("weight", torch.tensor([weight]))
def forward(self, *inputs):
return self.loss(*inputs) * self.weight[0]
class SumOfLosses(Loss):
def __init__(self, l1, l2):
super().__init__()
self.l1 = l1
self.l2 = l2
def __call__(self, *inputs):
return self.l1(*inputs) + self.l2(*inputs)
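# Hypothetical composition sketch (not part of the original module): the
# operator overloads above allow weighting and summing losses. `_L1` is an
# illustrative subclass, not something defined elsewhere in this codebase.
class _L1(Loss):
    def forward(self, output, target):
        return (output - target).abs().mean()
if __name__ == "__main__":
    combined = _L1() + 0.5 * _L1()  # SumOfLosses(_L1, WeightedLoss(_L1, 0.5))
    print(combined(torch.zeros(3), torch.ones(3)))  # -> tensor(1.5000)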
|
126095
|
from setuptools import setup, find_packages
import re
# Get the version, following advice from https://stackoverflow.com/a/7071358/851699
VERSIONFILE="artemis/_version.py"
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
setup(
name='artemis-ml',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/quva-lab/artemis',
long_description='Artemis aims to get rid of all the boring, bureaucratic coding (plotting, file management, etc) involved in machine learning projects, so you can get to the good stuff quickly.',
install_requires=['numpy', 'scipy', 'matplotlib', 'pytest', 'pillow', 'tabulate', 'si-prefix', 'enum34'],
extras_require = {
'remote_plotting': ["paramiko", "netifaces"]
},
version=verstr,
packages=find_packages(),
scripts=[])
|
126104
|
from nlgen.cfg import CFG, PTerminal, PUnion
def test_simple_production_union():
cfg = CFG([
("S", PUnion([
PTerminal("foo"),
PTerminal("bar")
])),
])
expect = [("foo",), ("bar",)]
result = list(cfg.permutation_values("S"))
assert expect == result
def test_equality():
assert (PUnion([PTerminal("foo"), PTerminal("bar")])
==
PUnion([PTerminal("foo"), PTerminal("bar")]))
|
126150
|
from __future__ import print_function
import os.path
import sys
import json
from collections import OrderedDict
from itertools import chain
from dmcontent import ContentLoader, utils
_base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def _get_questions_by_type(framework_slug, doc_type, question_types):
manifest_name = '{}_search_filters'.format(doc_type)
loader = ContentLoader(_base_dir)
loader.load_manifest(
framework_slug,
doc_type,
manifest_name,
)
manifest = loader.get_manifest(framework_slug, manifest_name)
return (q for q in sum((s.questions for s in manifest.sections), []) if q.type in question_types)
def _checkbox_tree_transformation_generator(checkbox_tree_question):
def update_ancestors_dict(options, leaf_values_dict, parents):
for option in options:
children = option.get('options', [])
if not children:
if parents:
if parents not in leaf_values_dict:
leaf_values_dict[parents] = list()
# list of child-values preserves order from the source yaml, for the benefit of
# git history in output file
leaf_values_dict[parents].append(utils.get_option_value(option))
else:
update_ancestors_dict(children, leaf_values_dict, parents.union([utils.get_option_value(option)]))
leaf_values_by_ancestor_set = OrderedDict() # again, preserve order from source yaml
update_ancestors_dict(checkbox_tree_question.options, leaf_values_by_ancestor_set, parents=frozenset())
return [
{
'append_conditionally': OrderedDict((
('field', checkbox_tree_question.id),
('any_of', child_values),
('append_value', sorted(ancestor_values)),
))
} for ancestor_values, child_values in leaf_values_by_ancestor_set.items()
]
def _derived_options_transformation_generator(checkbox_question):
retval = [
{
'append_conditionally': OrderedDict((
('field', option['derived_from']['question']),
('target_field', checkbox_question.id),
('any_of', option['derived_from']['any_of']),
('append_value', [utils.get_option_value(option)]),
))
}
for option in checkbox_question.get('options')
if option.get('derived_from', None) is not None
]
return retval
TRANSFORMATION_GENERATORS = {
'checkbox_tree': _checkbox_tree_transformation_generator,
'checkboxes': _derived_options_transformation_generator,
'radios': _derived_options_transformation_generator
}
def get_transformations(framework_slug, doc_type):
for question in _get_questions_by_type(framework_slug, doc_type, TRANSFORMATION_GENERATORS.keys()):
for transformer in TRANSFORMATION_GENERATORS[question.type](question):
yield transformer
def generate_search_mapping(framework_slug, doc_type, file_handle, mapping_type, extra_meta={}):
with open(os.path.join(
_base_dir,
"frameworks",
framework_slug,
"search_mappings",
"{}.json".format(doc_type),
), 'r') as h_template:
mapping_json = json.load(h_template, object_pairs_hook=OrderedDict) # preserve template order for git history
mappings = mapping_json["mappings"]
# Elasticsearch 7 removes mapping types by default
# https://www.elastic.co/guide/en/elasticsearch/reference/7.10/removal-of-types.html
include_type_name = False
if mapping_type in mappings:
include_type_name = True
original_meta = mappings[mapping_type].get("_meta", {}) if include_type_name else mappings.get("_meta", {})
# starting our final _meta dict from scratch so we can ensure extra_meta gets the top spot, in ordered output
meta = OrderedDict((
*((k, v) for k, v in extra_meta.items()),
        # we want entries from original_meta to come *after* entries from extra_meta, but want extra_meta entries
        # to override original_meta, so ignore original_meta entries which are already in extra_meta
*((k, v) for k, v in original_meta.items() if k not in extra_meta),
("transformations", list(chain(
extra_meta.get("transformations", ()),
original_meta.get("transformations", ()),
get_transformations(framework_slug, doc_type),
))),
))
if include_type_name:
mappings[mapping_type]["_meta"] = meta
else:
mappings["_meta"] = meta
json.dump(mapping_json, file_handle, indent=2, separators=(',', ': '))
print('', file=file_handle)
def generate_config(framework_slug, doc_type, extra_meta, output_dir=None):
if output_dir:
with open(os.path.join(output_dir, '{}-{}.json'.format(doc_type, framework_slug)), 'w') as base_mapping:
generate_search_mapping(framework_slug, doc_type, base_mapping, doc_type, extra_meta)
else:
generate_search_mapping(framework_slug, doc_type, sys.stdout, doc_type, extra_meta)
|
126152
|
def get_context_user(context):
if 'user' in context:
return context['user']
elif 'request' in context:
return getattr(context['request'], 'user', None)
|
126195
|
from importlib import import_module
from py2swagger.plugins import Py2SwaggerPlugin, Py2SwaggerPluginException
from py2swagger.introspector import BaseDocstringIntrospector
from py2swagger.utils import OrderedDict
class FalconMethodIntrospector(BaseDocstringIntrospector):
def get_operation(self):
"""
Get full swagger operation object
:return: swagger operation object
:rtype: OrderedDict
"""
operation = OrderedDict(
tags=self.parser.get_tags(),
summary=self.parser.get_summary(),
description=self.parser.get_description(),
parameters=self.parameters,
produces=None,
consumes=None,
responses=self.responses,
security=self.security
)
for key, value in list(operation.items()):
# Remove empty keys
if not value:
operation.pop(key)
return operation
def get_security_definitions(self):
return self.security_definitions
class FalconPy2SwaggerPlugin(Py2SwaggerPlugin):
help = 'Plugin for Falcon Framework applications'
filtered_methods = ('method_not_allowed', 'on_options')
def set_parser_arguments(self, parser):
parser.add_argument('app', help='Falcon application. Example: project.api:app')
def run(self, arguments, *args, **kwargs):
module_name, application_name = arguments.app.split(':', 1)
try:
m = import_module(module_name)
except ImportError:
raise Py2SwaggerPluginException('No module named {}'.format(module_name))
app = getattr(m, application_name, None)
if app is None or not hasattr(app, '_router'):
raise Py2SwaggerPluginException('Invalid Falcon application {}'.format(application_name))
paths = {}
security_definitions = {}
for path, method_map in self.generate_routes(app._router._roots):
if path not in paths:
paths[path] = {}
for method in method_map:
f = method_map[method]
if hasattr(f, '__self__') and f.__name__ not in self.filtered_methods:
method_introspector = FalconMethodIntrospector(f)
operation = method_introspector.get_operation()
paths[path][method.lower()] = operation
security_definitions.update(method_introspector.get_security_definitions())
swagger_part = {
'paths': paths,
'definitions': {},
'security_definitions': security_definitions
}
return swagger_part
def generate_routes(self, nodes, path=''):
for node in nodes:
node_path = '{}/{}'.format(path, node.raw_segment)
if node.children:
for item in self.generate_routes(node.children, node_path):
yield item
else:
yield (node_path, node.method_map)
|
126241
|
import setuptools
import versioneer
from pathlib import Path
# Extract information from the README file and embed it in the package.
readme_path = Path(__file__).absolute().parent / "README.md"
with open(readme_path, "r") as fh:
long_description = fh.read()
setuptools.setup(
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 3 - Alpha",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
cmdclass=versioneer.get_cmdclass(),
description="Utilities to decode CAN log files",
install_requires=[
"numpy"
],
long_description=long_description,
long_description_content_type="text/markdown",
name="can_decoder",
packages=setuptools.find_packages(),
python_requires='>=3.5',
url="https://github.com/CSS-Electronics/can_decoder",
version=versioneer.get_version(),
)
|
126244
|
a=int(input("Input an integer :"))
n1=int("%s"%a)
n2=int("%s%s"%(a,a))
n3=int("%s%s%s"%(a,a,a))
print(n1+n2+n3)
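# Hypothetical worked example (not part of the original snippet):
# for input 5 the script prints 5 + 55 + 555 = 615.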
|
126254
|
from pydantic import validator, ValidationError, Field
from .types import BaseModel, Union, Optional, Literal, List
from typing import Dict
import pathlib
class ParasiticValues(BaseModel):
mean: int = 0
min: int = 0
max: int = 0
class Layer(BaseModel):
name: str
gds_layer_number: int
gds_data_type: Optional[Dict[str, int]] = Field(default_factory=lambda: {"draw": 0})
class LayerMetal(Layer):
direction: Literal['h', 'v']
min_length: int
max_length: Optional[int]
min_end_to_end: int
offset: int
width: Union[int, List[int]]
space: Union[int, List[int]]
color: Optional[List[str]]
stop_pitch: int
stop_point: int
stop_offset: int
unit_c: Optional[Dict[int, ParasiticValues]]
unit_r: Optional[Dict[int, ParasiticValues]]
unit_cc: Optional[Dict[int, ParasiticValues]]
@validator('name')
def _validate_name(cls, v):
assert v.startswith('M'), f'Metal layer name {v} should start with M'
return v
@validator('min_length', 'min_end_to_end', 'width', 'space', 'stop_pitch', 'stop_point')
def _validate_positive(cls, v):
if isinstance(v, List):
assert min(v) > 0, f'Values {v} should be positive'
else:
assert v > 0, f'Value {v} should be positive'
return v
@validator('stop_offset')
def _validate_non_negative(cls, v):
if isinstance(v, List):
assert min(v) >= 0, f'Values {v} should be non-negative'
else:
            assert v >= 0, f'Value {v} should be non-negative'
return v
@validator('space')
def _validate_width(cls, v, values):
if isinstance(v, List):
assert len(v) == len(values['width']), f'width and space length should match'
return v
class LayerVia(Layer):
class Config:
allow_mutation = True
stack: List[str]
width_x: int
width_y: int
space_x: int
space_y: int
layer_l_width: Optional[List[int]] = None
layer_l_enc_x: Optional[int] = 0
layer_l_enc_y: Optional[int] = 0
layer_h_width: Optional[List[int]] = None
layer_h_enc_x: Optional[int] = 0
layer_h_enc_y: Optional[int] = 0
unit_r: Optional[Dict[int, ParasiticValues]]
@validator('stack')
def _validate_stack(cls, v):
assert len(v) == 2
return v
class PDK(BaseModel):
class Config:
allow_mutation = True
name: str
layers: Dict[str, Union[LayerMetal, LayerVia]] = Field(default_factory=lambda: {})
scale_factor: int = 1
@validator('layers')
def _validate_via(cls, layers):
for key, via in layers.items():
if isinstance(via, LayerVia):
ml, mh = via.stack
assert ml in layers, f'Lower layer {ml} not found for {key} {layers.keys()}'
assert mh in layers, f'Higher layer {mh} not found for {key}'
assert layers[ml].direction != layers[mh].direction, f'Lower and higher layer directions are not orthogonal'
if via.layer_l_width is None:
via.layer_l_width = layers[ml].width.copy()
if via.layer_h_width is None:
via.layer_h_width = layers[mh].width.copy()
return layers
def add_layer(self, layer):
assert layer.name not in self.layers
self.layers[layer.name] = layer
def generate_adr_collaterals(self, write_path: pathlib.Path, x_pitch: int, x_grid: int, y_pitch: int, y_grid: int, region: List[int]):
with open(write_path/"adr_forbidden_patterns.txt", "wt") as fp:
# TODO: Write rules for horizontal and vertical via spacing
fp.write(f'\n')
with open(write_path/"adr_options.txt", "wt") as fp:
fp.write(f'Option name=gr_region_width_in_poly_pitches value={x_grid}\n')
fp.write(f'Option name=gr_region_height_in_diff_pitches value={y_grid}\n')
with open(write_path/"adr_design_rules.txt", "wt") as fp:
for name, layer in self.layers.items():
if isinstance(layer, LayerMetal):
fp.write(f'Rule name={name}_minete type=minete value={layer.min_end_to_end} layer={name}\n')
fp.write(f'Rule name={name}_minlength type=minlength value={layer.min_length} layer={name}\n')
with open(write_path/"adr_metal_templates.txt", "wt") as fp:
for name, layer in self.layers.items():
if isinstance(layer, LayerMetal):
line = f'MetalTemplate layer={name} name={name}_template_0'
line += f' widths={",".join(str(i) for i in layer.width)}'
line += f' spaces={",".join(str(i) for i in layer.space)}'
if layer.color is not None and len(layer.color) > 0:
line += f' colors={",".join(str(i) for i in layer.color)}'
line += " stops=%s" % (",".join( str(i) for i in [layer.stop_pitch - 2*layer.stop_point, 2*layer.stop_point]))
line += '\n'
fp.write(line)
# Single metal template instance. Generalize to multiple as needed in the future.
with open(write_path/"adr_metal_templates_instances.txt", "wt") as fp:
for name, layer in self.layers.items():
if isinstance(layer, LayerMetal):
line = f'MetalTemplateInstance template={name}_template_0'
line += f' pgdoffset_abs={layer.offset}'
line += f' ogdoffset_abs={layer.stop_point}'
line += f' region={":".join(str(i) for i in region)}'
line += '\n'
fp.write(line)
def _via_string(via: LayerVia):
via_str = f'Generator name={via.name}_{via.width_x}_{via.width_y} {{ \n'
via_str += f' Layer1 value={via.stack[0]} {{\n'
via_str += f' x_coverage value={via.layer_l_enc_x}\n'
via_str += f' y_coverage value={via.layer_l_enc_y}\n'
via_str += f' widths value={",".join(str(i) for i in via.layer_l_width)}\n'
via_str += f' }}\n'
via_str += f' Layer2 value={via.stack[1]} {{\n'
via_str += f' x_coverage value={via.layer_h_enc_x}\n'
via_str += f' y_coverage value={via.layer_h_enc_y}\n'
via_str += f' widths value={",".join(str(i) for i in via.layer_h_width)}\n'
via_str += f' }}\n'
via_str += f' CutWidth value={via.width_x}\n'
via_str += f' CutHeight value={via.width_y}\n'
via_str += f' cutlayer value={via.name}\n'
via_str += f'}}\n'
return via_str
with open(write_path/"adr_via_generators.txt", "wt") as fp:
for name, layer in self.layers.items():
if isinstance(layer, LayerVia):
via_str = _via_string(layer)
fp.write(via_str)
fp.write(f'\n')
with open(write_path/"adr_layers.txt", "wt") as fp:
# Dummy layer required for global grid
line = f'Layer name=diffusion pgd=hor level=0 {{\n'
line += f' Type value=diffusion\n'
line += f' Technology pitch={y_pitch}\n'
line += f'}}\n'
fp.write(line)
# Dummy layer required for global grid
line = f'Layer name=wirepoly pgd=ver level=1 {{\n'
line += f' Type value=wire\n'
line += f' Type value=poly\n'
line += f' Technology pitch={x_pitch}\n'
line += f'}}\n'
fp.write(line)
# identify electrical connectivity
connected_layers = dict()
for name, layer in self.layers.items():
if isinstance(layer, LayerVia):
ml = layer.stack[0]
mh = layer.stack[1]
connected_layers[name] = [ml, mh]
if ml not in connected_layers:
connected_layers[ml] = []
connected_layers[ml].append(name)
if mh not in connected_layers:
connected_layers[mh] = []
connected_layers[mh].append(name)
level = 2
for i in range(0, 99):
name = f'M{i}'
if name in self.layers:
layer = self.layers[name]
pgd = 'ver' if layer.direction == 'v' else 'hor'
line = f'Layer name={name} pgd={pgd} level={level} {{\n'
line += f' Type value=wire\n'
line += f' Type value=metal\n'
for l in connected_layers[name]:
line += f' ElectricallyConnected layer={l}\n'
line += f'}}\n'
fp.write(line)
level +=1
name = f'V{i}'
if name in self.layers:
line = f'Layer name={name} level={level} {{\n'
line += f' Type value=via\n'
for l in connected_layers[name]:
line += f' ElectricallyConnected layer={l}\n'
line += f'}}\n'
fp.write(line)
level +=1
fp.write(f'\n')
|
126317
|
from django.test import TestCase
import pytest
from ...test_assets.utils import get_taxbrain_model
from ...test_assets.test_models import (TaxBrainTableResults,
TaxBrainFieldsTest)
from ...dynamic.models import DynamicBehaviorOutputUrl
from ...dynamic.forms import DynamicBehavioralInputsModelForm
class TaxBrainDynamicResultsTest(TaxBrainTableResults, TestCase):
def test_dynamic_tc_lt_0130(self):
self.tc_lt_0130(self.test_coverage_behavioral_fields,
Form=DynamicBehavioralInputsModelForm,
UrlModel=DynamicBehaviorOutputUrl)
def test_dynamic_tc_gt_0130(self):
self.tc_gt_0130(self.test_coverage_behavioral_fields,
Form=DynamicBehavioralInputsModelForm,
UrlModel=DynamicBehaviorOutputUrl)
class TaxBrainDynamicFieldsTest(TaxBrainFieldsTest, TestCase):
def test_set_fields(self):
start_year = 2017
fields = self.test_coverage_behavioral_gui_fields.copy()
fields['first_year'] = start_year
self.parse_fields(start_year, fields,
Form=DynamicBehavioralInputsModelForm)
def test_data_source_puf(self):
start_year = 2017
fields = self.test_coverage_behavioral_gui_fields.copy()
fields['first_year'] = start_year
fields['data_source'] = 'PUF'
model = self.parse_fields(start_year, fields,
Form=DynamicBehavioralInputsModelForm,
use_puf_not_cps=True)
assert model.use_puf_not_cps
def test_get_model_specs_with_errors(self):
start_year = 2017
fields = self.test_coverage_behavioral_gui_fields.copy()
fields['BE_sub'] = [-0.8]
fields['BE_inc'] = [0.2]
fields['first_year'] = start_year
fields['data_source'] = 'PUF'
model = self.parse_fields(start_year, fields,
Form=DynamicBehavioralInputsModelForm,
use_puf_not_cps=True)
(reform_dict, assumptions_dict, reform_text, assumptions_text,
errors_warnings) = model.get_model_specs()
assert len(errors_warnings['behavior']['errors']) > 0
assert len(errors_warnings['behavior']['warnings']) == 0
assert len(errors_warnings['policy']['errors']) == 0
assert len(errors_warnings['policy']['warnings']) == 0
|
126331
|
import asyncio
import pytest
from panini.async_test_client import AsyncTestClient
from panini import app as panini_app
def run_panini():
app = panini_app.App(
service_name="async_test_client_test_error_handling",
host="127.0.0.1",
port=4222,
)
@app.listen("async_test_client.test_error_handling")
async def listen(msg):
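        # deliberately raises ZeroDivisionError, so no reply is sent and the request below times out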
error = 1 // 0
return {"success": False if error else True}
app.start()
@pytest.fixture
async def client():
client = await AsyncTestClient(run_panini=run_panini).start()
yield client
await client.stop()
@pytest.mark.asyncio
async def test_error(client):
subject = "async_test_client.test_error_handling"
with pytest.raises(asyncio.TimeoutError):
await client.request(subject, {})
|
126348
|
import os
class Alphabet:
"""
Bijective mapping from strings to integers.
>>> a = Alphabet()
>>> [a[x] for x in 'abcd']
[0, 1, 2, 3]
>>> list(map(a.lookup, range(4)))
['a', 'b', 'c', 'd']
>>> a.stop_growth()
>>> a['e']
>>> a.freeze()
>>> a.add('z')
Traceback (most recent call last):
...
ValueError: Alphabet is frozen. Key "z" not found.
>>> print(a.plaintext())
a
b
c
d
"""
def __init__(self):
self._mapping = {} # str -> int
self._flip = {} # int -> str; timv: consider using array or list
self._i = 0
self._frozen = False
self._growing = True
def __repr__(self):
return 'Alphabet(size=%s,frozen=%s)' % (len(self), self._frozen)
def freeze(self):
self._frozen = True
def stop_growth(self):
self._growing = False
@classmethod
def from_iterable(cls, s):
"Assumes keys are strings."
inst = cls()
for x in s:
inst.add(x)
# inst.freeze()
return inst
    def keys(self):
        return self._mapping.keys()
    def items(self):
        return self._mapping.items()
def imap(self, seq, emit_none=False):
"""
        Apply alphabet to sequence while filtering. By default, `None` is not
        emitted, so note that the output sequence may have fewer items than the input.
"""
if emit_none:
for s in seq:
yield self[s]
else:
for s in seq:
x = self[s]
if x is not None:
yield x
def map(self, seq, *args, **kwargs):
return list(self.imap(seq, *args, **kwargs))
def add_many(self, x):
for k in x:
self.add(k)
def lookup(self, i):
if i is None:
return None
#assert isinstance(i, int)
return self._flip[i]
def lookup_many(self, x):
return map(self.lookup, x)
def __contains__(self, k):
#assert isinstance(k, basestring)
return k in self._mapping
def __getitem__(self, k):
try:
return self._mapping[k]
except KeyError:
#if not isinstance(k, basestring):
# raise ValueError("Invalid key (%s): only strings allowed." % (k,))
if self._frozen:
raise ValueError('Alphabet is frozen. Key "%s" not found.' % (k,))
if not self._growing:
return None
x = self._mapping[k] = self._i
self._i += 1
self._flip[x] = k
return x
add = __getitem__
def __setitem__(self, k, v):
assert k not in self._mapping
assert isinstance(v, int)
self._mapping[k] = v
self._flip[v] = k
def __iter__(self):
for i in range(len(self)):
yield self._flip[i]
def enum(self):
for i in range(len(self)):
yield (i, self._flip[i])
def tolist(self):
"Ordered list of the alphabet's keys."
return [self._flip[i] for i in range(len(self))]
def __len__(self):
return len(self._mapping)
def plaintext(self):
"assumes keys are strings"
return '\n'.join(self)
@classmethod
def load(cls, filename):
if not os.path.exists(filename):
return cls()
with open(filename) as f:
return cls.from_iterable(l.strip() for l in f)
def save(self, filename):
with open(filename, 'w') as f:
f.write(self.plaintext())
def __eq__(self, other):
return self._mapping == other._mapping
|
126397
|
import tensorflow as tf
def deconvLayer(x,kernelSize,outMaps,stride): #default caffe style MSRA
with tf.variable_scope(None,default_name="deconv"):
inMaps = x.get_shape()[3]
kShape = [kernelSize,kernelSize,outMaps,inMaps]
w = tf.get_variable("weights",shape=kShape,initializer=tf.uniform_unit_scaling_initializer())
tf.add_to_collection("weights",w)
inshape = x.get_shape()
outShape = tf.stack([inshape[0],inshape[1]*stride,inshape[2]*stride,outMaps],name="shapeEval")
deconv = tf.nn.conv2d_transpose(x,w,outShape,[1,stride,stride,1],padding="SAME")
return deconv
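# Hypothetical usage note (not part of the original file): given an input with
# static shape [N, H, W, inMaps], outShape above is stacked as
# [N, H*stride, W*stride, outMaps], so the result has `outMaps` channels and
# spatial dimensions scaled by `stride`.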
|
126420
|
from distutils.core import setup, Extension
m = Extension('tinyobjloader',
sources = ['main.cpp', '../tiny_obj_loader.cc'])
setup (name = 'tinyobjloader',
version = '0.1',
description = 'Python module for tinyobjloader',
ext_modules = [m])
|
126429
|
import numpy as np
import pandas as pd
from tqdm import tqdm
from joblib import Parallel, delayed
import os
bitsize = 1024
total_sample = 110913349
data_save_folder = './data'
file = './data/%s_%s.npy' % (total_sample, bitsize)
f = np.memmap(file, dtype = np.bool_, shape = (total_sample, bitsize))
def _sum(memmap, x):
return memmap[x, ].sum()
P = Parallel(n_jobs=16) #
res = P(delayed(_sum)(f, i) for i in tqdm(range(total_sample)))
pd.Series(res).to_pickle('./data/%s_%s_NumOnBits.pkl' % (total_sample, bitsize))
print('Done!')
|
126437
|
from typing import Any, Dict, List, Tuple
from streamlit_prophet.lib.utils.holidays import get_school_holidays_FR
COUNTRY_NAMES_MAPPING = {
"FR": "France",
"US": "United States",
"UK": "United Kingdom",
"CA": "Canada",
"BR": "Brazil",
"MX": "Mexico",
"IN": "India",
"CN": "China",
"JP": "Japan",
"DE": "Germany",
"IT": "Italy",
"RU": "Russia",
"BE": "Belgium",
"PT": "Portugal",
"PL": "Poland",
}
COVID_LOCKDOWN_DATES_MAPPING = {
"FR": [
("2020-03-17", "2020-05-11"),
("2020-10-30", "2020-12-15"),
("2021-03-20", "2021-05-03"),
]
}
SCHOOL_HOLIDAYS_FUNC_MAPPING = {
"FR": get_school_holidays_FR,
}
def convert_into_nb_of_days(freq: str, horizon: int) -> int:
"""Converts a forecasting horizon in number of days.
Parameters
----------
freq : str
Dataset frequency.
horizon : int
Forecasting horizon in dataset frequency units.
Returns
-------
int
Forecasting horizon in days.
"""
mapping = {
"s": horizon // (24 * 60 * 60),
"H": horizon // 24,
"D": horizon,
"W": horizon * 7,
"M": horizon * 30,
"Q": horizon * 90,
"Y": horizon * 365,
}
return mapping[freq]
def convert_into_nb_of_seconds(freq: str, horizon: int) -> int:
"""Converts a forecasting horizon in number of seconds.
Parameters
----------
freq : str
Dataset frequency.
horizon : int
Forecasting horizon in dataset frequency units.
Returns
-------
int
Forecasting horizon in seconds.
"""
mapping = {
"s": horizon,
"H": horizon * 60 * 60,
"D": horizon * 60 * 60 * 24,
"W": horizon * 60 * 60 * 24 * 7,
"M": horizon * 60 * 60 * 24 * 30,
"Q": horizon * 60 * 60 * 24 * 90,
"Y": horizon * 60 * 60 * 24 * 365,
}
return mapping[freq]
def dayname_to_daynumber(days: List[Any]) -> List[Any]:
"""Converts a list of day names into a list of day numbers from 0 (Monday) to 6 (Sunday).
Parameters
----------
days : list
Day names.
Returns
-------
list
Day numbers from 0 (Monday) to 6 (Sunday).
"""
day_names = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
mapping = {day: i for i, day in enumerate(day_names)}
return [mapping[day] for day in days]
def mapping_country_names(countries: List[Any]) -> Tuple[Dict[Any, Any], List[Any]]:
"""Converts a list of country long names into a list of country short names.
Parameters
----------
countries : list
Country long names.
Returns
-------
dict
Mapping used for the conversion.
list
Country short names.
"""
mapping = {v: k for k, v in COUNTRY_NAMES_MAPPING.items()}
return mapping, [mapping[country] for country in countries]
def mapping_freq_names(freq: str) -> str:
"""Converts a short frequency name into a long frequency name.
Parameters
----------
freq : str
Short frequency name.
Returns
-------
str
Long frequency name.
"""
mapping = {
"s": "seconds",
"H": "hours",
"D": "days",
"W": "weeks",
"M": "months",
"Q": "quarters",
"Y": "years",
}
return mapping[freq]
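# Hypothetical sanity checks (not part of the original module):
if __name__ == "__main__":
    assert convert_into_nb_of_days("W", 12) == 84
    assert convert_into_nb_of_seconds("H", 2) == 7200
    assert dayname_to_daynumber(["Monday", "Sunday"]) == [0, 6]
    assert mapping_freq_names("Q") == "quarters"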
|
126438
|
import sys, os
from read_struc import read_struc
from math import sin, cos
import numpy as np
def euler2rotmat(phi,ssi,rot):
cs=cos(ssi)
cp=cos(phi)
ss=sin(ssi)
sp=sin(phi)
cscp=cs*cp
cssp=cs*sp
sscp=ss*cp
sssp=ss*sp
crot=cos(rot)
srot=sin(rot)
r1 = crot * cscp + srot * sp
r2 = srot * cscp - crot * sp
r3 = sscp
r4 = crot * cssp - srot * cp
r5 = srot * cssp + crot * cp
r6 = sssp
r7 = -crot * ss
r8 = -srot * ss
r9 = cs
return ((r1,r2,r3),(r4,r5,r6),(r7,r8,r9))
datfile = sys.argv[1]
header, strucs = read_struc(open(datfile))
strucs = list(strucs)
pivots = []
for h in header:
if not h.startswith("#pivot"):
h = h.rstrip()
if h.startswith("#centered"): assert h.endswith(" false"), h
continue
assert not h.startswith("#pivot auto"), h
hh = h.split()
assert hh[1] == str(len(pivots)+1), h
assert len(hh) == 5, h
pivot = [float(v) for v in hh[2:5]]
pivots.append(np.array(pivot))
results = []
for struc in strucs:
result_struc = []
for lnr, l in enumerate(struc[1]):
ll = [float(v) for v in l.split()]
assert len(ll) == 6 #no ensembles
rotmat = euler2rotmat(*ll[:3])
rotmat = np.array(rotmat)
trans = np.array(ll[3:6])
p = pivots[lnr]
pp = (-p * rotmat).sum(axis=1) + p
trans += pp
result = np.eye(4)
result[:3,:3] = rotmat
result[:3,3] = trans
result[3][3] = 1
result_struc.append(result.tolist())
results.append(result_struc)
import json
print(json.dumps(results, indent=2))
|
126464
|
import matplotlib.pyplot as plt
from cleanco import cleanco
from nltk.corpus import names, gazetteers
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.lancaster import LancasterStemmer
from nltk.tokenize import TweetTokenizer
plt.style.use('ggplot')
import nltk
import scipy.stats
from sklearn.metrics import make_scorer
from sklearn.grid_search import RandomizedSearchCV
import sklearn_crfsuite
from sklearn_crfsuite import metrics
#from sklearn.model_selection import train_test_split
nltk.corpus.conll2002.fileids()
stop = set(stopwords.words('english'))
NAMES = set([name.lower() for filename in ('male.txt', 'female.txt') for name in names.words(filename)])
PERSON_PREFIXES = ['mr', 'mrs', 'ms', 'miss', 'dr', 'rev', 'judge',
'justice', 'honorable', 'hon', 'rep', 'sen', 'sec',
'minister', 'chairman', 'succeeding', 'says', 'president']
PERSON_SUFFIXES = ['sr', 'jr', 'phd', 'md']
ORG_SUFFIXES = ['ltd', 'inc', 'co', 'corp', 'plc', 'llc', 'llp', 'gmbh',
'corporation', 'associates', 'partners', 'committee',
'institute', 'commission', 'university', 'college',
'airlines', 'magazine']
COUNTRIES = set([country for filename in ('isocountries.txt','countries.txt')
for country in gazetteers.words(filename)])
lancaster_stemmer = LancasterStemmer()
wordnet_lemmatizer = WordNetLemmatizer()
tknzr = TweetTokenizer(preserve_case=True, strip_handles=False, reduce_len=False)
#train_sents = list(nltk.corpus.conll2002.iob_sents('esp.train'))
#test_sents = list(nltk.corpus.conll2002.iob_sents('esp.testb'))
def get_tuples(dspath):
sentences = []
s = ''
tokens = []
ners = []
poss = []
tot_sentences = 0
ners_by_position = []
index = 0
with open(dspath) as f:
for line in f:
if line.strip() != '':
token = line.split('\t')[0].decode('utf-8')
ner = line.split('\t')[1].replace('\r', '').replace('\n', '').decode('utf-8')
'''
if ner in definitions.NER_TAGS_ORG:
ner = 'ORG'
elif ner in definitions.NER_TAGS_LOC:
ner = 'LOC'
elif ner in definitions.NER_TAGS_PER:
ner = 'PER'
else :
ner = 'O'
'''
#ners_by_position.append([index, len(token), ner])
index += len(token) + 1
if line.strip() == '':
if len(tokens) != 0:
#poss = [x[1].decode('utf-8') for x in nltk.pos_tag(nltk.word_tokenize(s[:-1]))]
poss = [x[1].decode('utf-8') for x in nltk.pos_tag(tknzr.tokenize(s[:-1]))]
#if len(poss) == len(tokens): # tokenization doesnt affect position of NERs, i.e., same tokenization
sentences.append(zip(tokens, poss, ners))
#else:
# aux = 0
# for i in range(len()):
# if aux <= tokens[i]
tokens = []
ners = []
s = ''
tot_sentences += 1
else:
s += token + ' '
tokens.append(token)
ners.append(ner)
return sentences
#file_reader = open(f, 'r')
#for line in file_reader.readlines():
# x = [line.split('\t')[0] ]
#train_sents = zip(x, y, z)
dataset_rit = get_tuples('/Users/esteves/Github/horus-models/data/dataset/Ritter/ner.txt')
dataset_wnut15 = get_tuples('/Users/esteves/Github/horus-models/data/dataset/wnut/2015.conll.freebase')
dataset_wnut16 = get_tuples('/Users/esteves/Github/horus-models/data/dataset/wnut/2016.conll.freebase')
dataset_wnut16_processed = '/Users/esteves/Github/horus-models/output/experiments/EXP_001/wnut16.horus.conll'
#dataset = dataset_wnut16
#t = int(round(0.7*len(dataset)-1,1))
#train_sents = dataset[0:t]
#test_sents = dataset[t+1:(len(dataset)-1)]
train_sents = dataset_rit
test_sents = dataset_wnut15
#train_sents = dataset_wnut16
#test_sents = dataset_rit
# features: word identity, word suffix, word shape and word POS tag
text = nltk.Text(word.lower() for word in nltk.corpus.brown.words())
def get_similar_words_pos(word):
sim = text.similar(word)
if sim is not None:
poss = tknzr.tokenize(sim)
return Counter(sim)[0]
else:
return '-'
def hasNumbers(text):
return any(char.isdigit() for char in text)
def word2features(sent, i):
word = sent[i][0]
postag = sent[i][1]
anal = cleanco(word)
features = {
'bias': 1.0,
'word.lower()': word.lower(),
'word[-3:]': word[-3:],
'word[-2:]': word[-2:],
'word.isupper()': word.isupper(),
'word.istitle()': word.istitle(),
'word.isdigit()': word.isdigit(),
'postag': postag,
'postag[:2]': postag[:2],
'stop_word': word in stop,
'hyphen': '-' in word,
'size_small': len(word) <= 2,
#'wordnet_lemmatizer': wordnet_lemmatizer.lemmatize(word),
'stemmer_lanc': lancaster_stemmer.stem(word),
#'has_number': hasNumbers(word),
#'postag_similar_max': get_similar_words_pos(word)
#'gaz_per': True if word in NAMES else False
}
if i > 0:
word1 = sent[i-1][0]
postag1 = sent[i-1][1]
features.update({
'-1:word.lower()': word1.lower(),
'-1:word.istitle()': word1.istitle(),
'-1:word.isupper()': word1.isupper(),
'-1:postag': postag1,
'-1:postag[:2]': postag1[:2],
})
else:
features['BOS'] = True
if i < len(sent)-1:
word1 = sent[i+1][0]
postag1 = sent[i+1][1]
features.update({
'+1:word.lower()': word1.lower(),
'+1:word.istitle()': word1.istitle(),
'+1:word.isupper()': word1.isupper(),
'+1:postag': postag1,
'+1:postag[:2]': postag1[:2],
})
else:
features['EOS'] = True
return features
def sent2features(sent):
return [word2features(sent, i) for i in range(len(sent))]
def sent2labels(sent):
return [label for token, postag, label in sent]
def sent2tokens(sent):
return [token for token, postag, label in sent]
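# Illustrative sketch (hypothetical toy sentence, not taken from the datasets above):
# each sentence is a list of (token, POS, NER) triples, so feature extraction behaves like
#   toy = [(u'John', u'NNP', u'B-person'), (u'lives', u'VBZ', u'O')]
#   sent2features(toy)[0]['word.lower()']  -> u'john'
#   sent2features(toy)[0]['BOS']           -> True
#   sent2labels(toy)                       -> [u'B-person', u'O']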
X_train = [sent2features(s) for s in train_sents]
y_train = [sent2labels(s) for s in train_sents]
X_test = [sent2features(s) for s in test_sents]
y_test = [sent2labels(s) for s in test_sents]
# feature_extraction
crf = sklearn_crfsuite.CRF(
algorithm='lbfgs',
c1=0.088,
c2=0.002,
max_iterations=100,
all_possible_transitions=True
)
crf.fit(X_train, y_train)
# eval
# drop 'O' and the fine-grained entity classes before evaluation; skip any label the
# model never saw instead of raising ValueError
labels = [label for label in crf.classes_ if label not in (
    'O',
    'B-facility', 'I-facility', 'B-movie', 'I-movie',
    'B-musicartist', 'I-musicartist', 'B-other', 'I-other',
    'B-product', 'I-product', 'B-sportsteam', 'I-sportsteam',
    'B-tvshow', 'I-tvshow')]
y_pred = crf.predict(X_test)
metrics.flat_f1_score(y_test, y_pred, average='weighted', labels=labels)
# group B and I results
sorted_labels = sorted(
labels,
key=lambda name: (name[1:], name[0])
)
print(metrics.flat_classification_report(
y_test, y_pred, labels=sorted_labels, digits=3
))
exit(0)
# define fixed parameters and parameters to search
crf = sklearn_crfsuite.CRF(
algorithm='lbfgs',
max_iterations=100,
all_possible_transitions=True
)
params_space = {
'c1': scipy.stats.expon(scale=0.5),
'c2': scipy.stats.expon(scale=0.05),
}
# use the same metric for evaluation
f1_scorer = make_scorer(metrics.flat_f1_score,
average='weighted', labels=labels)
# search
rs = RandomizedSearchCV(crf, params_space,
cv=3,
verbose=1,
n_jobs=-1,
n_iter=5,
scoring=f1_scorer)
rs.fit(X_train, y_train)
# crf = rs.best_estimator_
print('best params:', rs.best_params_)
print('best CV score:', rs.best_score_)
print('model size: {:0.2f}M'.format(rs.best_estimator_.size_ / 1000000))
_x = [s.parameters['c1'] for s in rs.grid_scores_]
_y = [s.parameters['c2'] for s in rs.grid_scores_]
_c = [s.mean_validation_score for s in rs.grid_scores_]
fig = plt.figure()
fig.set_size_inches(12, 12)
ax = plt.gca()
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel('C1')
ax.set_ylabel('C2')
ax.set_title("Randomized Hyperparameter Search CV Results (min={:0.3}, max={:0.3})".format(
min(_c), max(_c)
))
ax.scatter(_x, _y, c=_c, s=60, alpha=0.9, edgecolors=[0,0,0])
fig.savefig('crf_optimization.png')
print("Dark blue => {:0.4}, dark red => {:0.4}".format(min(_c), max(_c)))
crf = rs.best_estimator_
y_pred = crf.predict(X_test)
print(metrics.flat_classification_report(
y_test, y_pred, labels=sorted_labels, digits=3
))
from collections import Counter
def print_transitions(trans_features):
for (label_from, label_to), weight in trans_features:
print("%-6s -> %-7s %0.6f" % (label_from, label_to, weight))
print("Top likely transitions:")
print_transitions(Counter(crf.transition_features_).most_common(20))
print("\nTop unlikely transitions:")
print_transitions(Counter(crf.transition_features_).most_common()[-20:])
def print_state_features(state_features):
for (attr, label), weight in state_features:
print("%0.6f %-8s %s" % (weight, label, attr))
print("Top positive:")
print_state_features(Counter(crf.state_features_).most_common(30))
print("\nTop negative:")
print_state_features(Counter(crf.state_features_).most_common()[-30:])
|
126502
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as npt
import pytest
import freud
matplotlib.use("agg")
class TestGaussianDensity:
def test_random_point_with_cell_list(self):
fftpack = pytest.importorskip("scipy.fftpack")
fft = fftpack.fft
fftshift = fftpack.fftshift
width = 20
r_max = 10.0
sigma = 0.1
num_points = 10000
box_size = r_max * 3.1
box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
for w in (width, (width, width), [width, width]):
gd = freud.density.GaussianDensity(w, r_max, sigma)
# Test access
with pytest.raises(AttributeError):
gd.box
with pytest.raises(AttributeError):
gd.density
gd.compute((box, points))
# Test access
gd.box
gd.density
# Verify the output dimensions are correct
assert gd.density.shape == (width, width)
assert np.prod(gd.density.shape) == np.prod(gd.width)
myDiff = gd.density
myFFT = fft(fft(myDiff[:, :], axis=1), axis=0)
myDiff = (myFFT * np.conj(myFFT)).real
myDiff = fftshift(myDiff)[:, :]
npt.assert_equal(
np.where(myDiff == np.max(myDiff)),
(np.array([width // 2]), np.array([width // 2])),
)
def test_change_box_dimension(self):
width = 20
r_max = 9.9
sigma = 0.01
num_points = 100
box_size = r_max * 3.1
# test that a 3D system computed after computing a 2D system will fail
box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
gd = freud.density.GaussianDensity(width, r_max, sigma)
gd.compute((box, points))
test_box, test_points = freud.data.make_random_system(
box_size, num_points, is2D=False
)
with pytest.raises(ValueError):
gd.compute((test_box, test_points))
# test that a 2D system computed after computing a 3D system will fail
box, points = freud.data.make_random_system(box_size, num_points, is2D=False)
gd = freud.density.GaussianDensity(width, r_max, sigma)
gd.compute((box, points))
test_box, test_points = freud.data.make_random_system(
box_size, num_points, is2D=True
)
with pytest.raises(ValueError):
gd.compute((test_box, test_points))
def test_sum_2d(self):
# Ensure that each point's Gaussian sums to 1
width = 20
r_max = 9.9
sigma = 2
box_size = width
gd = freud.density.GaussianDensity(width, r_max, sigma)
for num_points in [1, 10, 100]:
box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
gd.compute(system=(box, points))
# This has discretization error as well as single-precision error
assert np.isclose(np.sum(gd.density), num_points, rtol=1e-4)
def test_sum_3d(self):
# Ensure that each point's Gaussian sums to 1
width = 20
r_max = 9.9
sigma = 2
box_size = width
gd = freud.density.GaussianDensity(width, r_max, sigma)
for num_points in [1, 10, 100]:
box, points = freud.data.make_random_system(
box_size, num_points, is2D=False
)
gd.compute(system=(box, points))
# This has discretization error as well as single-precision error
assert np.isclose(np.sum(gd.density), num_points, rtol=1e-4)
def test_sum_values_2d(self):
# Ensure that the Gaussian convolution sums to the sum of the values
width = 20
r_max = 9.9
sigma = 2
box_size = width
gd = freud.density.GaussianDensity(width, r_max, sigma)
for num_points in [1, 10, 100]:
system = freud.data.make_random_system(box_size, num_points, is2D=True)
values = np.random.rand(num_points)
gd.compute(system, values)
# This has discretization error as well as single-precision error
assert np.isclose(np.sum(gd.density), np.sum(values), rtol=1e-4)
def test_sum_values_3d(self):
# Ensure that the Gaussian convolution sums to the sum of the values
width = 20
r_max = 9.9
sigma = 2
box_size = width
gd = freud.density.GaussianDensity(width, r_max, sigma)
for num_points in [1, 10, 100]:
system = freud.data.make_random_system(box_size, num_points, is2D=False)
values = np.random.rand(num_points)
gd.compute(system, values)
# This has discretization error as well as single-precision error
assert np.isclose(np.sum(gd.density), np.sum(values), rtol=1e-4)
def test_repr(self):
gd = freud.density.GaussianDensity(100, 10.0, 0.1)
assert str(gd) == str(eval(repr(gd)))
# Use both signatures
gd3 = freud.density.GaussianDensity((98, 99, 100), 10.0, 0.1)
assert str(gd3) == str(eval(repr(gd3)))
def test_repr_png(self):
width = 20
r_max = 2.0
sigma = 0.01
num_points = 100
box_size = r_max * 3.1
box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
gd = freud.density.GaussianDensity(width, r_max, sigma)
with pytest.raises(AttributeError):
gd.plot()
assert gd._repr_png_() is None
gd.compute((box, points))
gd.plot()
gd = freud.density.GaussianDensity(width, r_max, sigma)
test_box = freud.box.Box.cube(box_size)
gd.compute((test_box, points))
gd.plot()
assert gd._repr_png_() is None
plt.close("all")
|
126507
|
from os.path import basename, splitext
import cv2
import json
from src.img_utility import BBCor_to_pts, vertices_rearange
# return a list of BB coordinates [[x1, y1], [x2, y2]]
def CCPD_BBCor_info(img_path):
img_path = basename(img_path)
BBCor = img_path.split('-')[2].split('_')
return [map(int, BBCor[0].split('&')), map(int, BBCor[1].split('&'))]
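# Illustrative sketch (hypothetical file name following the CCPD convention, where the
# third dash-separated field encodes the bounding box as "x1&y1_x2&y2"):
#   CCPD_BBCor_info('025-95_113-154&383_386&473-...jpg')
#   -> [[154, 383], [386, 473]]   (map() returns plain lists under Python 2)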
# return a list of vertices coordinates [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
def CCPD_vertices_info(img_path):
img_path = basename(img_path)
vertices = img_path.split('-')[3].split('_')
return [map(int, vertices[0].split('&')), map(int, vertices[1].split('&')),
map(int, vertices[2].split('&')), map(int, vertices[3].split('&'))]
# used for the CCPD_FR training data, read the LP vertices [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
def CCPD_FR_vertices_info(img_path):
img_path = basename(img_path)
vertices = img_path.split('.')[0].split('_')
return [map(int, vertices[0].split('&')), map(int, vertices[1].split('&')),
map(int, vertices[2].split('&')), map(int, vertices[3].split('&'))]
# return the vertices for front and rear for CCPD_FR format [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
def CCPD_FR_front_rear_info(img_path):
shape = cv2.imread(img_path).shape
w = shape[1]
h = shape[0]
return [[w, h], [0, h], [0, 0], [w, 0]]
# return [[tl], [br]], tl, br in format [x, y]
def openALPR_BBCor_info(img_path):
notation_file = splitext(img_path)[0] + '.txt'
shape = cv2.imread(img_path).shape
with open(notation_file, 'r') as f:
context = f.readline().split()
BBCor = context[1:5]
BBCor = map(int, BBCor)
x_min = max(BBCor[0], 0)
x_max = min(BBCor[0] + BBCor[2], shape[1])
y_min = max(BBCor[1], 0)
y_max = min(BBCor[1] + BBCor[3], shape[0])
return [[x_min, y_min], [x_max, y_max]]
# [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
def vernex_front_rear_info(img_path):
img_name = basename(img_path)
vertices = img_name.split('.')[0].split('_')
return [map(int, vertices[4].split('&')), map(int, vertices[5].split('&')),
map(int, vertices[6].split('&')), map(int, vertices[7].split('&'))]
# [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
def vernex_vertices_info(img_path):
img_name = basename(img_path)
vertices = img_name.split('.')[0].split('_')
return [map(int, vertices[0].split('&')), map(int, vertices[1].split('&')),
map(int, vertices[2].split('&')), map(int, vertices[3].split('&'))]
# return string, 'front' or 'rear'
def vernex_fr_class_info(img_path):
img_name = basename(img_path)
ele = img_name.split('.')[0].split('_')
return ele[8]
# read the json file including lp and fr annotations
# it returns the lp and fr coordinates starting at the bottom-right (br) vertex, clockwise
# return -> w, h, class, {vertices indexed by 'lp' or 'front' or 'rear'}
# this function only supports a single lp and a single fr
def json_lp_fr(json_path):
with open(json_path, 'r') as f:
data = json.load(f)
width = data['imageWidth']
height = data['imageHeight']
lp_fr_vertices = {}
for annotation in data['shapes']:
if annotation['shape_type'] == 'rectangle':
pts = BBCor_to_pts(*[map(int, pt) for pt in annotation['points']])
elif annotation['shape_type'] == 'polygon':
pts = vertices_rearange([map(int, pt) for pt in annotation['points']])
lp_fr_vertices[annotation['label']] = pts
if annotation['label'] in ['front', 'rear']:
cls = annotation['label']
assert len(lp_fr_vertices) == 2, 'data set length not matched, please check the data'
assert 'lp' in lp_fr_vertices and ('front' in lp_fr_vertices or 'rear' in lp_fr_vertices),\
'Now this function is only supported for one-lp and one-fr'
return width, height, cls, lp_fr_vertices
'''
this function can read an image with multiple lp and fr annotations
the annotations are made by labelme
label class name format -> front1, front1_lp, front2, front2_lp ....
the return value is a list of couples (lp info, owner car's fr info)
it returns the lp and fr coordinates starting at the bottom-right (br) vertex, clockwise
return -> w, h, [couple], where each couple is a dict with keys 'lp_cor', 'fr_cor', 'fr_class'
the class is returned as the string 'front' or 'rear'
'''
def json_lp_fr_couples(json_path):
with open(json_path, 'r') as f:
data = json.load(f)
width = data['imageWidth']
height = data['imageHeight']
couple_lst = []
lp_lst = {}
fr_lst = {}
# first divide the data into lp dictionary and fr dictionary
for annotation in data['shapes']:
if annotation['shape_type'] == 'rectangle':
pts = BBCor_to_pts(*[map(int, pt) for pt in annotation['points']])
elif annotation['shape_type'] == 'polygon':
pts = vertices_rearange([map(int, pt) for pt in annotation['points']])
if 'lp' in annotation['label']:
lp_lst[annotation['label']] = pts
else:
fr_lst[annotation['label']] = pts
for fr in fr_lst:
single_couple = {'fr_cor': fr_lst[fr], 'lp_cor': lp_lst[fr + '_lp']}
if 'front' in fr:
single_couple['fr_class'] = 'front'
elif 'rear' in fr:
single_couple['fr_class'] = 'rear'
couple_lst.append(single_couple)
return width, height, couple_lst
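# Illustrative sketch (hypothetical labelme annotation holding one 'front1' polygon and
# its 'front1_lp' plate): the returned couple list would look roughly like
#   [{'fr_cor': [[x1, y1], [x2, y2], [x3, y3], [x4, y4]],
#     'lp_cor': [[x1, y1], [x2, y2], [x3, y3], [x4, y4]],
#     'fr_class': 'front'}]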
if __name__ == '__main__':
path = '/home/shaoheng/Documents/Thesis_KSH/samples/kr_lowres/IMG_8265.json'
path_0 = '/home/shaoheng/Documents/Thesis_KSH/samples/kr_lowres/IMG_8267.json'
w, h, cp_lst = json_lp_fr_couples(path)
w_0, h_0, cp_lst_0 = json_lp_fr_couples(path_0)
print w, h, len(cp_lst + cp_lst_0)
|
126508
|
import numpy as np
import os
import warnings
from Input import Input
class InputFromData(Input):
"""
Used to draw random samples from a data file.
"""
def __init__(self, input_filename, delimiter=" ", skip_header=0,
shuffle_data=True):
"""
:param input_filename: path of file containing data to be sampled.
:type input_filename: string
:param delimiter: Character used to separate data in data file.
Can also be an integer to specify width of each entry.
:type delimiter: str or int
:param skip_header: Number of header rows to skip in data file.
:type skip_header: int
:param shuffle_data: Whether or not to randomly shuffle data during
initialization.
:type shuffle_data: bool
"""
if not os.path.isfile(input_filename):
raise IOError("input_filename must refer to a file.")
self._data = np.genfromtxt(input_filename,
delimiter=delimiter,
skip_header=skip_header)
# Data should not contain NaN.
if np.isnan(self._data).any():
raise ValueError("Input data file contains invalid (NaN) entries.")
# Output should be shape (num_samples, sample_size), so reshape
# one dimensional data to a 2d array with one column.
if len(self._data .shape) == 1:
self._data = self._data.reshape(self._data.shape[0], -1)
if shuffle_data:
np.random.shuffle(self._data)
self._index = 0
def draw_samples(self, num_samples):
"""
Returns an array of samples from the previously loaded file data.
:param num_samples: Number of samples to be returned.
:type num_samples: int
:return: 2d ndarray of samples, each row being one sample.
For one dimensional input data, this will have
shape (num_samples, 1)
"""
if not isinstance(num_samples, int):
raise TypeError("num_samples must be an integer.")
if num_samples <= 0:
raise ValueError("num_samples must be a positive integer.")
# Return the requested samples and increment the index.
sample = self._data[self._index: self._index + num_samples]
self._index += num_samples
sample_size = sample.shape[0]
if num_samples > sample_size:
error_message = "Only " + str(sample_size) + " of the " + \
str(num_samples) + " requested samples are " + \
"available.\nEither provide more sample data " + \
"or increase epsilon to reduce sample size needed."
warning = UserWarning(error_message)
warnings.warn(warning)
return np.copy(sample)
def reset_sampling(self):
"""
Used to restart sampling from beginning of data set.
"""
self._index = 0
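# Minimal usage sketch (illustrative only; writes a small temporary data file instead of
# assuming any particular dataset exists on disk):
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as tmp:
        np.savetxt(tmp, np.arange(12.0).reshape(6, 2))
        tmp_name = tmp.name
    sampler = InputFromData(tmp_name, shuffle_data=False)
    print(sampler.draw_samples(4).shape)  # (4, 2)
    sampler.reset_sampling()
    print(sampler.draw_samples(2))        # first two rows again
    os.remove(tmp_name)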
|
126564
|
import json
import pytest
import simdjson
def with_buffer(content):
import numpy
parser = simdjson.Parser()
doc = parser.parse(content)
assert len(numpy.frombuffer(doc.as_buffer(of_type='d'))) == 10001
def without_buffer(content):
import numpy
parser = simdjson.Parser()
doc = parser.parse(content)
assert len(numpy.array(doc.as_list())) == 10001
def with_builtin(content):
import numpy
assert len(numpy.array(json.loads(content))) == 10001
def with_orjson(content):
import numpy
import orjson
assert len(numpy.array(orjson.loads(content))) == 10001
@pytest.mark.slow
@pytest.mark.parametrize('loader', [
with_buffer, without_buffer, with_builtin, with_orjson])
def test_array_to_numpy(benchmark, loader):
"""Test how quickly we can load a homogeneous array of floats into a
numpy array."""
with open('jsonexamples/numbers.json', 'rb') as src:
content = src.read()
benchmark.group = 'numpy array (deserialize)'
benchmark.extra_info['group'] = 'numpy'
benchmark(loader, content)
|
126565
|
from sequana.rnadiff import RNADiffResults, RNADiffAnalysis, RNADesign
from . import test_dir
import pytest
def test_design():
d = RNADesign(f"{test_dir}/data/rnadiff/design.csv")
assert d.comparisons == [('Complemented_csrA', 'Mut_csrA'), ('Complemented_csrA', 'WT'), ('Mut_csrA', 'WT')]
assert d.conditions == ['Complemented_csrA', 'Mut_csrA', 'WT']
d = RNADesign(f"{test_dir}/data/rnadiff/design.csv", reference="WT")
assert d.comparisons == [('Complemented_csrA', 'WT'), ('Mut_csrA', 'WT')]
assert d.conditions == ['Complemented_csrA', 'Mut_csrA', 'WT']
@pytest.mark.xfail(reason="too slow or service may be down")
def test_rnadiff_onefolder():
# Featurecounts are saved in sequana/resources/testing/rnadiff/rnadiff_onecond_ex1
# generated from Featurecount of the file to be found in
# sequana/resources/testing/featurecounts/featurecounts_ex1
counts = f"{test_dir}/data/rnadiff/rnadiff_onecond_ex1/counts.csv"
design = f"{test_dir}/data/rnadiff/rnadiff_onecond_ex1/design.csv"
gff = f"{test_dir}/data/rnadiff/rnadiff_onecond_ex1/Lepto.gff"
an = RNADiffAnalysis(counts, design,
condition="condition", comparisons=[("Complemented_csrA", "WT")],
fc_feature="gene", fc_attribute="ID", gff=gff)
an
r = an.run()
r.plot_count_per_sample()
r.plot_percentage_null_read_counts()
#r.plot_volcano()
r.plot_pca()
r.plot_mds()
r.plot_isomap()
r.plot_density()
r.plot_boxplot_normeddata()
r.plot_boxplot_rawdata()
r.plot_dendogram()
r.plot_dispersion()
r.plot_feature_most_present()
r.comparisons['Complemented_csrA_vs_WT'].plot_volcano()
r.comparisons['Complemented_csrA_vs_WT'].plot_padj_hist()
r.comparisons['Complemented_csrA_vs_WT'].plot_pvalue_hist()
r.summary()
r.alpha = 1
r.log2_fc = 1
|
126588
|
class AccountNotFoundError(Exception):
pass
class TransactionError(Exception):
pass
class AccountClosedError(TransactionError):
pass
class InsufficientFundsError(TransactionError):
pass
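# Minimal usage sketch (illustrative only): the hierarchy lets callers handle every
# transaction failure with a single except clause.
if __name__ == "__main__":
    try:
        raise InsufficientFundsError("balance too low")
    except TransactionError as err:
        print(type(err).__name__, err)  # InsufficientFundsError balance too low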
|
126620
|
import unittest
import uuid
import py3crdt
from py3crdt.orset import ORSet
class TestORSet(unittest.TestCase):
def setUp(self):
# Create a ORSet
self.orset1 = ORSet(uuid.uuid4())
# Create another ORSet
self.orset2 = ORSet(uuid.uuid4())
# Add elements to orset1
self.orset1.add('a', uuid.uuid4())
self.orset1.add('b', uuid.uuid4())
# Add elements to orset1
self.orset2.add('b', uuid.uuid4())
self.orset2.add('c', uuid.uuid4())
self.orset2.add('d', uuid.uuid4())
def test_elements_add_correctly_orset(self):
self.assertEqual([_['elem'] for _ in self.orset1.A], ['a', 'b'])
self.assertEqual([_['elem'] for _ in self.orset1.R], [])
self.assertEqual([_['elem'] for _ in self.orset2.A], ['b', 'c', 'd'])
self.assertEqual([_['elem'] for _ in self.orset2.R], [])
def test_querying_orset_without_removal_and_merging(self):
# Check orset1 querying
self.assertTrue(self.orset1.query('a'))
self.assertTrue(self.orset1.query('b'))
self.assertFalse(self.orset1.query('c'))
self.assertFalse(self.orset1.query('d'))
# Check orset2 querying
self.assertFalse(self.orset2.query('a'))
self.assertTrue(self.orset2.query('b'))
self.assertTrue(self.orset2.query('c'))
self.assertTrue(self.orset2.query('d'))
def test_merging_orset_without_removal(self):
# Check orset1 merging
self.orset1.merge(self.orset2)
self.assertEqual([_['elem'] for _ in self.orset1.A], ['a', 'b', 'c', 'd'])
for _ in self.orset1.A:
if _['elem'] == 'b':
self.assertEqual(len(_['tags']), 2)
break
self.assertEqual([_['elem'] for _ in self.orset1.R], [])
# Check orset2 merging
self.orset2.merge(self.orset1)
self.assertEqual([_['elem'] for _ in self.orset2.A], ['a', 'b', 'c', 'd'])
for _ in self.orset2.A:
if _['elem'] == 'b':
self.assertEqual(len(_['tags']), 2)
break
self.assertEqual([_['elem'] for _ in self.orset2.R], [])
# Check if they are both equal
self.assertEqual([_['elem'] for _ in self.orset1.A], [_['elem'] for _ in self.orset2.A])
self.assertEqual([_['elem'] for _ in self.orset1.R], [_['elem'] for _ in self.orset2.R])
def test_querying_orset_with_merging_without_removal(self):
# Check orset2 merging
self.orset2.merge(self.orset1)
self.assertTrue(self.orset2.query('a'))
self.assertTrue(self.orset2.query('b'))
self.assertTrue(self.orset2.query('c'))
self.assertTrue(self.orset2.query('d'))
# Check orset1 merging
self.orset1.merge(self.orset2)
self.assertTrue(self.orset1.query('a'))
self.assertTrue(self.orset1.query('b'))
self.assertTrue(self.orset1.query('c'))
self.assertTrue(self.orset1.query('d'))
def test_elements_remove_correctly_orset(self):
# Remove elements from orset1
self.orset1.remove('b')
self.assertEqual([_['elem'] for _ in self.orset1.A], ['a', 'b'])
self.assertEqual([_['elem'] for _ in self.orset1.R], ['b'])
# Remove elements from orset2
self.orset2.remove('b')
self.orset2.remove('c')
self.assertEqual([_['elem'] for _ in self.orset2.A], ['b', 'c', 'd'])
self.assertEqual([_['elem'] for _ in self.orset2.R], ['b', 'c'])
def test_querying_orset_without_merging_with_removal(self):
# Remove elements from orset1
self.orset1.remove('b')
# Check orset1 querying
self.assertTrue(self.orset1.query('a'))
self.assertFalse(self.orset1.query('b'))
self.assertFalse(self.orset1.query('c'))
self.assertFalse(self.orset1.query('d'))
# Remove elements from orset2
self.orset2.remove('b')
self.orset2.remove('c')
# Check orset2 querying
self.assertFalse(self.orset2.query('a'))
self.assertFalse(self.orset2.query('b'))
self.assertFalse(self.orset2.query('c'))
self.assertTrue(self.orset2.query('d'))
def test_merging_orset_with_removal(self):
# Remove elements from orset1
self.orset1.remove('b')
# Remove elements from orset2
self.orset2.remove('b')
self.orset2.remove('c')
# Check orset1 merging
self.orset1.merge(self.orset2)
self.assertEqual([_['elem'] for _ in self.orset1.A], ['a', 'b', 'c', 'd'])
self.assertEqual([_['elem'] for _ in self.orset1.R], ['b', 'c'])
for _ in self.orset1.R:
if _['elem'] == 'b':
self.assertEqual(len(_['tags']), 2)
break
# Check orset2 merging
self.orset2.merge(self.orset1)
self.assertEqual([_['elem'] for _ in self.orset2.A], ['a', 'b', 'c', 'd'])
self.assertEqual([_['elem'] for _ in self.orset2.R], ['b', 'c'])
for _ in self.orset2.R:
if _['elem'] == 'b':
self.assertEqual(len(_['tags']), 2)
break
# Check if they are both equal
self.assertEqual([_['elem'] for _ in self.orset1.A], [_['elem'] for _ in self.orset2.A])
self.assertEqual([_['elem'] for _ in self.orset1.R], [_['elem'] for _ in self.orset2.R])
def test_querying_orset_with_merging_with_removal(self):
# Remove elements from orset1
self.orset1.remove('b')
# Remove elements from orset2
self.orset2.remove('b')
self.orset2.remove('c')
# Merge orset2 to orset1
self.orset1.merge(self.orset2)
# Merge orset1 to orset2
self.orset2.merge(self.orset1)
# Check orset1 querying
self.assertTrue(self.orset1.query('a'))
self.assertFalse(self.orset1.query('b'))
self.assertFalse(self.orset1.query('c'))
self.assertTrue(self.orset1.query('d'))
# Check orset2 querying
self.assertTrue(self.orset2.query('a'))
self.assertFalse(self.orset2.query('b'))
self.assertFalse(self.orset2.query('c'))
self.assertTrue(self.orset2.query('d'))
if __name__ == '__main__':
unittest.main()
|
126659
|
from sklearn.metrics import recall_score, roc_curve, auc
def specificity(y_true, y_pred):
return recall_score(y_true, y_pred, pos_label=0)
def sensitivity(y_true, y_pred):
return recall_score(y_true, y_pred, pos_label=1)
def balanced_accuracy(y_true, y_pred):
spec = specificity(y_true, y_pred)
sens = sensitivity(y_true, y_pred)
return (spec + sens) / 2
def auc_score(y_true, y_pred):
fpr, tpr, thresholds = roc_curve(y_true, y_pred)
return auc(fpr, tpr)
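# Minimal usage sketch (illustrative toy labels, not tied to any dataset):
if __name__ == "__main__":
    y_true = [0, 0, 1, 1]
    y_pred = [0, 1, 1, 1]
    print(specificity(y_true, y_pred))        # 0.5 -> one of two negatives recovered
    print(sensitivity(y_true, y_pred))        # 1.0 -> both positives recovered
    print(balanced_accuracy(y_true, y_pred))  # 0.75
    print(auc_score(y_true, y_pred))          # 0.75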
|
126668
|
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from rest_framework_json_api.relations import (
ResourceRelatedField,
SerializerMethodResourceRelatedField, SerializerMethodHyperlinkedRelatedField
)
from bluebottle.activities.utils import (
BaseActivitySerializer, BaseActivityListSerializer, BaseContributorSerializer
)
from bluebottle.bluebottle_drf2.serializers import PrivateFileSerializer
from bluebottle.deeds.models import Deed, DeedParticipant
from bluebottle.fsm.serializers import TransitionSerializer
from bluebottle.time_based.permissions import CanExportParticipantsPermission
from bluebottle.utils.serializers import ResourcePermissionField
from bluebottle.utils.utils import reverse_signed
class DeedSerializer(BaseActivitySerializer):
permissions = ResourcePermissionField('deed-detail', view_args=('pk',))
links = serializers.SerializerMethodField()
my_contributor = SerializerMethodResourceRelatedField(
model=DeedParticipant,
read_only=True,
source='get_my_contributor'
)
contributors = SerializerMethodHyperlinkedRelatedField(
model=DeedParticipant,
many=True,
related_link_view_name='related-deed-participants',
related_link_url_kwarg='activity_id'
)
participants_export_url = PrivateFileSerializer(
'deed-participant-export',
url_args=('pk', ),
filename='participant.csv',
permission=CanExportParticipantsPermission,
read_only=True
)
def get_my_contributor(self, instance):
user = self.context['request'].user
if user.is_authenticated:
return instance.contributors.filter(user=user).instance_of(DeedParticipant).first()
def get_links(self, instance):
if instance.start and instance.end:
return {
'ical': reverse_signed('deed-ical', args=(instance.pk, )),
'google': instance.google_calendar_link,
}
else:
return {}
class Meta(BaseActivitySerializer.Meta):
model = Deed
fields = BaseActivitySerializer.Meta.fields + (
'my_contributor',
'contributors',
'start',
'end',
'enable_impact',
'target',
'links',
'participants_export_url',
)
class JSONAPIMeta(BaseActivitySerializer.JSONAPIMeta):
resource_name = 'activities/deeds'
included_resources = BaseActivitySerializer.JSONAPIMeta.included_resources + [
'my_contributor', 'my_contributor.user',
]
included_serializers = dict(
BaseActivitySerializer.included_serializers,
**{
'my_contributor': 'bluebottle.deeds.serializers.DeedParticipantSerializer',
'my_contributor.user': 'bluebottle.initiatives.serializers.MemberSerializer',
}
)
class DeedListSerializer(BaseActivityListSerializer):
permissions = ResourcePermissionField('deed-detail', view_args=('pk',))
class Meta(BaseActivityListSerializer.Meta):
model = Deed
fields = BaseActivityListSerializer.Meta.fields + (
'start',
'end',
)
class JSONAPIMeta(BaseActivityListSerializer.JSONAPIMeta):
resource_name = 'activities/deeds'
class DeedTransitionSerializer(TransitionSerializer):
resource = ResourceRelatedField(queryset=Deed.objects.all())
included_serializers = {
'resource': 'bluebottle.deeds.serializers.DeedSerializer',
}
class JSONAPIMeta(object):
included_resources = ['resource', ]
resource_name = 'activities/deed-transitions'
class DeedParticipantSerializer(BaseContributorSerializer):
activity = ResourceRelatedField(
queryset=Deed.objects.all()
)
permissions = ResourcePermissionField('deed-participant-detail', view_args=('pk',))
class Meta(BaseContributorSerializer.Meta):
model = DeedParticipant
meta_fields = BaseContributorSerializer.Meta.meta_fields + ('permissions', )
validators = [
UniqueTogetherValidator(
queryset=DeedParticipant.objects.all(),
fields=('activity', 'user')
)
]
class JSONAPIMeta(BaseContributorSerializer.JSONAPIMeta):
resource_name = 'contributors/deeds/participants'
included_resources = [
'user', 'activity', 'activity.goals',
]
included_serializers = {
'user': 'bluebottle.initiatives.serializers.MemberSerializer',
'activity': 'bluebottle.deeds.serializers.DeedSerializer',
'activity.goals': 'bluebottle.impact.serializers.ImpactGoalSerializer',
}
class DeedParticipantListSerializer(DeedParticipantSerializer):
pass
class DeedParticipantTransitionSerializer(TransitionSerializer):
resource = ResourceRelatedField(queryset=DeedParticipant.objects.all())
field = 'states'
included_serializers = {
'resource': 'bluebottle.deeds.serializers.DeedParticipantSerializer',
'resource.activity': 'bluebottle.deeds.serializers.DeedSerializer',
'resource.activity.goals': 'bluebottle.impact.serializers.ImpactGoalSerializer',
}
class JSONAPIMeta(object):
resource_name = 'contributors/deeds/participant-transitions'
included_resources = [
'resource', 'resource.activity', 'resource.activity.goals'
]
|
126691
|
import sys
sys.path.append('.') # NOQA
from src.datasets.preprocess import normalize
def main(root_path=None, arr_type='nii.gz', modality='mri'):
# save normalized npz arrays in root_path/normalized/
normalize(root_path, arr_type, modality)
if __name__ == '__main__':
from fire import Fire
Fire(main)
|
126770
|
from benchmark import Benchmark, benchmark
import astropy.units as u
import pytest
@benchmark(
{
"log.final.venus.TMan": {"value": 2679.27122, "unit": u.K},
"log.final.venus.TCore": {"value": 6365.71258, "unit": u.K},
"log.final.venus.RIC": {"value": 0.0, "unit": u.km},
"log.final.venus.RadPowerTotal": {"value": 31.49126, "unit": u.TW},
"log.final.venus.MagMom": {"value": 0.0, "unit": u.EMAGMOM},
}
)
class TestVenusApproxInterior(Benchmark):
pass
|
126772
|
class RetrievalMethod():
def __init__(self,db):
self.db = db
def get_sentences_for_claim(self,claim_text,include_text=False):
pass
|
126819
|
from sentence_transformers import CrossEncoder
from .dataset import HardNegativeDataset
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer
import tqdm
import os
import logging
logger = logging.getLogger(__name__)
def hard_negative_collate_fn(batch):
query_id, pos_id, neg_id = zip(*[example.guid for example in batch])
query, pos, neg = zip(*[example.texts for example in batch])
return (query_id, pos_id, neg_id), (query, pos, neg)
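# Illustrative sketch (hypothetical InputExample-like batch items): each element carries
# guid=(query_id, pos_id, neg_id) and texts=(query, pos, neg), so a batch of two examples
# is transposed into
#   (('q1', 'q2'), ('p1', 'p2'), ('n1', 'n2')), (('query 1', 'query 2'), ('pos 1', 'pos 2'), ('neg 1', 'neg 2'))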
class PseudoLabeler(object):
def __init__(self, generated_path, gen_queries, corpus, total_steps, batch_size, cross_encoder, max_seq_length):
assert 'hard-negatives.jsonl' in os.listdir(generated_path)
fpath_hard_negatives = os.path.join(generated_path, 'hard-negatives.jsonl')
self.cross_encoder = CrossEncoder(cross_encoder)
hard_negative_dataset = HardNegativeDataset(fpath_hard_negatives, gen_queries, corpus)
self.hard_negative_dataloader = DataLoader(hard_negative_dataset, shuffle=True, batch_size=batch_size, drop_last=True)
self.hard_negative_dataloader.collate_fn = hard_negative_collate_fn
self.output_path = os.path.join(generated_path, 'gpl-training-data.tsv')
self.total_steps = total_steps
#### retokenization
self.retokenizer = AutoTokenizer.from_pretrained(cross_encoder)
self.max_seq_length = max_seq_length
def retokenize(self, texts):
## We did this retokenization for two reasons:
### (1) Setting the max_seq_length;
### (2) We cannot simply use CrossEncoder(cross_encoder, max_length=max_seq_length),
##### since the max_seq_length will then be reflected on the concatenated sequence,
##### rather than the two sequences independently
texts = list(map(lambda text: text.strip(), texts))
features = self.retokenizer(
texts,
padding=True,
truncation='longest_first',
return_tensors="pt",
max_length=self.max_seq_length
)
decoded = self.retokenizer.batch_decode(
features['input_ids'],
skip_special_tokens=True,
clean_up_tokenization_spaces=True
)
return decoded
def run(self):
# header: 'query_id', 'positive_id', 'negative_id', 'pseudo_label_margin'
data = []
hard_negative_iterator = iter(self.hard_negative_dataloader)
logger.info('Begin pseudo labeling')
for _ in tqdm.trange(self.total_steps):
try:
batch = next(hard_negative_iterator)
except StopIteration:
hard_negative_iterator = iter(self.hard_negative_dataloader)
batch = next(hard_negative_iterator)
(query_id, pos_id, neg_id), (query, pos, neg) = batch
query, pos, neg = [self.retokenize(texts) for texts in [query, pos, neg]]
scores = self.cross_encoder.predict(
list(zip(query, pos)) + list(zip(query, neg)),
show_progress_bar=False
)
labels = scores[:len(query)] - scores[len(query):]
labels = labels.tolist()  # tolist() keeps more precision digits than formatting numpy floats directly
batch_gpl = map(lambda quad: '\t'.join((*quad[:3], str(quad[3]))) + '\n', zip(query_id, pos_id, neg_id, labels))
data.extend(batch_gpl)
logger.info('Done pseudo labeling and saving data')
with open(self.output_path, 'w') as f:
f.writelines(data)
logger.info(f'Saved GPL-training data to {self.output_path}')
|
126874
|
import copy
import time
from collections import defaultdict, namedtuple
import kaa
from . import keybind, theme, modebase, menu
class DefaultMode(modebase.ModeBase):
DOCUMENT_MODE = True
MODENAME = 'default'
SHOW_LINENO = False
SHOW_BLANK_LINE = True
VI_COMMAND_MODE = False
KEY_BINDS = [
keybind.app_keys,
keybind.cursor_keys,
keybind.edit_command_keys,
keybind.addtional_edit_command_keys,
keybind.emacs_keys,
keybind.search_command_keys,
keybind.macro_command_keys,
keybind.rerun_keys,
]
VI_KEY_BIND = [
keybind.command_mode_keys
]
VI_VISUAL_MODE_KEY_BIND = [
keybind.visual_mode_keys
]
VI_VISUAL_LINEWISE_MODE_KEY_BIND = [
keybind.visual_linewise_mode_keys
]
def init_keybind(self):
super().init_keybind()
self.register_keys(self.keybind, self.KEY_BINDS)
self.register_keys(self.keybind_vi_commandmode, self.VI_KEY_BIND)
self.register_keys(self.keybind_vi_visualmode,
self.VI_VISUAL_MODE_KEY_BIND)
self.register_keys(self.keybind_vi_visuallinewisemode,
self.VI_VISUAL_LINEWISE_MODE_KEY_BIND)
def init_menu(self):
self.menu = copy.deepcopy(menu.MENUS)
def init_themes(self):
super().init_themes()
self.themes.append(theme.DefaultThemes)
def close(self):
super().close()
self.keybind_vi_commandmode.clear()
self.keybind_vi_visualmode.clear()
self.keybind_vi_visuallinewisemode.clear()
def on_idle(self):
if self.closed:
return
ret = super().on_idle()
if not ret:
ret = self.check_fileupdate()
return ret
INTERVAL_CHECKUPDATE = 10
def check_fileupdate(self):
if not self.DOCUMENT_MODE:
return
if not kaa.app.mainframe.is_idle():
return
t = time.time()
if t - self._check_fileupdate < self.INTERVAL_CHECKUPDATE:
return
self._check_fileupdate = t
if self.document.fileinfo:
if self.document.fileinfo.check_update():
kaa.app.file_commands.notify_fileupdated(self.document)
def on_esc_pressed(self, wnd, event):
# Pressing esc key starts command mode.
if self.VI_COMMAND_MODE:
is_available, command = self.get_command('editmode.command')
if command:
command(wnd)
if kaa.app.macro.is_recording():
kaa.app.macro.record(1, command)
def on_keypressed(self, wnd, event, s, commands, candidate):
if not commands and not candidate:
if not s or s[0] < ' ':
msg = self.DEFAULT_MENU_MESSAGE or kaa.app.DEFAULT_MENU_MESSAGE
kaa.app.messagebar.set_message(msg)
return super().on_keypressed(wnd, event, s, commands, candidate)
def _show_parenthesis(self, charattrs, pos):
charattrs[pos] = self.get_styleid('parenthesis_cur')
matchpos = self.find_match_parenthesis(pos)
if matchpos is not None:
charattrs[matchpos] = self.get_styleid('parenthesis_match')
def update_charattr(self, wnd):
pos = wnd.cursor.pos
d = {}
c = ''
if pos < self.document.endpos():
c = self.document.buf[pos]
if c and (c in self.PARENTHESIS):
self._show_parenthesis(d, pos)
elif 1 < pos:
c = self.document.buf[pos - 1]
if c in self.PARENTHESIS_CLOSE:
self._show_parenthesis(d, pos - 1)
if d != wnd.charattrs:
wnd.charattrs = d
wnd.screen.style_updated()
return True
PARENTHESIS_OPEN = '({['
PARENTHESIS_CLOSE = ')}]'
PARENTHESIS = PARENTHESIS_OPEN + PARENTHESIS_CLOSE
PARENSIS_PAIR = {o: c for (o, c) in
zip(PARENTHESIS_OPEN + PARENTHESIS_CLOSE,
PARENTHESIS_CLOSE + PARENTHESIS_OPEN)}
def iter_parenthesis(self, posfrom):
while True:
pos = self.document.buf.findchr(
self.PARENTHESIS, posfrom, self.document.endpos())
if pos == -1:
break
attr = self.document.styles.getint(pos)
yield pos, self.document.buf[pos], attr
posfrom = pos + 1
def iter_rev_parenthesis(self, posfrom):
posfrom += 1
while True:
pos = self.document.buf.rfindchr(
self.PARENTHESIS, 0, posfrom)
if pos == -1:
break
attr = self.document.styles.getint(pos)
yield pos, self.document.buf[pos], attr
posfrom = pos
def find_match_parenthesis(self, posfrom):
opener = self.document.buf[posfrom]
curattr = self.document.styles.getint(posfrom)
d = defaultdict(int)
if opener in self.PARENTHESIS_OPEN:
f = self.iter_parenthesis
key = (opener, curattr)
else:
f = self.iter_rev_parenthesis
key = (self.PARENSIS_PAIR[opener], curattr)
for pos, c, attr in f(posfrom):
if c in self.PARENTHESIS_OPEN:
d[(c, attr)] += 1
else:
d[(self.PARENSIS_PAIR[c], attr)] -= 1
if d.get(key) == 0:
return pos
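# Illustrative note (added for clarity, not from the original source): the counter keys on
# (opening character, style id), adding 1 for an opener and subtracting 1 for its closer,
# so a scan started on the "(" of "(a[b])" returns the position of the final ")" once the
# count for ("(", attr) drops back to zero; brackets of other kinds never zero it early.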
_headerinfo = namedtuple('_headerinfo',
['token', 'parent', 'name', 'dispname', 'lineno', 'pos'])
class HeaderInfo(_headerinfo):
def get_parents(self):
ret = []
p = self.parent
while p:
ret.insert(0, p)
p = p.parent
return ret
def get_headers(self):
return ()
|
126938
|
from FrameLibDocs.utils import write_json, read_yaml
from FrameLibDocs.classes import qParseAndBuild, Documentation
def main(docs):
"""
Creates a dict for the Max Documentation system.
This dict is essential for maxObjectLauncher/Refpages to pull the right info.
"""
object_info = read_yaml(docs.object_relationships_path)
docs.interfaces_dir.mkdir(exist_ok=True)
obj_lookup = docs.interfaces_dir / "FrameLib-obj-qlookup.json"
worker = qParseAndBuild()
refpages = [x for x in docs.refpages_dir.rglob("fl.*.xml")]
for ref in refpages:
worker.extract_from_refpage(ref)
worker.extract_keywords(object_info)
worker.extract_seealso(object_info)
worker.build_json_file()
write_json(obj_lookup, worker.d_master_dict)
if __name__ == "__main__":
main(Documentation())
|
127014
|
import json
import string
from backports.tempfile import TemporaryDirectory
from django.test import override_settings
from django.urls import reverse
from django_webtest import WebTest, WebTestMixin
from hypothesis import given, settings
from hypothesis.extra.django import TestCase
from hypothesis.strategies import text
from rest_framework_simplejwt.tokens import AccessToken
from ...auth.tests.fakes import fake_user
from ..models import AnalysisModel
from .fakes import fake_analysis_model
# Override default deadline for all tests to 800ms
settings.register_profile("ci", deadline=800.0)
settings.load_profile("ci")
class AnalysisModelApi(WebTest, TestCase):
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1, max_size=10),
version_id=text(alphabet=string.whitespace, min_size=0, max_size=10),
)
def test_version_id_is_missing___response_is_400(self, supplier_id, version_id):
user = fake_user()
response = self.app.post(
reverse('analysis-model-list', kwargs={'version': 'v1'}),
expect_errors=True,
headers={
'Authorization': 'Bearer {}'.format(AccessToken.for_user(user))
},
params=json.dumps({
'supplier_id': supplier_id,
'version_id': version_id,
}),
content_type='application/json',
)
self.assertEqual(400, response.status_code)
self.assertFalse(AnalysisModel.objects.exists())
@given(
supplier_id=text(alphabet=string.whitespace, min_size=0, max_size=10),
version_id=text(alphabet=string.ascii_letters, min_size=1, max_size=10),
)
def test_supplier_id_is_missing___response_is_400(self, supplier_id, version_id):
user = fake_user()
response = self.app.post(
reverse('analysis-model-list', kwargs={'version': 'v1'}),
expect_errors=True,
headers={
'Authorization': 'Bearer {}'.format(AccessToken.for_user(user))
},
params=json.dumps({
'supplier_id': supplier_id,
'version_id': version_id,
}),
content_type='application/json',
)
self.assertEqual(400, response.status_code)
self.assertFalse(AnalysisModel.objects.exists())
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1, max_size=10),
model_id=text(alphabet=string.ascii_letters, min_size=1, max_size=10),
version_id=text(alphabet=string.ascii_letters, min_size=1, max_size=10),
)
def test_data_is_valid___object_is_created(self, supplier_id, model_id, version_id):
user = fake_user()
response = self.app.post(
reverse('analysis-model-list', kwargs={'version': 'v1'}),
headers={
'Authorization': 'Bearer {}'.format(AccessToken.for_user(user))
},
params=json.dumps({
'supplier_id': supplier_id,
'model_id': model_id,
'version_id': version_id,
}),
content_type='application/json',
)
model = AnalysisModel.objects.first()
self.assertEqual(201, response.status_code)
self.assertEqual(model.supplier_id, supplier_id)
self.assertEqual(model.version_id, version_id)
self.assertEqual(model.model_id, model_id)
class ModelSettingsJson(WebTestMixin, TestCase):
def test_user_is_not_authenticated___response_is_forbidden(self):
models = fake_analysis_model()
response = self.app.get(models.get_absolute_settings_url(), expect_errors=True)
self.assertIn(response.status_code, [401,403])
""" Add these check back in once models auto-update their settings fields
"""
def test_settings_json_is_not_present___get_response_is_404(self):
user = fake_user()
models = fake_analysis_model()
response = self.app.get(
models.get_absolute_settings_url(),
headers={
'Authorization': 'Bearer {}'.format(AccessToken.for_user(user))
},
expect_errors=True,
)
self.assertEqual(404, response.status_code)
def test_settings_json_is_not_present___delete_response_is_404(self):
user = fake_user()
models = fake_analysis_model()
response = self.app.delete(
models.get_absolute_settings_url(),
headers={
'Authorization': 'Bearer {}'.format(AccessToken.for_user(user))
},
expect_errors=True,
)
self.assertEqual(404, response.status_code)
def test_settings_json_is_not_valid___response_is_400(self):
with TemporaryDirectory() as d:
with override_settings(MEDIA_ROOT=d):
user = fake_user()
models = fake_analysis_model()
json_data = {
"model_settings":{
"event_set":{
"name": "Event Set",
"default": "P",
"options":[
{"id":"P", "desc": "Proabilistic"},
{"id":"H", "desc": "Historic"}
]
},
"event_occurrence_id":{
"name": "Occurrence Set",
"desc": "PiWind Occurrence selection",
"default": 1,
"options":[
{"id":"1", "desc": "Long Term"}
]
},
"boolean_parameters": [
{"name": "peril_wind", "desc":"Boolean option", "default": 1.1},
{"name": "peril_surge", "desc":"Boolean option", "default": True}
],
"float_parameter": [
{"name": "float_1", "desc":"Some float value", "default": False, "max":1.0, "min":0.0},
{"name": "float_2", "desc":"Some float value", "default": 0.3, "max":1.0, "min":0.0}
]
},
"lookup_settings":{
"supported_perils":[
{"i": "WSS", "desc": "Single Peril: Storm Surge"},
{"id": "WTC", "des": "Single Peril: Tropical Cyclone"},
{"id": "WW11", "desc": "Group Peril: Windstorm with storm surge"},
{"id": "WW2", "desc": "Group Peril: Windstorm w/o storm surge"}
]
}
}
response = self.app.post(
models.get_absolute_settings_url(),
headers={
'Authorization': 'Bearer {}'.format(AccessToken.for_user(user))
},
params=json.dumps(json_data),
content_type='application/json',
expect_errors=True,
)
validation_error = {
'model_settings': ["Additional properties are not allowed ('float_parameter' was unexpected)"],
'model_settings-event_set': ["'desc' is a required property"],
'model_settings-event_occurrence_id-default': ["1 is not of type 'string'"],
'model_settings-boolean_parameters-0-default': ["1.1 is not of type 'boolean'"],
'lookup_settings-supported_perils-0': ["Additional properties are not allowed ('i' was unexpected)", "'id' is a required property"],
'lookup_settings-supported_perils-1': ["Additional properties are not allowed ('des' was unexpected)", "'desc' is a required property"],
'lookup_settings-supported_perils-2-id': ["'WW11' is too long"]
}
self.assertEqual(400, response.status_code)
self.maxDiff = None
self.assertDictEqual(json.loads(response.body), validation_error)
def test_settings_json_is_uploaded___can_be_retrieved(self):
with TemporaryDirectory() as d:
with override_settings(MEDIA_ROOT=d):
user = fake_user()
models = fake_analysis_model()
json_data = {
"model_settings":{
"event_set":{
"name": "Event Set",
"desc": "Either Probablistic or Historic",
"default": "P",
"options":[
{"id":"P", "desc": "Proabilistic"},
{"id":"H", "desc": "Historic"}
]
},
"event_occurrence_id":{
"name": "Occurrence Set",
"desc": "PiWind Occurrence selection",
"default": "1",
"options":[
{"id":"1", "desc": "Long Term"}
]
},
"boolean_parameters": [
{"name": "peril_wind", "desc":"Boolean option", "default": False},
{"name": "peril_surge", "desc":"Boolean option", "default": True}
],
"float_parameters": [
{"name": "float_1", "desc":"Some float value", "default": 0.5, "max":1.0, "min":0.0},
{"name": "float_2", "desc":"Some float value", "default": 0.3, "max":1.0, "min":0.0}
]
},
"lookup_settings":{
"supported_perils":[
{"id": "WSS", "desc": "Single Peril: Storm Surge"},
{"id": "WTC", "desc": "Single Peril: Tropical Cyclone"},
{"id": "WW1", "desc": "Group Peril: Windstorm with storm surge"},
{"id": "WW2", "desc": "Group Peril: Windstorm w/o storm surge"}
]
}
}
self.app.post(
models.get_absolute_settings_url(),
headers={
'Authorization': 'Bearer {}'.format(AccessToken.for_user(user))
},
params=json.dumps(json_data),
content_type='application/json'
)
response = self.app.get(
models.get_absolute_settings_url(),
headers={
'Authorization': 'Bearer {}'.format(AccessToken.for_user(user))
},
)
self.maxDiff = None
self.assertDictEqual(json.loads(response.body), json_data)
self.assertEqual(response.content_type, 'application/json')
|
127029
|
from biicode.common.utils.serializer import Serializer, SetDeserializer
from biicode.common.model.symbolic.reference import ReferencedDependencies
from biicode.common.model.declare.declaration import Declaration
class FinderResult(object):
SERIAL_RESOLVED_KEY = "r"
SERIAL_UNRESOLVED_KEY = "u"
SERIAL_UPDATED_KEY = "a"
SERIAL_RESPONSE_KEY = "f"
def __init__(self):
self.resolved = ReferencedDependencies()
self.unresolved = set()
self.updated = ReferencedDependencies()
def __repr__(self):
builder = ['FindResult']
if self.resolved:
builder.append('Resolved %s' % self.resolved)
if self.unresolved:
builder.append('UnResolved %s' % self.unresolved)
if self.updated:
builder.append("Updated: %s\n" % self.updated)
return '\n'.join(builder)
def __len__(self):
return len(self.resolved) + len(self.updated)
@staticmethod
def deserialize(data):
'''From dictionary to object FinderResult'''
ret = FinderResult()
if data is None:
return ret
ret.unresolved = SetDeserializer(Declaration).deserialize(data[FinderResult.SERIAL_UNRESOLVED_KEY])
ret.resolved = ReferencedDependencies.deserialize(data[FinderResult.SERIAL_RESOLVED_KEY])
ret.updated = ReferencedDependencies.deserialize(data[FinderResult.SERIAL_UPDATED_KEY])
return ret
def serialize(self):
return Serializer().build((FinderResult.SERIAL_UNRESOLVED_KEY, self.unresolved),
(FinderResult.SERIAL_RESOLVED_KEY, self.resolved),
(FinderResult.SERIAL_UPDATED_KEY, self.updated))
def __eq__(self, other):
if self is other:
return True
return isinstance(other, self.__class__) \
and (other.resolved == self.resolved) \
and (other.unresolved == self.unresolved) \
and (other.updated == self.updated)
def __ne__(self, other):
return not self == other
@property
def update_renames(self):
renames = {}
for dep_dict in self.updated.itervalues():
for declaration, block_cell_names in dep_dict.iteritems():
if '*' not in declaration.name:
#TODO: What to do with python multiple imports
new_declaration = declaration.normalize(block_cell_names)
if new_declaration:
v = [v for v in block_cell_names if '__init__.py' not in v]
if len(v) == 1:
renames[declaration] = new_declaration
return renames
|
127040
|
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import ElasticNet
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn import metrics
from keras.models import Sequential
from keras.layers import Dense,Dropout,Conv1D,Flatten,MaxPool1D,LSTM,GRU
import numpy as np
def get_dense_mlp(input_dim):
model = Sequential()
model.add(Dense(51,input_dim=input_dim,activation='relu',activity_regularizer='l1'))
model.add(Dropout(0.2))
model.add(Dense(32,activation='relu'))
model.add(Dropout(0.15))
model.add(Dense(1,activation='linear'))
model.compile(optimizer='adam',loss='mean_squared_error',metrics=['mean_squared_error'])
return model
def get_cnn_model(input_dim):
model = Sequential()
model.add(Conv1D(32,3,padding='same',input_shape=(input_dim,1),activation='relu'))
model.add(Conv1D(32,3,padding='same',activation='relu'))
model.add(MaxPool1D(pool_size=2))
model.add(Flatten())
model.add(Dense(32,activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1,activation='linear'))
model.compile(optimizer='adam',loss='mean_squared_error',metrics=['mean_squared_error'])
return model
def get_rnn_model(timesteps,feature_dim):
model = Sequential()
model.add(LSTM(50,input_shape=(timesteps,feature_dim),dropout=0.2,recurrent_dropout=0.25,return_state=False))
#model.add(Flatten())
model.add(Dense(32,activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(1,activation='linear'))
model.compile(optimizer='adam',loss='mean_squared_error',metrics=['mean_squared_error'])
return model
def get_gaussian_process_regressor():
gp = GaussianProcessRegressor()
return [gp],['Gaussian Process']
def get_mlp_regressor(num_hidden_units=51):
mlp = MLPRegressor(hidden_layer_sizes=num_hidden_units)
return [mlp],['Multi-Layer Perceptron']
def get_ensemble_models():
rf = RandomForestRegressor(n_estimators=51,min_samples_leaf=5,min_samples_split=3,random_state=42)
bag = BaggingRegressor(n_estimators=51,random_state=42)
extra = ExtraTreesRegressor(n_estimators=71,random_state=42)
ada = AdaBoostRegressor(random_state=42)
grad = GradientBoostingRegressor(n_estimators=101,random_state=42)
classifier_list = [rf,bag,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list, classifier_name_list
def get_linear_model():
elastic_net = ElasticNet()
return [elastic_net],['Elastic Net']
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ', trained_model_name ,' ---------\n'
predicted_values = trained_model.predict(X_test)
print "Mean Absolute Error : ", metrics.mean_absolute_error(y_test,predicted_values)
print "Median Absolute Error : ", metrics.median_absolute_error(y_test,predicted_values)
print "Mean Squared Error : ", metrics.mean_squared_error(y_test,predicted_values)
print "R2 Score : ", metrics.r2_score(y_test,predicted_values)
print "---------------------------------------\n"
def label_encode_frame(dataframe):
columns = dataframe.columns
encoder = LabelEncoder()
for column in columns:
if isinstance(dataframe[column][0], float) and np.isnan(dataframe[column][0]):
for i in range(len(dataframe)):
if i > 1000:
break
if type(dataframe[column][i]) is str:
dataframe[column] = encoder.fit_transform(dataframe[column].values)
break
elif type(dataframe[column][0]) is str:
dataframe[column] = encoder.fit_transform(dataframe[column].values)
return dataframe
filename = 'train.csv'
train_frame = pd.read_csv(filename)
train_frame.dropna(inplace=True)
del train_frame['customer_ID']
del train_frame['time']
target_values = train_frame['cost'].values
del train_frame['cost']
train_frame = label_encode_frame(train_frame)
X_train,X_test,y_train,y_test = train_test_split(train_frame.values,target_values,test_size=0.2,random_state=42)
# model = get_dense_mlp(X_train.shape[1])
# print model.summary()
# model.fit(X_train,y_train,epochs=25,batch_size=36)
# print model.evaluate(X_test,y_test)
# X_train = X_train.reshape(X_train.shape[0],X_train.shape[1],1)
# X_test = X_test.reshape(X_test.shape[0],X_test.shape[1],1)
# model = get_cnn_model(X_train.shape[1])
# print model.summary()
# model.fit(X_train,y_train,epochs=25,batch_size=36)
# print model.evaluate(X_test,y_test)
# X_train = X_train.reshape(X_train.shape[0],X_train.shape[1],1)
# X_test = X_test.reshape(X_test.shape[0],X_test.shape[1],1)
X_train = X_train.reshape(X_train.shape[0],1,X_train.shape[1])
X_test = X_test.reshape(X_test.shape[0],1,X_test.shape[1])
model = get_rnn_model(1,X_train.shape[2])
model.summary()
model.fit(X_train,y_train,epochs=25,batch_size=36)
print(model.evaluate(X_test,y_test))
|
127069
|
from __future__ import absolute_import
from builtins import zip
from builtins import range
from builtins import object
from nose.tools import (assert_equal, assert_not_equal, assert_almost_equal,
raises)
from nose.plugins.skip import Skip, SkipTest
from .test_helpers import (
true_func, assert_equal_array_array, make_1d_traj, data_filename,
assert_items_equal
)
import openpathsampling as paths
from openpathsampling.high_level.interface_set import GenericVolumeInterfaceSet
import logging
logging.getLogger('openpathsampling.initialization').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.ensemble').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.storage').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.netcdfplus').setLevel(logging.CRITICAL)
class TestInterfaceSet(object):
def setup(self):
paths.InterfaceSet._reset()
self.cv = paths.FunctionCV(name="x", f=lambda s: s.xyz[0][0])
self.lambdas = [0.0, 0.1, 0.2, 0.3]
min_vals= [float("-inf")] * len(self.lambdas)
self.volumes = [paths.CVDefinedVolume(self.cv, min_v, max_v)
for min_v, max_v in zip(min_vals, self.lambdas)]
self.interface_set = paths.InterfaceSet(self.volumes, self.cv,
self.lambdas)
self.decreasing = paths.InterfaceSet(list(reversed(self.volumes)),
self.cv,
list(reversed(self.lambdas)))
self.no_lambda_set = paths.InterfaceSet(self.volumes, self.cv)
def test_direction(self):
assert_equal(self.interface_set.direction, 1)
assert_equal(self.no_lambda_set.direction, 0)
assert_equal(self.decreasing.direction, -1)
def test_get_lambda(self):
for (v, l) in zip(self.volumes, self.lambdas):
assert_equal(self.interface_set.get_lambda(v), l)
assert_equal(self.no_lambda_set.get_lambda(v), None)
def test_list_behavior(self):
# len
assert_equal(len(self.interface_set), 4)
assert_equal(len(self.no_lambda_set), 4)
# getitem, contains
for i in range(4):
assert_equal(self.volumes[i], self.interface_set[i])
assert_equal(self.volumes[i] in self.interface_set, True)
# getitem for slices
sliced = self.interface_set[0:2]
for vol in sliced:
assert_equal(sliced.get_lambda(vol),
self.interface_set.get_lambda(vol))
# special case of -1 needs to work (used frequently!)
assert_equal(self.volumes[-1], self.interface_set[-1])
# iter
for vol in self.interface_set:
assert_equal(vol in self.volumes, True)
# reversed
i = 0
for vol in reversed(self.interface_set):
assert_equal(vol, self.volumes[3-i])
i += 1
def test_no_direction_possible(self):
min_vals=[-0.1, -0.2, -0.3]
max_vals=[0.1, 0.2, 0.3]
volumes = [paths.CVDefinedVolume(self.cv, min_v, max_v)
for min_v, max_v in zip(min_vals, max_vals)]
ifaces = paths.InterfaceSet(volumes)
assert_equal(ifaces.cv, None)
assert_equal(ifaces.cv_max, None)
assert_equal(ifaces.direction, 0)
class TestGenericVolumeInterfaceSet(object):
def test_sanitize_input(self):
# this is just to make the rest a little more readable
sanitize = GenericVolumeInterfaceSet._sanitize_input
assert_equal(([float("-inf")]*3, [0.0, 0.1, 0.2], 1),
sanitize(float("-inf"), [0.0, 0.1, 0.2]))
assert_equal(([0.2, 0.1, 0.0], [float("inf")]*3, -1),
sanitize([0.2, 0.1, 0.0], float("inf")))
assert_equal(([-0.1, -0.2], [0.1, 0.2], 0),
sanitize([-0.1, -0.2], [0.1, 0.2]))
assert_equal(([0.0, 0.0], [0.1, 0.2], 1),
sanitize([0.0, 0.0], [0.1, 0.2]))
assert_equal(([-0.1, -0.2], [0.0, 0.0], -1),
sanitize([-0.1, -0.2], [0.0, 0.0]))
# and the idiot case:
assert_equal(([-0.1, -0.1], [0.1, 0.1], 0),
sanitize([-0.1, -0.1], [0.1, 0.1]))
@raises(RuntimeError)
def test_bad_sanitize(self):
GenericVolumeInterfaceSet._sanitize_input([0.0, -0.1],
[0.1, 0.2, 0.3])
class TestVolumeInterfaceSet(object):
def setup(self):
paths.InterfaceSet._reset()
self.cv = paths.FunctionCV(name="x", f=lambda s: s.xyz[0][0])
self.increasing_set = paths.VolumeInterfaceSet(cv=self.cv,
minvals=float("-inf"),
maxvals=[0.0, 0.1])
self.decreasing_set = paths.VolumeInterfaceSet(cv=self.cv,
minvals=[0.0, -0.1],
maxvals=float("inf"))
self.weird_set = paths.VolumeInterfaceSet(cv=self.cv,
minvals=[-0.1, -0.2],
maxvals=[0.1, 0.2])
def test_initialization(self):
assert_equal(len(paths.InterfaceSet._cv_max_dict), 1)
cv_max = list(paths.InterfaceSet._cv_max_dict.values())[0]
assert_equal(len(self.increasing_set), 2)
assert_equal(self.increasing_set.direction, 1)
assert_equal(self.increasing_set.lambdas, [0.0, 0.1])
assert_equal(self.increasing_set.cv_max, cv_max)
assert_equal(len(self.decreasing_set), 2)
assert_equal(self.decreasing_set.direction, -1)
assert_equal(self.decreasing_set.lambdas, [0.0, -0.1])
# TODO: decide what to do about cv_max for decreasing/weird
assert_equal(len(self.weird_set), 2)
assert_equal(self.weird_set.direction, 0)
assert_equal(self.weird_set.lambdas, None)
def test_new_interface(self):
new_iface = self.increasing_set.new_interface(0.25)
expected = paths.CVDefinedVolume(self.cv, float("-inf"), 0.25)
assert_equal(expected, new_iface)
@raises(TypeError)
def test_bad_new_interface(self):
self.weird_set.new_interface(0.25)
def test_storage(self):
import os
fname = data_filename("interface_set_storage_test.nc")
if os.path.isfile(fname):
os.remove(fname)
template_traj = make_1d_traj([0.0])
storage_w = paths.Storage(fname, "w")
storage_w.save(template_traj)
storage_w.save(self.increasing_set)
storage_w.sync_all()
storage_w.close()
storage_r = paths.AnalysisStorage(fname)
reloaded = storage_r.interfacesets[0]
assert_items_equal(reloaded.lambdas, self.increasing_set.lambdas)
for (truth, beauty) in zip(self.increasing_set, reloaded):
assert_equal(truth, beauty)
for (v, l) in zip(reloaded.volumes, reloaded.lambdas):
assert_equal(reloaded.get_lambda(v), l)
if os.path.isfile(fname):
os.remove(fname)
class TestPeriodicVolumeInterfaceSet(object):
def setup(self):
paths.InterfaceSet._reset()
self.cv = paths.FunctionCV(name="x", f=lambda s: s.xyz[0][0])
self.increasing_set = paths.PeriodicVolumeInterfaceSet(
cv=self.cv,
minvals=0.0,
maxvals=[100, 150, 200-360],
period_min=-180,
period_max=180
)
def test_initialization(self):
assert_equal(self.increasing_set.direction, 1)
assert_equal(len(self.increasing_set), 3)
assert_equal(self.increasing_set.lambdas, [100, 150, -160])
def test_new_interface(self):
new_iface = self.increasing_set.new_interface(-140)
expected = paths.PeriodicCVDefinedVolume(self.cv, 0.0, -140, -180, 180)
assert_equal(new_iface, expected)
def test_storage(self):
import os
fname = data_filename("interface_set_storage_test.nc")
if os.path.isfile(fname):
os.remove(fname)
template_traj = make_1d_traj([0.0])
template = template_traj[0]
storage_w = paths.Storage(fname, "w")
storage_w.save(template_traj)
storage_w.save(self.increasing_set)
storage_w.sync_all()
storage_r = paths.AnalysisStorage(fname)
reloaded = storage_r.interfacesets[0]
assert_items_equal(reloaded.lambdas, self.increasing_set.lambdas)
assert_equal(reloaded.period_min, self.increasing_set.period_min)
assert_equal(reloaded.period_max, self.increasing_set.period_max)
for (truth, beauty) in zip(self.increasing_set, reloaded):
assert_equal(truth, beauty)
for (v, l) in zip(reloaded.volumes, reloaded.lambdas):
assert_equal(reloaded.get_lambda(v), l)
storage_r.close()
storage_w.close()
if os.path.isfile(fname):
os.remove(fname)
|
127089
|
def coroutine(seq):
count = 0
while count < 200:
count += yield
seq.append(count)
seq = []
c = coroutine(seq)
next(c)
assert seq == []
c.send(10)
assert seq == [10]
c.send(10)
assert seq == [10, 20]
|
127095
|
from __future__ import print_function
from math import pi,floor
print(int(((-330+1024)*pi/(6.0*2048.0))/(0.625*pi/180.0)))
#phi=[]
#for i in range(0,2048):
# p = int((i*pi/(6.0*2048.0)+15.0*pi/180.0)/(0.625*pi/180.0))
# p = int((i*2*pi/(6.0*2048.0))/(0.625*pi/180.0))
# phi.append(str(p))
#print('const ap_int<8> phiLUT[2047] = {'+','.join(phi)+'};')
#import pdb;pdb.set_trace()
def bits(number, size_in_bits):
"""
The bin() function is *REALLY* unhelpful when working with negative numbers.
It outputs the binary representation of the positive version of that number
with a '-' at the beginning. Woop-di-do. Here's how to derive the two's-
complement binary of a negative number:
complement(bin(+n - 1))
`complement` is a function that flips each bit. `+n` is the negative number
made positive.
"""
    if number < 0:
        return complement(bin(abs(number) - 1)[2:]).rjust(size_in_bits, '1')
    else:
        return bin(number)[2:].rjust(size_in_bits, '0')
def complement(value):
    return ''.join(COMPLEMENT[x] for x in value)
COMPLEMENT = {'1': '0', '0': '1'}
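# Quick worked example (editor's note, not in the original script): in 8 bits,
#   bits(5, 8)  -> '00000101'
#   bits(-3, 8) -> '11111101'   (the two's-complement encoding of -3)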
phiLUT=[]
kPHI = 57.2958/0.625/1024.;
for i in range(0,1024):
phiLUT.append(0)
for phi in range(-512,512):
address = int(bits(phi,10),2)
phiF=float(phi)
phiNew = 24+int(floor(kPHI*phiF));
if phiNew > 69:
phiNew = 69;
if phiNew < -8:
phiNew = -8;
phiLUT[address]=(str(phiNew))
print('const ap_int<8> phiLUT[1024] = {'+','.join(phiLUT)+'};')
ptLUT=[]
lsb = 1.25/(1<<13)
for i in range(0,4096):
ptLUT.append(6)
for K in range(-2048,2048):
address = int(bits(K,12),2)
if K>=0:
charge=1
else:
charge=-1
FK=lsb*abs(K)
if abs(K)>2047:
FK=lsb*2047
if abs(K)<26:
FK=lsb*26
FK = 0.898*FK/(1.0-0.6*FK);
FK=FK-26.382*FK*FK*FK*FK*FK;
FK=FK-charge*1.408e-3;
FK=FK/1.17;
if (FK!=0.0):
pt=int(2.0/FK)
else:
pt=511
if pt>511:
pt=511
if pt<6:
pt=6;
ptLUT[address]=str(pt)
print('const ap_uint<9> ptLUT[4096] = {'+','.join(ptLUT)+'};')
|
127106
|
import psycopg2
from flask import g
from psycopg2 import errorcodes
class DuplicateRestaurantNameError(RuntimeError):
pass
class Restaurant:
def __init__(self, id, name):
self.id = id
self.name = name
@classmethod
def create(cls, name):
query = """
INSERT INTO
restaurants
(name)
VALUES
(%s)
RETURNING
restaurant_id;
"""
with g.db.cursor() as cursor:
try:
cursor.execute(query, (name,))
g.db.commit()
id = cursor.fetchone()[0]
return cls(id=id, name=name)
except psycopg2.Error as e:
if e.pgcode == errorcodes.UNIQUE_VIOLATION:
raise DuplicateRestaurantNameError()
@classmethod
def by_id(cls, id):
query = """
SELECT restaurant_id, name FROM restaurants WHERE restaurant_id = %s;
"""
with g.db.cursor() as cursor:
cursor.execute(query, (id,))
row = cursor.fetchone()
return cls(id=row[0], name=row[1])
def rename(self, name):
query = """
UPDATE
restaurants
SET
name = %s
WHERE
restaurant_id = %s;
"""
with g.db.cursor() as cursor:
try:
cursor.execute(query, (name, self.id))
g.db.commit()
except psycopg2.Error as e:
if e.pgcode == errorcodes.UNIQUE_VIOLATION:
raise DuplicateRestaurantNameError()
@classmethod
def list(cls):
query = """
SELECT restaurant_id, name FROM restaurants;
"""
with g.db.cursor() as cursor:
cursor.execute(query)
rows = cursor.fetchall()
restaurants = []
for row in rows:
restaurant = cls(id=row[0], name=row[1])
restaurants.append(restaurant)
return restaurants
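# Hedged note (assumption, not part of the original module): the queries above
# presume a schema roughly like
#
#   CREATE TABLE restaurants (
#       restaurant_id SERIAL PRIMARY KEY,
#       name          TEXT UNIQUE NOT NULL
#   );
#
# so that inserting or renaming to an existing name raises UNIQUE_VIOLATION,
# which is surfaced here as DuplicateRestaurantNameError. g.db is expected to
# hold an open psycopg2 connection for the current Flask request.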
|
127164
|
from .. import rman_bl_nodes
from ..rfb_icons import get_bxdf_icon, get_light_icon, get_lightfilter_icon, get_projection_icon
from ..rman_constants import RMAN_BL_NODE_DESCRIPTIONS
def get_description(category, node_name):
description = None
for n in rman_bl_nodes.__RMAN_NODES__.get(category, list()):
if n.name == node_name:
if n.help:
description = n.help
break
if not description:
description = RMAN_BL_NODE_DESCRIPTIONS.get(node_name, node_name)
return description
def get_bxdf_items():
items = []
i = 1
for bxdf_cat, bxdfs in rman_bl_nodes.__RMAN_NODE_CATEGORIES__['bxdf'].items():
if not bxdfs[1]:
continue
tokens = bxdf_cat.split('_')
bxdf_category = ' '.join(tokens[1:])
items.append(('', bxdf_category.capitalize(), '', 0, 0))
for n in bxdfs[1]:
rman_bxdf_icon = get_bxdf_icon(n.name)
items.append( (n.name, n.name, '', rman_bxdf_icon.icon_id, i))
i += 1
return items
def get_light_items():
rman_light_icon = get_light_icon("PxrRectLight")
items = []
i = 0
dflt = 'PxrRectLight'
items.append((dflt, dflt, '', rman_light_icon.icon_id, i))
for n in rman_bl_nodes.__RMAN_LIGHT_NODES__:
if n.name == 'PxrMeshLight':
continue
if n.name != dflt:
i += 1
light_icon = get_light_icon(n.name)
description = get_description('light', n.name)
description = RMAN_BL_NODE_DESCRIPTIONS.get(n.name, n.name)
if n.help:
description = n.help
items.append( (n.name, n.name, description, light_icon.icon_id, i))
return items
def get_lightfilter_items():
items = []
i = 0
rman_light_icon = get_lightfilter_icon("PxrBlockerLightFilter")
dflt = 'PxrBlockerLightFilter'
items.append((dflt, dflt, '', rman_light_icon.icon_id, i))
for n in rman_bl_nodes.__RMAN_LIGHTFILTER_NODES__:
if n.name != dflt:
i += 1
light_icon = get_lightfilter_icon(n.name)
description = RMAN_BL_NODE_DESCRIPTIONS.get(n.name, n.name)
if n.help:
description = n.help
items.append( (n.name, n.name, description, light_icon.icon_id, i))
return items
def get_projection_items():
items = []
i = 0
proj_icon = get_projection_icon("PxrCamera")
dflt = 'PxrCamera'
items.append((dflt, dflt, '', proj_icon.icon_id, i))
for n in rman_bl_nodes.__RMAN_PROJECTION_NODES__:
if n.name != dflt:
i += 1
proj_icon = get_projection_icon(n.name)
description = RMAN_BL_NODE_DESCRIPTIONS.get(n.name, n.name)
if n.help:
description = n.help
items.append( (n.name, n.name, description, proj_icon.icon_id, i))
return items
|
127178
|
import numpy as np
import matplotlib.pyplot as plt
x,y = np.linspace(0,5,100),np.linspace(0,2,100)
X,Y = np.meshgrid(x,y)
U = X
V = Y*(1-Y)
speed = np.sqrt(U*U + V*V)
start = [[.3,.15], [0.3,1], [.3,1.5],[3,1.5]]
fig0, ax0 = plt.subplots()
strm = ax0.streamplot(x,y, U, V, color=(.75,.90,.93))
strmS = ax0.streamplot(x,y, U, V, start_points=start, color="crimson", linewidth=2)
plt.show()
|
127190
|
from os.path import join
import numpy as np
import matplotlib as mpl
# For headless environments
mpl.use('Agg') # NOQA
import matplotlib.pyplot as plt
PLOT_CURVES = 'plot_curves'
def plot_curves(run_path):
"""Plot the training and validation accuracy over epochs.
# Arguments
run_path: the path to the files for a run
"""
log_path = join(run_path, 'log.txt')
log = np.genfromtxt(log_path, delimiter=',', skip_header=1)
epochs = log[:, 0]
acc = log[:, 1]
val_acc = log[:, 3]
plt.figure()
plt.title('Training Log')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.grid()
plt.plot(epochs, acc, '-', label='Training')
plt.plot(epochs, val_acc, '--', label='Validation')
plt.legend(loc='best')
accuracy_path = join(run_path, 'accuracy.pdf')
plt.savefig(accuracy_path, format='pdf', dpi=300)
|
127201
|
import numpy as np
from PIL import Image
import skimage
import skimage.transform
import scipy.io as io
import matplotlib.pyplot as plt
import utils.common_utils as cu
import scipy.io
def load_data(path, f):
img = np.array(Image.open(path))
img = skimage.transform.resize(img, (img.shape[0]//f,img.shape[1]//f), anti_aliasing = True)
img /= np.max(img)
return img
def load_mask(path, target_shape):
shutter = io.loadmat(path)['shutter_indicator']
shutter = shutter[shutter.shape[0]//4:-shutter.shape[0]//4,shutter.shape[1]//4:-shutter.shape[1]//4,:]
shutter = skimage.transform.resize(shutter, (target_shape[0],target_shape[1]), anti_aliasing = True)
return shutter
def load_simulated():
downsampling_factor = 16
mask_np = scipy.io.loadmat('data/single_shot_video/shutter_ds.mat')['shutter_indicator'][1:-2,...]
meas_np = scipy.io.loadmat('data/single_shot_video/meas_simulated.mat')['im']
mask_np = mask_np[meas_np.shape[0]//2:-meas_np.shape[0]//2, meas_np.shape[1]//2:-meas_np.shape[1]//2]
psf_np = load_data('data/single_shot_video/psf.tif',downsampling_factor)[1:][...,1]
return meas_np, psf_np, mask_np
def preplot(recons):
recons = cu.ts_to_np(recons).transpose(1,2,0)
recons /= np.max(recons)
return recons[recons.shape[0]//4:-recons.shape[0]//4,recons.shape[1]//4:-recons.shape[1]//4]
def preplot2(recons):
recons = cu.ts_to_np(recons).transpose(2,3,0,1)
recons /= np.max(recons)
recons = np.clip(recons, 0,1)
return recons
def plot(channel, recons):
recons = preplot(recons)
#n = random.randint(0,recons.shape[-1]-1)
#frame = recons[:,:,n]
plt.imshow(np.mean(recons,-1), cmap='gray')
plt.title('Reconstruction: channel %d mean projection'%(channel))
plt.show()
def plot3d(recons):
recons = recons[0].detach().cpu().numpy().transpose(2,3,0,1)
#n = random.randint(0,recons.shape[-1]-1)
#frame = recons[:,:,n]
plt.imshow(np.mean(recons,-1))
plt.title('Reconstruction: mean projection')
plt.show()
def plot_slider(x):
plt.title('Reconstruction: frame %d'%(x))
plt.axis('off')
plt.imshow(video[:,:,:,x])
return x
|
127211
|
from snowddl.blueprint import DatabaseBlueprint, DatabaseIdent
from snowddl.parser.abc_parser import AbstractParser
database_json_schema = {
"type": "object",
"properties": {
"is_transient": {
"type": "boolean"
},
"retention_time": {
"type": "integer"
},
"is_sandbox": {
"type": "boolean"
},
"comment": {
"type": "string"
}
},
"additionalProperties": False
}
class DatabaseParser(AbstractParser):
def load_blueprints(self):
for database_path in self.base_path.iterdir():
if not database_path.is_dir():
continue
params = self.parse_single_file(database_path / 'params.yaml', database_json_schema)
bp = DatabaseBlueprint(
full_name=DatabaseIdent(self.env_prefix, database_path.name),
is_transient=params.get('is_transient', False),
retention_time=params.get('retention_time', None),
is_sandbox=params.get('is_sandbox', False),
comment=params.get('comment', None),
)
self.config.add_blueprint(bp)
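# Hedged example (editor's assumption, not in the original parser): a
# params.yaml that validates against database_json_schema could look like
#
#   is_transient: false
#   retention_time: 1
#   is_sandbox: true
#   comment: "Scratch database for analysts"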
|
127226
|
import torch
import torch.nn as nn
import numpy as np
import torch.distributions as TD
import scipy
import scipy.linalg
from sklearn.datasets import make_spd_matrix
from copy import deepcopy
from multipledispatch import dispatch
from collections.abc import Iterable
import sdepy
from .em import batchItoEuler
from .em_proxrec import torchBatchItoEulerProxrec
class OU_distrib_modeler:
'''
This class models distribution X(t) of OrnsteinUhlenbeck process
dX(t) = - \grad \frac{1}{2}(x - b)^T A (x - b) dt + \sqrt{2 \beta^{-1}} d W(t)
b is n-dim vector
A is (n \cross n) invertible symmetric matrix
\beta is a positive scalar parameters
W(t) is standart n-dim Wiener process
'''
def _U_rotate(self, M):
if len(M.shape) == 1:
return self.U @ np.diag(M) @ self.U.conj().T
return self.U @ M @ self.U.conj().T
def __init__(self, A, b, beta):
if isinstance(A, torch.Tensor):
A = A.detach().cpu()
if isinstance(b, torch.Tensor):
b = b.detach().cpu()
self.A = np.asarray(A)
self.b = np.asarray(b)
self.beta = beta
assert self.A.shape[0] == self.A.shape[1], 'matrix A must be square'
# assert np.allclose(self.A.T, self.A, 1e-13), 'matrix A must be symmetric'
self.A = 0.5*(self.A + self.A.T)
assert self.b.shape[0] == self.A.shape[0], 'b an A dimensions must coincide'
assert np.linalg.matrix_rank(self.A, tol=1e-6) == self.A.shape[0], 'matrix A must have full rank'
self.theta = self.A
T, U = scipy.linalg.schur(self.theta)
# assert np.allclose(T, np.diag(np.diagonal(T)), 1e-13)
self.T = np.diagonal(T)
self.U = U
def _get_add_params(self, t):
_scale_param = self._U_rotate(np.exp(-self.T * t))
_add_param = (np.eye(self.b.shape[0]) - _scale_param).dot(self.b)
return _scale_param, _add_param
def _get_var_param(self, t):
return 2. * (1./self.beta) * self._U_rotate((1. - np.exp(- 2.*self.T * t))/(2. * self.T))
def get_distrib_params(self, X_0, t, dtype=torch.float32, device='cpu'):
X_0 = np.asarray(X_0)
_scale, _add = self._get_add_params(t)
mean = _scale.dot(X_0) + _add
# e_min_th_t = self._U_rotate(np.exp(-self.T * t))
# mean = e_min_th_t.dot(X_0) + (np.eye(self.b.shape[0]) - e_min_th_t).dot(self.b)
# var = 2. * (1./self.beta) * self._U_rotate((1. - np.exp(- 2.*self.T * t))/(2. * self.T))
var = self._get_var_param(t)
trc_mean = torch.tensor(mean, dtype=dtype).to(device)
trc_var = torch.tensor(var, dtype=dtype).to(device)
return trc_mean, trc_var
def get_distrib(self, X_0, t, dtype=torch.float32, device='cpu'):
mean, var = self.get_distrib_params(X_0, t, dtype=dtype, device=device)
return TD.MultivariateNormal(mean, var)
def get_normal_distrib_params(mvnormal_distribution):
assert isinstance(mvnormal_distribution, (TD.Normal, TD.MultivariateNormal))
mean = mvnormal_distribution.mean
var = 0.
if isinstance(mvnormal_distribution, TD.MultivariateNormal):
var = mvnormal_distribution.covariance_matrix
else:
var = mvnormal_distribution.scale ** 2
if var.size(0) == 1:
var = var.view(1, 1)
else:
var = torch.diag(var)
return mean, var
def create_ou_distrib_modeler(mvnormal_distribution, beta=1.0):
mean, var = get_normal_distrib_params(mvnormal_distribution)
var *= beta
return OU_distrib_modeler(torch.inverse(var), mean, beta)
class OU_tDeterministic(TD.MultivariateNormal):
@staticmethod
def _get_params(X_0, ou_distrib_modeler, t):
return ou_distrib_modeler.get_distrib_params(
X_0, t, dtype=X_0.dtype, device=X_0.device)
def __init__(self, X_0, ou_distrib_modeler, t):
super().__init__(*self._get_params(X_0, ou_distrib_modeler, t))
class OU_tNormal(TD.MultivariateNormal):
@staticmethod
def _get_params(init_distrib, ou_distrib_modeler, t):
assert isinstance(init_distrib, (TD.Normal, TD.MultivariateNormal))
b, A = get_normal_distrib_params(init_distrib)
i_A = torch.inverse(A)
dtype = b.dtype
device = b.device
F, g = ou_distrib_modeler._get_add_params(t)
F, g = torch.tensor(F, dtype=dtype).to(device), torch.tensor(g, dtype=dtype).to(device)
Sigma = torch.tensor(ou_distrib_modeler._get_var_param(t), dtype=dtype).to(device)
i_Sigma = torch.inverse(Sigma)
i_Xi = F.T @ i_Sigma @ F + i_A
Xi = torch.inverse(i_Xi)
Psi = i_Sigma - i_Sigma @ F @ Xi @ F.T @ i_Sigma
i_Psi = torch.inverse(Psi)
phi = i_Sigma @ F @ Xi @ i_A @ b
_mean = g + i_Psi @ phi
return _mean, i_Psi
def __init__(self, init_distrib, ou_distrib_modeler, t):
super().__init__(*self._get_params(init_distrib, ou_distrib_modeler, t))
class OU_tMixtureNormal(TD.MixtureSameFamily):
@dispatch(TD.Distribution, OU_distrib_modeler, object)
def __init__(self, init_distrib, ou_distrib_modeler, t):
assert isinstance(init_distrib, TD.MixtureSameFamily)
mixture = init_distrib.mixture_distribution
comp = init_distrib.component_distribution
assert isinstance(comp, TD.MultivariateNormal)
means = comp.loc
vars = comp.covariance_matrix
return self.__init__(mixture, means, vars, ou_distrib_modeler, t)
@dispatch(TD.Distribution, Iterable, Iterable, OU_distrib_modeler, object)
def __init__(self, mixture_distrib, means, vars, ou_distrib_modeler, t):
assert len(means) == len(vars)
f_means = []
f_vars = []
for i in range(len(means)):
distrib = TD.MultivariateNormal(means[i], vars[i])
f_mean, f_var = OU_tNormal._get_params(distrib, ou_distrib_modeler, t)
f_means.append(f_mean)
f_vars.append(f_var)
f_distrib = TD.MultivariateNormal(torch.stack(f_means), torch.stack(f_vars))
super().__init__(mixture_distrib, f_distrib)
def create_em_proxrec_samples(
x0, pdf0, final_distrib, t_fin, t_stp,
beta=1., verbose=False, **proxrec_params):
'''
creates diffusion samples along with pdf estimate using https://arxiv.org/pdf/1809.10844.pdf
'''
assert isinstance(final_distrib, (TD.Normal, TD.MultivariateNormal))
fin_mean, fin_var = get_normal_distrib_params(final_distrib)
device = x0.device
dtype = 'float32' if x0.dtype == torch.float32 else 'float64'
targ_grad_potential = get_ou_potential_func(
fin_mean, fin_var, dim_first=False, beta=beta,
grad=True, _type='torch', dtype=dtype, device=device)
targ_potential = get_ou_potential_func(
fin_mean, fin_var, dim_first=False, beta=beta,
grad=False, _type='torch', dtype=dtype, device=device)
assert len(x0.shape) == 2
x_fin, pdf_fin = torchBatchItoEulerProxrec(
targ_potential, x0, pdf0, t_stp, t_fin, beta=beta, verbose=verbose,
grad_pot_func=targ_grad_potential, **proxrec_params)
return x_fin, pdf_fin
@dispatch(np.ndarray, TD.Distribution, float, float, int)
def create_em_samples(
x0, final_distrib,
t_fin, t_stp, n_samples,
beta=1., return_init_spls=False):
'''
creates diffusion samples using Euler-Maruyama iterations
:Parameters:
x0 : np.ndarray : particles distributed according to initial distribution
init_distrib: torch.Distribution like : particles initial distribution
final_distrib: torch.Distribution like : final MultivariateNormal distribution
t_fin : float : particles observation time (start time is 0)
t_stp : float : time step of EM iterations
n_samples : int :count of particles to propagate
beta : float : diffusion magnitude
'''
assert isinstance(final_distrib, (TD.Normal, TD.MultivariateNormal))
fin_mean, fin_var = get_normal_distrib_params(final_distrib)
targ_grad_potential = get_ou_potential_func(
fin_mean, fin_var, dim_first=False, beta=beta, grad=True)
np_fin_var_inv = torch.inverse(fin_var).cpu().numpy()
np_fin_mean = fin_mean.cpu().numpy()
def minus_targ_grad_potential(x):
return - targ_grad_potential(x)
assert x0.shape[0] == n_samples
assert len(x0.shape) == 2
x_fin = batchItoEuler(minus_targ_grad_potential, x0, t_stp, t_fin, beta=beta)
if not return_init_spls:
return x_fin
return x0, x_fin
@dispatch(TD.Distribution, TD.Distribution, float, float, int)
def create_em_samples(
init_distrib, final_distrib,
t_fin, t_stp, n_samples, beta=1.,
return_init_spls=False):
'''
creates diffusion samples using Euler-Maruyama iterations
:Parameters:
init_distrib: torch.Distribution like : particles initial distribution
'''
x0 = init_distrib.sample((n_samples,)).cpu().numpy()
return create_em_samples(
x0, final_distrib, t_fin, t_stp, n_samples,
beta=beta, return_init_spls=return_init_spls)
def generate_ou_target(dim, mean_scale=1., dtype=torch.float32, device='cpu'):
var = make_spd_matrix(dim)
mean = np.random.randn(dim) * mean_scale
trc_var = torch.tensor(var, dtype=dtype).to(device)
trc_mean = torch.tensor(mean, dtype=dtype).to(device)
targ_distrib = TD.MultivariateNormal(trc_mean, trc_var)
init = np.random.randn(dim) * mean_scale
return targ_distrib, mean, var
def get_ou_potential_func(
mean, var, dim_first=True, beta=1., grad=False,
_type='numpy', device='cpu', dtype='float32'):
assert _type in ['numpy', 'torch']
assert dtype in ['float32', 'float64']
if isinstance(var, torch.Tensor):
var = var.detach().cpu().numpy()
if isinstance(mean, torch.Tensor):
mean = mean.detach().cpu().numpy()
if isinstance(var, list):
var = np.array(var)
if isinstance(mean, list):
mean = np.array(mean)
if isinstance(var, float):
var = np.array(var).reshape(1, 1)
if isinstance(mean, float):
mean = np.array(mean).reshape(1)
assert len(var.shape) == 2
assert len(mean.shape) == 1
assert var.shape[0] == var.shape[1]
assert var.shape[0] == mean.shape[0]
var_inv = np.linalg.inv(var)
dim = mean.shape[0]
if _type == 'numpy':
def ou_potential_func_dim_first(x):
assert x.shape[0] == dim
x_norm = x - mean.reshape((-1, 1))
w = (1. / (2. * beta)) * np.sum(x_norm * np.dot(var_inv, x_norm), axis=0)
return w
def ou_grad_potential_func_dim_first(x):
x_norm = x - mean.reshape((-1, 1))
return np.dot(var_inv, x_norm) / beta
else:
dtype = torch.float32 if dtype == 'float32' else torch.float64
mean = torch.tensor(mean, dtype=dtype, device=device)
var_inv = torch.tensor(var_inv, dtype=dtype, device=device)
def ou_potential_func_dim_first(x):
assert x.size(0) == dim
x_norm = x - mean.view((-1, 1))
w = (1. / (2. * beta)) * torch.sum(x_norm * torch.matmul(var_inv, x_norm), dim=0)
return w
def ou_grad_potential_func_dim_first(x):
x_norm = x - mean.view((-1, 1))
return torch.matmul(var_inv, x_norm) / beta
if dim_first:
if grad:
return ou_grad_potential_func_dim_first
else:
return ou_potential_func_dim_first
if grad:
return lambda x : ou_grad_potential_func_dim_first(x.T).T
else:
return lambda x : ou_potential_func_dim_first(x.T).T
if __name__ == "__main__":
##############
A = np.array([[1., 0.5], [0.5, 2.]])
b = np.array([0.1, 0.7])
nm = TD.MultivariateNormal(torch.tensor(b), torch.tensor(A))
beta=2.0
ou_d_m = create_ou_distrib_modeler(nm, beta)
X_0 = np.array([0.4, 2.3])
mean, var = ou_d_m.get_distrib_params(X_0, 70.)
assert np.allclose(mean, b, 1e-5)
assert np.allclose(var, A, 1e-5)
##############
d = OU_tDeterministic(X_0, ou_d_m, 0.01)
    print(d.mean)
    print(d.covariance_matrix)
|
127240
|
import pytest
from vnep_approx import treewidth_model
from test_data.request_test_data import create_test_request, example_requests
from test_data.tree_decomposition_test_data import PACE_INPUT_FORMAT
@pytest.mark.parametrize("test_data", PACE_INPUT_FORMAT.items())
def test_conversion_to_PACE_format_works(test_data):
req_id, expected = test_data
req = create_test_request(req_id)
td_comp = treewidth_model.TreeDecompositionComputation(req)
assert td_comp._convert_graph_to_td_input_format() == expected
@pytest.mark.parametrize("req_id", example_requests)
def test_tree_decomposition_computation_returns_valid_tree_decompositions(req_id):
req = create_test_request(req_id)
td_comp = treewidth_model.TreeDecompositionComputation(req)
tree_decomp = td_comp.compute_tree_decomposition()
assert tree_decomp.is_tree_decomposition(req)
@pytest.mark.parametrize("req_id", example_requests)
def test_small_nice_decomposition(req_id):
req = create_test_request(req_id)
td_comp = treewidth_model.TreeDecompositionComputation(req)
tree_decomp = td_comp.compute_tree_decomposition()
assert tree_decomp.is_tree_decomposition(req)
sntd = treewidth_model.SmallSemiNiceTDArb(tree_decomp, req)
assert sntd.is_tree_decomposition(req)
|
127244
|
from django import forms
from ..nospam import utils
from .fields import HoneypotField
class BaseForm(forms.Form):
def __init__(self, request, *args, **kwargs):
self._request = request
super(BaseForm, self).__init__(*args, **kwargs)
class AkismetForm(BaseForm):
akismet_fields = {
'comment_author': 'name',
'comment_author_email': 'email',
'comment_author_url': 'url',
'comment_content': 'comment',
}
akismet_api_key = None
def akismet_check(self):
fields = {}
for key, value in self.akismet_fields.items():
fields[key] = self.cleaned_data[value]
return utils.akismet_check(self._request, akismet_api_key=self.akismet_api_key, **fields)
class RecaptchaForm(BaseForm):
pass
class HoneyPotForm(BaseForm):
accept_terms = HoneypotField()
class SuperSpamKillerForm(RecaptchaForm, HoneyPotForm, AkismetForm):
pass
|
127278
|
import pandas as pd
import numpy as np
import os
from collections import Counter
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform
def find_correlation_clusters(corr,corr_thresh):
dissimilarity = 1.0 - corr
hierarchy = linkage(squareform(dissimilarity), method='single')
diss_thresh = 1.0 - corr_thresh
labels = fcluster(hierarchy, diss_thresh, criterion='distance')
return labels
def relabel_clusters(labels,metric_columns):
cluster_count = Counter(labels)
cluster_order = {cluster[0]: idx for idx, cluster in enumerate(cluster_count.most_common())}
relabeled_clusters = [cluster_order[l] for l in labels]
relabled_count = Counter(relabeled_clusters)
labeled_column_df = pd.DataFrame({'group': relabeled_clusters, 'column': metric_columns}).sort_values(
['group', 'column'], ascending=[True, True])
return labeled_column_df, relabled_count
def make_load_matrix(labeled_column_df,metric_columns,relabled_count,corr):
load_mat = np.zeros((len(metric_columns), len(relabled_count)))
for row in labeled_column_df.iterrows():
orig_col = metric_columns.index(row[1][1])
if relabled_count[row[1][0]]>1:
load_mat[orig_col, row[1][0]] = 1.0/ (np.sqrt(corr) * float(relabled_count[row[1][0]]) )
else:
load_mat[orig_col, row[1][0]] = 1.0
is_group = load_mat.astype(bool).sum(axis=0) > 1
column_names=['metric_group_{}'.format(d + 1) if is_group[d]
else labeled_column_df.loc[labeled_column_df['group']==d,'column'].iloc[0]
for d in range(0, load_mat.shape[1])]
loadmat_df = pd.DataFrame(load_mat, index=metric_columns, columns=column_names)
loadmat_df['name'] = loadmat_df.index
sort_cols = list(loadmat_df.columns.values)
sort_order = [False] * loadmat_df.shape[1]
sort_order[-1] = True
loadmat_df = loadmat_df.sort_values(sort_cols, ascending=sort_order)
loadmat_df = loadmat_df.drop('name', axis=1)
return loadmat_df
def save_load_matrix(data_set_path,loadmat_df, labeled_column_df):
save_path = data_set_path.replace('.csv', '_load_mat.csv')
print('saving loadings to ' + save_path)
loadmat_df.to_csv(save_path)
save_path = data_set_path.replace('.csv', '_groupmets.csv')
print('saving metric groups to ' + save_path)
group_lists=['|'.join(labeled_column_df[labeled_column_df['group']==g]['column'])
for g in set(labeled_column_df['group'])]
pd.DataFrame(group_lists,index=loadmat_df.columns.values,columns=['metrics']).to_csv(save_path)
def find_metric_groups(data_set_path,group_corr_thresh=0.5):
score_save_path=data_set_path.replace('.csv','_scores.csv')
assert os.path.isfile(score_save_path),'You must run listing 5.3 or 7.5 to save metric scores first'
score_data = pd.read_csv(score_save_path,index_col=[0,1])
score_data.drop('is_churn',axis=1,inplace=True)
metric_columns = list(score_data.columns.values)
labels = find_correlation_clusters(score_data.corr(),group_corr_thresh)
labeled_column_df, relabled_count = relabel_clusters(labels,metric_columns)
loadmat_df = make_load_matrix(labeled_column_df, metric_columns, relabled_count,group_corr_thresh)
save_load_matrix(data_set_path,loadmat_df,labeled_column_df)
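# Minimal sanity check (editor's sketch, not in the original listing): two
# highly correlated metrics should fall into the same group at a 0.5 threshold,
# while the weakly correlated one gets a group of its own.
if __name__ == '__main__':
    toy_corr = pd.DataFrame([[1.0, 0.9, 0.1],
                             [0.9, 1.0, 0.2],
                             [0.1, 0.2, 1.0]],
                            index=['m1', 'm2', 'm3'],
                            columns=['m1', 'm2', 'm3'])
    toy_labels = find_correlation_clusters(toy_corr, 0.5)
    toy_groups, toy_counts = relabel_clusters(toy_labels, ['m1', 'm2', 'm3'])
    print(toy_groups)  # m1 and m2 share a group id, m3 stands alone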
|
127299
|
import argparse
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import sys
sys.path.append('.')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('res', type=str)
parser.add_argument('gt', type=str)
args = parser.parse_args()
return args
def main(args):
evaluate(args.res, args.gt)
def evaluate(res_file, gt_file):
annType = 'segm'
cocoGt = COCO(gt_file)
cocoDt = cocoGt.loadRes(res_file)
cocoEval = COCOeval(cocoGt, cocoDt, annType)
cocoEval.params.imgIds = cocoGt.getImgIds()
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if __name__ == "__main__":
args = parse_args()
main(args)
|
127311
|
from django.contrib.auth.models import User
from core.models import UserProfile, CourseStatus, Course, ProviderProfile, TimelineItem
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username', 'email')
class BioSerializer(serializers.ModelSerializer):
user = UserSerializer()
class Meta:
model = UserProfile
fields = ('user', 'first_name', 'last_name', 'mail', 'resume', 'birth_date', 'avatar')
class BioRestrictedSerializer(BioSerializer):
"""
This is used to avoid the update of some fields in this model
"""
class Meta:
model = UserProfile
fields = ('first_name', 'last_name', 'mail', 'resume', 'birth_date', 'avatar')
class CourseSerializer(serializers.ModelSerializer):
tags = serializers.StringRelatedField(many=True)
provider = serializers.StringRelatedField()
class Meta:
model = Course
fields = ('title', 'url', 'badge', 'provider', 'tags',)
class ProviderProfileSerializer(serializers.ModelSerializer):
class Meta:
model = ProviderProfile
fields = ('error', 'username_provider')
class TimelineItemSerializer(serializers.ModelSerializer):
class Meta:
model = TimelineItem
fields = ('title', 'description', 'type', 'start_date', 'end_date')
class CousesStatusSerializer(serializers.ModelSerializer):
course = CourseSerializer()
class Meta:
model = CourseStatus
fields = ('course', 'completed')
|
127376
|
from django.conf import settings
from django.views.generic import TemplateView
from daiquiri.core.views import ModelPermissionMixin
from .models import Record
class ManagementView(ModelPermissionMixin, TemplateView):
template_name = 'stats/management.html'
permission_required = 'daiquiri_stats.view_record'
def get_context_data(self, **kwargs):
context = super(ManagementView, self).get_context_data(**kwargs)
context['resource_types'] = settings.STATS_RESOURCE_TYPES
for resource_type in context['resource_types']:
queryset = Record.objects.filter(resource_type=resource_type['key'])
resource_type['count'] = queryset.count()
resource_type['client_ips'] = queryset.order_by('client_ip').values('client_ip').distinct().count()
resource_type['users'] = queryset.order_by('user').values('user').distinct().count()
return context
|
127386
|
import pytest
from dbt.tests.util import run_dbt, check_relations_equal
snapshot_sql = """
{% snapshot snapshot_check_cols_new_column %}
{{
config(
target_database=database,
target_schema=schema,
strategy='check',
unique_key='id',
check_cols=var("check_cols", ['name']),
updated_at="'" ~ var("updated_at") ~ "'::timestamp",
)
}}
{% if var('version') == 1 %}
select 1 as id, 'foo' as name
{% else %}
select 1 as id, 'foo' as name, 'bar' as other
{% endif %}
{% endsnapshot %}
"""
expected_csv = """
id,name,other,dbt_scd_id,dbt_updated_at,dbt_valid_from,dbt_valid_to
1,foo,NULL,0d73ad1b216ad884c9f7395d799c912c,2016-07-01 00:00:00.000,2016-07-01 00:00:00.000,2016-07-02 00:00:00.000
1,foo,bar,7df3783934a6a707d51254859260b9ff,2016-07-02 00:00:00.000,2016-07-02 00:00:00.000,
""".lstrip()
@pytest.fixture(scope="class")
def snapshots():
return {"snapshot_check_cols_new_column.sql": snapshot_sql}
@pytest.fixture(scope="class")
def seeds():
return {"snapshot_check_cols_new_column_expected.csv": expected_csv}
@pytest.fixture(scope="class")
def project_config_update():
return {
"seeds": {
"quote_columns": False,
"test": {
"snapshot_check_cols_new_column_expected": {
"+column_types": {
"dbt_updated_at": "timestamp without time zone",
"dbt_valid_from": "timestamp without time zone",
"dbt_valid_to": "timestamp without time zone",
},
},
},
},
}
def test_simple_snapshot(project):
"""
Test that snapshots using the "check" strategy and explicit check_cols support adding columns.
Approach:
1. Take a snapshot that checks a single non-id column
2. Add a new column to the data
3. Take a snapshot that checks the new non-id column too
As long as no error is thrown, then the snapshot was successful
"""
# 1. Create a table that represents the expected data after a series of snapshots
results = run_dbt(["seed", "--show", "--vars", "{version: 1, updated_at: 2016-07-01}"])
assert len(results) == 1
# Snapshot 1
results = run_dbt(
["snapshot", "--vars", "{version: 1, check_cols: ['name'], updated_at: 2016-07-01}"]
)
assert len(results) == 1
# Snapshot 2
results = run_dbt(
[
"snapshot",
"--vars",
"{version: 2, check_cols: ['name', 'other'], updated_at: 2016-07-02}",
]
)
assert len(results) == 1
check_relations_equal(
project.adapter,
["snapshot_check_cols_new_column", "snapshot_check_cols_new_column_expected"],
compare_snapshot_cols=True,
)
|
127396
|
import numpy as np
import scipy.signal
__all__ = ['instant_parameters']
#-----------------------------------
def instant_parameters(signal, fs = None):
'''
Instant parameters estimation:
..math::
analitc_signal = hilbert(signal)
envelope = |analitc_signal|
phase = unwrap(angle(analitc_signal))
frequency = diff(phase)
Paramteres
-------------
signal: 1d ndarray,
input signal (can be real or complex);
fs: float or None,
sampling frequecny, fs = signal.size, if None
Return
-------------
frequency: 1d ndarray,
instant frequency to time relation.
envelope: 1d ndarray,
instant envelope to time relation.
phase: 1d ndarray,
instant pahse to time relation.
'''
if fs is None:
fs = len(signal)
signal = np.asarray(signal)
if signal.dtype != complex:
analytic = scipy.signal.hilbert(signal)
else:
analytic = signal
envelope = np.abs(analytic)
angles = np.angle(analytic)
phase = np.unwrap(angles)
frequency = np.concatenate((np.diff(angles),
[angles[-2] - angles[-1]]))
for i in range(frequency.size):
if frequency[i]< 0:
if i>0: frequency[i] = frequency[i-1]
else: frequency[i] = frequency[i+1]
frequency = frequency*fs/(2*np.pi)
return frequency, envelope, phase
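# Hedged usage sketch (editor's example, not part of the original module):
# estimate the instantaneous frequency of a linear chirp; halfway through a
# 50 -> 250 Hz sweep the estimate should sit near 150 Hz.
if __name__ == '__main__':
    fs = 1000
    t = np.arange(0, 1, 1 / fs)
    sig = scipy.signal.chirp(t, f0=50, t1=1.0, f1=250)
    freq, envelope, phase = instant_parameters(sig, fs=fs)
    print(freq[fs // 2])  # roughly 150 Hz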
|
127427
|
import imageio
import sys
if __name__ == '__main__':
if len(sys.argv) == 2:
_, filename = sys.argv
img = imageio.imread(filename).astype(dtype='float32')
print('DTYPE:', img.dtype)
print('SHAPE:', img.shape)
elif len(sys.argv) == 3:
_, filename, type = sys.argv
img = imageio.imread(filename).astype(dtype=type)
sys.stdout.buffer.write(img.tobytes())
else:
print("Usage: dump_img.py filename [type]")
|
127461
|
from torch_sparse import coalesce
def dense_to_sparse(tensor):
index = tensor.nonzero()
    value = tensor[index[:, 0], index[:, 1]]
index = index.t().contiguous()
index, value = coalesce(index, value, tensor.size(0), tensor.size(1))
return index, value
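# Hedged usage sketch (editor's example, not part of the original helper):
# convert a small dense adjacency matrix into edge_index / edge_weight form.
if __name__ == '__main__':
    import torch
    adj = torch.tensor([[0.0, 1.0],
                        [2.0, 0.0]])
    edge_index, edge_weight = dense_to_sparse(adj)
    print(edge_index)   # tensor([[0, 1], [1, 0]])
    print(edge_weight)  # tensor([1., 2.])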
|
127471
|
import os
import random
import numpy as np
import argparse
import logging
import pickle
from pprint import pformat
from exps.data import get_modelnet40_data_fps
from settree.set_data import SetDataset, OPERATIONS, flatten_datasets
import exps.eval_utils as eval
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--exp_name", type=str, default='test')
parser.add_argument("--log", action='store_true')
parser.add_argument("--seed", type=int, default=45)
parser.add_argument('--save', action='store_true')
args = parser.parse_args()
log_dir = os.path.join(os.path.abspath('__file__' + '/../'), 'outputs', 'fps')
eval.create_logger(log_dir=log_dir,
log_name=args.exp_name,
dump=args.log)
logging.info('Args:\n' + pformat(vars(args)))
np.random.seed(args.seed)
random.seed(args.seed)
# x_train, y_train, x_test, y_test = get_modelnet40_data(down_sample=10,
# do_standardize=True,
# flip=False,
# seed=args.seed)
x_train, y_train, x_test, y_test = get_modelnet40_data_fps()
ds_train = SetDataset(records=x_train, is_init=True)
ds_test = SetDataset(records=x_test, is_init=True)
logging.info(args)
shared_gbdt_params = {'n_estimators': 150,
'learning_rate': 0.1,
'max_depth': 6,
'max_features': None,
'subsample': 1,
'random_state': args.seed}
set_params = {'n_estimators': shared_gbdt_params['n_estimators'],
'operations': OPERATIONS,
'splitter': 'sklearn',
'use_attention_set': True,
'use_attention_set_comp': False,
'attention_set_limit': 5,
'max_depth': shared_gbdt_params['max_depth'],
'max_features': shared_gbdt_params['max_features'],
'subsample': shared_gbdt_params['subsample'],
'random_state': shared_gbdt_params['random_state'],
'save_path': os.path.join(log_dir, '{}_checkpoint.pkl'.format(args.exp_name)),
'validation_fraction': 0.1,
'tol': 1e-3,
'n_iter_no_change': 5,
'verbose': 3}
sklearn_params = {'n_estimators': shared_gbdt_params['n_estimators'],
'criterion': 'mse',
'learning_rate': shared_gbdt_params['learning_rate'],
'max_depth': shared_gbdt_params['max_depth'],
'max_features': shared_gbdt_params['max_features'],
'subsample': shared_gbdt_params['subsample'],
'validation_fraction': 0.1,
'tol': 1e-3,
'n_iter_no_change': 5,
'verbose': 3,
'random_state': shared_gbdt_params['random_state']}
xgboost_params = {#'objective': 'binary:logistic', # 'multi:softmax', binary:logistic
'max_depth': shared_gbdt_params['max_depth'],
'n_jobs': 10,
'learning_rate': shared_gbdt_params['learning_rate'],
'n_estimators': shared_gbdt_params['n_estimators'],
'colsample_bytree': shared_gbdt_params['max_features'],
'subsample': shared_gbdt_params['subsample'],
'reg_lambda': 0,
'reg_alpha': 0,
'verbosity': 0,
'random_state': shared_gbdt_params['random_state'],
'seed': shared_gbdt_params['random_state']}
x_train, x_test = flatten_datasets(ds_train, ds_test,
operations_list=set_params['operations'],
ds_val=None)
xgboost_gbtd = eval.train_and_predict_xgboost(xgboost_params,
x_train, y_train,
x_test, y_test,
val_x=None, val_y=None,
early_stopping_rounds=None,
mode='multi_cls')
set_gbtd = eval.train_and_predict_set_gbdt(set_params,
ds_train, y_train,
ds_test, y_test,
eval_train=False,
mode='multi_cls')
if args.save:
pkl_filename = os.path.join(log_dir, '{}_model.pkl'.format(args.exp_name))
with open(pkl_filename, 'wb') as file:
pickle.dump(set_gbtd, file)
|
127491
|
import numpy
import h5py
import scipy.sparse
from pyscf import gto, scf, mcscf, fci, ao2mo, lib
from pauxy.systems.generic import Generic
from pauxy.utils.from_pyscf import generate_integrals
from pauxy.utils.io import (
write_qmcpack_wfn,
write_qmcpack_dense,
write_input
)
mol = gto.M(atom=[('N', 0, 0, 0), ('N', (0,0,3.0))], basis='sto-3g', verbose=3,
unit='Bohr')
mf = scf.RHF(mol)
mf.chkfile = 'scf.chk'
ehf = mf.kernel()
M = 6
N = 6
mc = mcscf.CASSCF(mf, M, N)
mc.chkfile = 'scf.chk'
e_tot, e_cas, fcivec, mo, mo_energy = mc.kernel()
print(ehf, e_tot)
# Rotate by casscf mo coeffs.
h1e, chol, nelec, enuc = generate_integrals(mol, mf.get_hcore(), mo,
chol_cut=1e-5, verbose=True)
write_qmcpack_dense(h1e, chol.T.copy(), nelec,
h1e.shape[-1], enuc=enuc, filename='afqmc.h5')
coeff, occa, occb = zip(*fci.addons.large_ci(fcivec, M, (3,3),
tol=0.1, return_strs=False))
core = [i for i in range(mc.ncore)]
occa = [numpy.array(core + [o + mc.ncore for o in oa]) for oa in occa]
occb = [numpy.array(core + [o + mc.ncore for o in ob]) for ob in occb]
coeff = numpy.array(coeff,dtype=numpy.complex128)
# Sort in ascending order.
ixs = numpy.argsort(numpy.abs(coeff))[::-1]
coeff = coeff[ixs]
occa = numpy.array(occa)[ixs]
occb = numpy.array(occb)[ixs]
nmo = mf.mo_coeff.shape[-1]
rdm = mc.make_rdm1()
eigs, eigv = numpy.linalg.eigh(rdm)
psi0a = eigv[::-1,:mol.nelec[0]].copy()
psi0b = eigv[::-1,:mol.nelec[1]].copy()
psi0 = [psi0a, psi0b]
write_qmcpack_wfn('afqmc.h5', (coeff,occa,occb), 'uhf',
mol.nelec, nmo, init=psi0, mode='a')
write_input('input.json', 'afqmc.h5', 'afqmc.h5')
|
127627
|
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
def plot_cca(image, objects_cordinates):
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
ax.imshow(image, cmap=plt.cm.gray)
for each_cordinate in objects_cordinates:
min_row, min_col, max_row, max_col = each_cordinate
bound_box = mpatches.Rectangle((min_col, min_row), max_col - min_col,
max_row - min_row, fill=False, edgecolor='red', linewidth=2)
ax.add_patch(bound_box)
plt.show()
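# Hedged usage sketch (assumed workflow, not in the original module): the
# bounding boxes passed to plot_cca are typically produced by labelling a
# binary image and reading region.bbox from skimage.measure.regionprops.
if __name__ == '__main__':
    import numpy as np
    from skimage import measure
    img = np.zeros((64, 64))
    img[10:20, 10:20] = 1
    img[40:55, 30:50] = 1
    labels = measure.label(img > 0)
    boxes = [region.bbox for region in measure.regionprops(labels)]
    plot_cca(img, boxes)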
|
127648
|
comida = ["tacos", "pozole", "<NAME>", "pastel", "spaghetti", "gorditas"]
print("Acceder a los elementos de la lista individualmente")
print(comida[0])
print(comida[2])
print(comida[5])
print("Mostrar todos los elementos")
print(comida)
print()
print("Eliminar algun elemento")
del comida[3]
comida.pop()
print(comida)
print()
print("Agregar elementos en distintos puntos de la lista")
print("Al inicio burritos")
comida.insert(0,"burritos")
print("Al medio sopa")
comida.insert(2, "sopa")
print("Al final con .append tostadas")
comida.append("tostadas")
print("Asi quedaria ahora")
print(comida)
print()
print("Ahora a jugar con el orden de la lista")
print("Orden original")
print(comida,"\n")
print("Ordenados alfabeticamente sin alterar la lista original")
print(sorted(comida), "\n")
print("No se modifico la original")
print(comida,"\n")
print("Ordenados alfabeticamente de manera inversa sin alterar la lista original")
print(sorted(comida,reverse=True),"\n")
print("No se modifico la original")
print(comida,"\n")
print("Invertir la lista original")
comida.reverse()
print(comida,"\n")
print("Regresar la lista que acabamos de invertir a como estaba")
comida.reverse()
print(comida,"\n")
print("Ordenar la lista alfabeticamente modificando la lista")
comida.sort()
print(comida,"\n")
print("Ordenar la lista alfabeticamente de forma invertida modificando la lista")
comida.sort(reverse=True)
print(comida)
print()
print("Al final mostrar la longitud de esta lista que es de:",len(comida))
|
127661
|
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
# path
this_dir = osp.dirname(__file__)
# refer path
refer_dir = osp.join(this_dir, '..', 'data', 'ref')
sys.path.insert(0, refer_dir)
# lib path
sys.path.insert(0, osp.join(this_dir, '..'))
sys.path.insert(0, osp.join(this_dir, '..', 'lib'))
sys.path.insert(0, osp.join(this_dir, '..', 'tools'))
sys.path.insert(0, osp.join(this_dir, '..', 'utils'))
|
127676
|
class Eval:
"""
Eval
"""
def __init__(self):
self.predict_num = 0
self.correct_num = 0
self.gold_num = 0
self.precision = 0
self.recall = 0
self.fscore = 0
def clear(self):
"""
:return:
"""
self.predict_num = 0
self.correct_num = 0
self.gold_num = 0
self.precision = 0
self.recall = 0
self.fscore = 0
def getFscore(self):
"""
:return:
"""
if self.predict_num == 0:
self.precision = 0
else:
self.precision = (self.correct_num / self.predict_num) * 100
if self.gold_num == 0:
self.recall = 0
else:
self.recall = (self.correct_num / self.gold_num) * 100
if self.precision + self.recall == 0:
self.fscore = 0
else:
self.fscore = 2 * (self.precision * self.recall) / (self.precision + self.recall)
return self.precision, self.recall, self.fscore
def acc(self):
"""
:return:
"""
return self.correct_num / self.gold_num
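# Hedged usage sketch (editor's example, not in the original class): with 8
# predicted spans, 10 gold spans and 6 correct matches, getFscore() returns
# precision = 75.0, recall = 60.0 and F1 of roughly 66.67.
if __name__ == "__main__":
    ev = Eval()
    ev.predict_num, ev.gold_num, ev.correct_num = 8, 10, 6
    print(ev.getFscore())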
|
127709
|
import dico
client = dico.Client("YOUR_BOT_TOKEN")
client.on_ready = lambda ready: print(f"Bot ready, with {len(ready.guilds)} guilds.")
@client.on_message_create
async def on_message_create(message: dico.Message):
if message.content.startswith("!button"):
button = dico.Button(style=dico.ButtonStyles.PRIMARY, label="Hello!", custom_id="hello")
button2 = dico.Button(style=dico.ButtonStyles.DANGER, label="Bye!", custom_id="bye")
row = dico.ActionRow(button, button2)
await message.reply("Button!", component=row)
@client.on_interaction_create
async def on_button_response(interaction: dico.Interaction):
if not interaction.type.message_component or not interaction.data.component_type.button:
return
resp = dico.InteractionResponse(callback_type=dico.InteractionCallbackType.CHANNEL_MESSAGE_WITH_SOURCE,
data=dico.InteractionApplicationCommandCallbackData(content=f"Yes, it's {interaction.data.custom_id}.",
flags=dico.InteractionApplicationCommandCallbackDataFlags.EPHEMERAL))
await interaction.create_response(resp)
client.run()
|
127726
|
from contextlib import closing
from pathlib import Path
import pytest
from asyncio_extras import open_async
@pytest.fixture(scope='module')
def testdata():
return b''.join(bytes([i] * 1000) for i in range(10))
@pytest.fixture
def testdatafile(tmpdir_factory, testdata):
file = tmpdir_factory.mktemp('file').join('testdata')
file.write(testdata)
return Path(str(file))
@pytest.mark.asyncio
async def test_read(testdatafile, testdata):
async with open_async(testdatafile, 'rb') as f:
data = await f.read()
assert f.closed
assert data == testdata
@pytest.mark.asyncio
async def test_write(testdatafile, testdata):
async with open_async(testdatafile, 'ab') as f:
await f.write(b'f' * 1000)
assert testdatafile.stat().st_size == len(testdata) + 1000
@pytest.mark.asyncio
async def test_async_readchunks(testdatafile):
value = 0
async with open_async(testdatafile, 'rb') as f:
async for chunk in f.async_readchunks(1000):
assert chunk == bytes([value] * 1000)
value += 1
@pytest.mark.asyncio
async def test_no_contextmanager(testdatafile, testdata):
"""Test that open_async() can be used without an async context manager."""
with closing(await open_async(testdatafile, 'rb')) as f:
data = await f.read()
assert f.closed
assert data == testdata
@pytest.mark.asyncio
async def test_iteration(testdatafile, testdata):
lines = []
async for line in open_async(testdatafile, 'rb'):
lines.append(line)
data = b''.join(lines)
assert data == testdata
|
127748
|
from django.shortcuts import render
from django.core import serializers
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf import settings
import json
import datetime
from registrar.models import Course
from registrar.models import Announcement
from registrar.models import Syllabus
from registrar.models import Policy
from registrar.models import Lecture
from registrar.models import Assignment
from registrar.models import Quiz
from registrar.models import Exam
from registrar.models import CourseSubmission
# Public Functions
#--------------------
@login_required(login_url='/landpage')
def overview_page(request, course_id):
course = Course.objects.get(id=course_id)
try:
review = CourseSubmission.objects.get(course=course)
except CourseSubmission.DoesNotExist:
review = None
try:
announcements = Announcement.objects.filter(course=course).order_by('-post_date')
except Announcement.DoesNotExist:
announcements = None
try:
syllabus = Syllabus.objects.get(course=course)
except Syllabus.DoesNotExist:
syllabus = None
try:
policy = Policy.objects.get(course=course)
except Policy.DoesNotExist:
policy = None
try:
lectures = Lecture.objects.filter(course=course).order_by('-lecture_num')
except Lecture.DoesNotExist:
lectures = None
try:
assignments = Assignment.objects.filter(course=course).order_by('-assignment_num')
except Assignment.DoesNotExist:
assignments = None
try:
quizzes = Quiz.objects.filter(course=course).order_by('-quiz_num')
except Quiz.DoesNotExist:
quizzes = None
try:
exams = Exam.objects.filter(course=course).order_by('-exam_num')
except Exam.DoesNotExist:
exams = None
return render(request, 'teacher/overview/view.html',{
'course': course,
'total_final_mark_worth': total_final_mark_worth(course),
'has_final_exam': has_final_exam(exams),
'review': review,
'announcements' : announcements,
'syllabus': syllabus,
'lectures': lectures,
'assignments': assignments,
'quizzes': quizzes,
'exams': exams,
'policy': policy,
'COURSE_SUBMITTED_FOR_REVIEW_STATUS': settings.COURSE_SUBMITTED_FOR_REVIEW_STATUS,
'COURSE_IN_REVIEW_STATUS': settings.COURSE_IN_REVIEW_STATUS,
'COURSE_UNAVAILABLE_STATUS': settings.COURSE_UNAVAILABLE_STATUS,
'COURSE_AVAILABLE_STATUS': settings.COURSE_AVAILABLE_STATUS,
'COURSE_REJECTED_STATUS': settings.COURSE_REJECTED_STATUS,
'user': request.user,
'tab': 'overview',
'HAS_ADVERTISMENT': settings.APPLICATION_HAS_ADVERTISMENT,
'local_css_urls': settings.SB_ADMIN_2_CSS_LIBRARY_URLS,
'local_js_urls': settings.SB_ADMIN_2_JS_LIBRARY_URLS,
})
@login_required(login_url='/landpage')
def submit_course_for_review(request, course_id):
course = Course.objects.get(id=course_id)
response_data = {'status' : 'failed', 'message' : ''}
# Validate announcements
try:
announcements = Announcement.objects.filter(course=course).order_by('-post_date')
if announcements.count() < 1:
response_data['message'] = 'zero announcements'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Announcement.DoesNotExist:
response_data['message'] = 'no announcements detected'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate syllabus
try:
Syllabus.objects.get(course=course)
except Syllabus.DoesNotExist:
response_data['message'] = 'no syllabus set'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate policy
try:
Policy.objects.get(course=course)
except Policy.DoesNotExist:
response_data['message'] = 'no policy set'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate lectures
try:
lectures = Lecture.objects.filter(course=course).order_by('-lecture_num')
if lectures.count() < 2:
response_data['message'] = 'minimum 2 lectures required'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Lecture.DoesNotExist:
response_data['message'] = 'no policy set'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate assignments
try:
assignments = Assignment.objects.filter(course=course).order_by('-assignment_num')
if assignments.count() < 1:
response_data['message'] = 'minimum 1 assignment required'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Assignment.DoesNotExist:
response_data['message'] = 'no assignment(s)'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate quizzes
try:
quizzes = Quiz.objects.filter(course=course).order_by('-quiz_num')
if quizzes.count() < 1:
response_data['message'] = 'minimum 1 quiz required'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Quiz.DoesNotExist:
response_data['message'] = 'no quiz(zes) found'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate exams
try:
exams = Exam.objects.filter(course=course).order_by('-exam_num')
if exams.count() < 1:
response_data['message'] = 'minimum 1 exam required'
return HttpResponse(json.dumps(response_data), content_type="application/json")
except Exam.DoesNotExist:
response_data['message'] = 'no exams(s) found'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Validate final mark calculator
total_worth = total_final_mark_worth(course)
if total_worth != 100:
response_data['message'] = 'total final mark must add up to 100%'
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Make sure we have a final exam
is_final = has_final_exam(exams)
if is_final == False:
response_data['message'] = 'course requires only 1 final exam'
return HttpResponse(json.dumps(response_data), content_type="application/json")
review = CourseSubmission.objects.create(
course=course,
)
review.save()
# Make course available.
course.status = settings.COURSE_AVAILABLE_STATUS
course.save()
response_data = {'status' : 'success', 'message' : 'submitted course review'}
return HttpResponse(json.dumps(response_data), content_type="application/json")
# Private Functions
#--------------------
# Function looks through the course assignments/exams/quizzes and returns
# the accumulated worth total.
def total_final_mark_worth(course):
total_worth = 0 # Variable used to track total worth of the coursework.
# Fetch from database
try:
assignments = Assignment.objects.filter(course=course).order_by('-assignment_num')
except Assignment.DoesNotExist:
assignments = None
try:
quizzes = Quiz.objects.filter(course=course).order_by('-quiz_num')
except Quiz.DoesNotExist:
quizzes = None
try:
exams = Exam.objects.filter(course=course).order_by('-exam_num')
except Exam.DoesNotExist:
exams = None
# Iterate through all coursework and calculate the total.
for assignment in assignments:
total_worth += assignment.worth
for quiz in quizzes:
total_worth += quiz.worth
for exam in exams:
total_worth += exam.worth
return total_worth
# Function will iterate through all the exams and return either True or False
# depending if a 'final exam' was found in the list.
def has_final_exam(exams):
count = 0
for exam in exams:
if exam.is_final == True:
count += 1
return count == 1
|