# --- id: 94558 ---
import pickle
from collections import Counter
from math import log
from typing import List, Dict, Tuple
import numpy as np
from scipy.sparse import csr_matrix
from scipy.spatial.distance import cosine
from common import check_data_set, flatten_nested_iterables
from preprocessors.configs import PreProcessingConfigs
from utils.file_ops import create_dir, check_paths
def extract_word_to_doc_ids(docs_of_words: List[List[str]]) -> Dict[str, List[int]]:
"""Extracted the document ids where unique words appeared."""
word_to_doc_ids = {}
for doc_id, words in enumerate(docs_of_words):
appeared_words = set()
for word in words:
if word not in appeared_words:
if word in word_to_doc_ids:
word_to_doc_ids[word].append(doc_id)
else:
word_to_doc_ids[word] = [doc_id]
appeared_words.add(word)
return word_to_doc_ids
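# A minimal illustration (hypothetical input) of the mapping built above:
# extract_word_to_doc_ids([['a', 'b'], ['b', 'c']])
# -> {'a': [0], 'b': [0, 1], 'c': [1]}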
def extract_word_to_doc_counts(word_to_doc_ids: Dict[str, List[int]]) -> Dict[str, int]:
return {word: len(doc_ids) for word, doc_ids in word_to_doc_ids.items()}
def extract_windows(docs_of_words: List[List[str]], window_size: int) -> List[List[str]]:
"""Word co-occurrence with context windows"""
windows = []
for doc_words in docs_of_words:
doc_len = len(doc_words)
if doc_len <= window_size:
windows.append(doc_words)
else:
for j in range(doc_len - window_size + 1):
window = doc_words[j: j + window_size]
windows.append(window)
return windows
def extract_word_counts_in_windows(windows_of_words: List[List[str]]) -> Dict[str, int]:
"""Find the total count of unique words in each window, each window is bag-of-words"""
bags_of_words = map(set, windows_of_words)
return Counter(flatten_nested_iterables(bags_of_words))
def extract_word_ids_pair_to_counts(windows_of_words: List[List[str]], word_to_id: Dict[str, int]) -> Dict[str, int]:
word_ids_pair_to_counts = Counter()
for window in windows_of_words:
for i in range(1, len(window)):
word_id_i = word_to_id[window[i]]
for j in range(i):
word_id_j = word_to_id[window[j]]
if word_id_i != word_id_j:
word_ids_pair_to_counts.update(['{},{}'.format(word_id_i, word_id_j),
'{},{}'.format(word_id_j, word_id_i)])
return dict(word_ids_pair_to_counts)
def extract_pmi_word_weights(windows_of_words: List[List[str]], word_to_id: Dict[str, int], vocab: List[str],
train_size: int) -> Tuple[List[int], List[int], List[float]]:
"""Calculate PMI as weights"""
weight_rows = [] # type: List[int]
weight_cols = [] # type: List[int]
pmi_weights = [] # type: List[float]
num_windows = len(windows_of_words)
word_counts_in_windows = extract_word_counts_in_windows(windows_of_words=windows_of_words)
word_ids_pair_to_counts = extract_word_ids_pair_to_counts(windows_of_words, word_to_id)
for word_id_pair, count in word_ids_pair_to_counts.items():
word_ids_in_str = word_id_pair.split(',')
word_id_i, word_id_j = int(word_ids_in_str[0]), int(word_ids_in_str[1])
word_i, word_j = vocab[word_id_i], vocab[word_id_j]
word_freq_i, word_freq_j = word_counts_in_windows[word_i], word_counts_in_windows[word_j]
pmi_score = log((1.0 * count / num_windows) / (1.0 * word_freq_i * word_freq_j / (num_windows * num_windows)))
if pmi_score > 0.0:
weight_rows.append(train_size + word_id_i)
weight_cols.append(train_size + word_id_j)
pmi_weights.append(pmi_score)
return weight_rows, weight_cols, pmi_weights
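# The PMI weight above is log( p(i,j) / (p(i) * p(j)) ) estimated from window counts:
# p(i,j) = #windows containing both i and j / #windows, p(i) = #windows containing i / #windows.
# A hypothetical check: with 100 windows, count(i,j)=10, freq(i)=20, freq(j)=25,
# pmi = log((10/100) / ((20*25)/(100*100))) = log(2.0) ~= 0.693 > 0, so the (i, j) edge is kept.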
def extract_cosine_similarity_word_weights(vocab: List[str], train_size: int,
word_vec_path: str) -> Tuple[List[int], List[int], List[float]]:
"""Calculate Cosine Similarity of Word Vectors as weights"""
word_vectors = pickle.load(file=open(word_vec_path, 'rb')) # type: Dict[str,List[float]]
weight_rows = [] # type: List[int]
weight_cols = [] # type: List[int]
cos_sim_weights = [] # type: List[float]
for i, word_i in enumerate(vocab):
for j, word_j in enumerate(vocab):
if word_i in word_vectors and word_j in word_vectors:
vector_i = np.array(word_vectors[word_i])
vector_j = np.array(word_vectors[word_j])
similarity = 1.0 - cosine(vector_i, vector_j)
if similarity > 0.9:
print(word_i, word_j, similarity)
weight_rows.append(train_size + i)
weight_cols.append(train_size + j)
cos_sim_weights.append(similarity)
return weight_rows, weight_cols, cos_sim_weights
def extract_doc_word_ids_pair_to_counts(docs_of_words: List[List[str]], word_to_id: Dict[str, int]) -> Dict[str, int]:
doc_word_freq = Counter()
for doc_id, doc_words in enumerate(docs_of_words):
for word in doc_words:
word_id = word_to_id[word]
doc_word_freq.update([str(doc_id) + ',' + str(word_id)])
return dict(doc_word_freq)
def extract_tf_idf_doc_word_weights(
adj_rows: List[int], adj_cols: List[int], adj_weights: List[float], vocab: List[str], train_size: int,
docs_of_words: List[List[str]], word_to_id: Dict[str, int]) -> Tuple[List[int], List[int], List[float]]:
"""Extract Doc-Word weights with TF-IDF"""
doc_word_ids_pair_to_counts = extract_doc_word_ids_pair_to_counts(docs_of_words, word_to_id)
word_to_doc_ids = extract_word_to_doc_ids(docs_of_words=docs_of_words)
word_to_doc_counts = extract_word_to_doc_counts(word_to_doc_ids=word_to_doc_ids)
vocab_len = len(vocab)
num_docs = len(docs_of_words)
for doc_id, doc_words in enumerate(docs_of_words):
doc_word_set = set()
for word in doc_words:
if word not in doc_word_set:
word_id = word_to_id[word]
word_ids_pair_count = doc_word_ids_pair_to_counts[str(doc_id) + ',' + str(word_id)]
adj_rows.append(doc_id if doc_id < train_size else doc_id + vocab_len)
adj_cols.append(train_size + word_id)
doc_word_idf = log(1.0 * num_docs / word_to_doc_counts[vocab[word_id]])
adj_weights.append(word_ids_pair_count * doc_word_idf)
doc_word_set.add(word)
return adj_rows, adj_cols, adj_weights
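# The doc-word weight above is raw term frequency times idf = log(num_docs / doc_freq(word)).
# Hypothetical check: a word occurring 3 times in a doc and appearing in 10 of 1000 docs
# gets weight 3 * log(1000 / 10) = 3 * log(100) ~= 13.816.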
def build_adjacency(ds_name: str, cfg: PreProcessingConfigs):
"""Build Adjacency Matrix of Doc-Word Heterogeneous Graph"""
# input files
ds_corpus = cfg.corpus_shuffled_dir + ds_name + ".txt"
ds_corpus_vocabulary = cfg.corpus_shuffled_vocab_dir + ds_name + '.vocab'
ds_corpus_train_idx = cfg.corpus_shuffled_split_index_dir + ds_name + '.train'
ds_corpus_test_idx = cfg.corpus_shuffled_split_index_dir + ds_name + '.test'
# checkers
check_data_set(data_set_name=ds_name, all_data_set_names=cfg.data_sets)
check_paths(ds_corpus, ds_corpus_vocabulary, ds_corpus_train_idx, ds_corpus_test_idx)
create_dir(dir_path=cfg.corpus_shuffled_adjacency_dir, overwrite=False)
docs_of_words = [line.split() for line in open(file=ds_corpus)]
vocab = open(ds_corpus_vocabulary).read().splitlines() # Extract Vocabulary.
word_to_id = {word: i for i, word in enumerate(vocab)} # Word to its id.
train_size = len(open(ds_corpus_train_idx).readlines()) # Real train-size, not adjusted.
test_size = len(open(ds_corpus_test_idx).readlines()) # Real test-size.
windows_of_words = extract_windows(docs_of_words=docs_of_words, window_size=20)
# Extract word-word weights
rows, cols, weights = extract_pmi_word_weights(windows_of_words, word_to_id, vocab, train_size)
# As an alternative, use cosine similarity of word vectors as weights:
# ds_corpus_word_vectors = cfg.CORPUS_WORD_VECTORS_DIR + ds_name + '.word_vectors'
# rows, cols, weights = extract_cosine_similarity_word_weights(vocab, train_size, ds_corpus_word_vectors)
# Extract word-doc weights
rows, cols, weights = extract_tf_idf_doc_word_weights(rows, cols, weights, vocab,
train_size, docs_of_words, word_to_id)
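# Node index layout implied by the offsets above (rows/cols index one combined graph):
#   [0, train_size)                       -> training documents
#   [train_size, train_size + |vocab|)    -> words
#   [train_size + |vocab|, adjacency_len) -> test documents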
adjacency_len = train_size + len(vocab) + test_size
adjacency_matrix = csr_matrix((weights, (rows, cols)), shape=(adjacency_len, adjacency_len))
# Dump Adjacency Matrix
with open(cfg.corpus_shuffled_adjacency_dir + "/ind.{}.adj".format(ds_name), 'wb') as f:
pickle.dump(adjacency_matrix, f)
print("[INFO] Adjacency Dir='{}'".format(cfg.corpus_shuffled_adjacency_dir))
print("[INFO] ========= EXTRACTED ADJACENCY MATRIX: Heterogenous doc-word adjacency matrix. =========")
# --- id: 94613 ---
import re
import pkuseg
from tqdm import tqdm
from collections import Counter
class Statistics():
def __init__(self,data):
self.data = data
self.min_length = 5
self.max_length = 100
self.post_num = 0
self.resp_num = 0
self.err_data = 0
def word_freq(self):
seg = pkuseg.pkuseg(model_name='web')
# seg = pkuseg.pkuseg()
stopwords = []
text = []
new_text = []
with open("stopwords.txt","r") as f:
stopwords = set(f.read().split())  # build a set of stopwords; with the raw string, 'word not in stopwords' was a substring test
for line in tqdm(self.data):
post, resp = line[0],line[1:]
text.extend(seg.cut(post))
for r in resp:
text.extend(seg.cut(r))
for word in text:
if word not in stopwords:
new_text.append(word)
counter = Counter(new_text)
print('Start create user_dictionary')
with open("word_user.txt","w") as fout:
for k,v in tqdm(counter.most_common()):
fout.write(k + '\t' + str(v) + '\n')
def check_sentence_length(self):
bucket_p = {}
bucket_r = {}
new_data = []
d = (self.max_length - self.min_length) / 10
for line in self.data:
resps = []
post,resp = line[0], line[1:]
self.post_num += 1
post = self.check_length(post)
k = str(int((len(post) - self.min_length) / d))
bucket_p[k] = bucket_p[k] + 1 if k in bucket_p else 1
for r in resp:
self.resp_num += 1
r = self.check_length(r)
k = str(int((len(r) - self.min_length) / d))
bucket_r[k] = bucket_r[k] + 1 if k in bucket_r else 1
if r: resps.append(r)
if not post or not resps: continue
new_data.append([post]+resps)
print('Total Post:%d , Response: %d , Pair: %d , Avg_Pair: %f ' % (self.post_num,self.resp_num,self.resp_num,1.0 * self.resp_num / self.post_num))
with open("sentence_length.txt","w") as f:
for kv in sorted(bucket_p.items(),key = lambda d: int(d[0])):
key = kv[0]
value = kv[1]
idx = int(key)
f.write('Post length %d - %d : %d \n' % (self.min_length + idx * d, self.min_length + (idx + 1) * d - 1, value))
for kv2 in sorted(bucket_r.items(),key = lambda d: int(d[0])):
key = kv2[0]
value = kv2[1]
idx = int(key)
f.write('Response length %d - %d : %d \n' % (self.min_length + idx * d, self.min_length + (idx + 1) * d - 1, value))
self.data = new_data
return new_data
def check_length(self, sentence):
if len(sentence) < self.min_length or len(sentence) > self.max_length:
with open("err_data.txt","w") as f:
f.write('empty data \n') if len(sentence) == 0 else f.write('error data: %s, %d\n' % (sentence,len(sentence)))
self.err_data += 1
return ""
return sentence
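# A minimal usage sketch (assuming `data` is a list of [post, resp1, resp2, ...] strings
# and that stopwords.txt exists in the working directory):
#   stats = Statistics(data)
#   filtered = stats.check_sentence_length()  # writes sentence_length.txt, returns filtered pairs
#   stats.word_freq()                         # writes word_user.txt with tab-separated counts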
# --- id: 94631 ---
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Bookmark',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField()),
],
),
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.SlugField()),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=models.CASCADE, to='contenttypes.ContentType')),
],
),
]
# --- id: 94693 ---
from sys import exit
import argparse
import logging
_logger = logging.getLogger(__name__)
_LOGGING_FORMAT = '%(name)s.%(funcName)s[%(levelname)s]: %(message)s'
_DEBUG_LOGGING_FORMAT = '### %(asctime).19s.%(msecs).3s [%(levelname)s] %(name)s.%(funcName)s (%(filename)s:%(lineno)d) ###\n%(message)s'
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-v", '--verbose',
help="enable verbose logging",
action="store_true")
parser.add_argument("--novr",
help="non-VR mode",
action="store_true")
parser.add_argument("-a", "--msaa", metavar='<multisample level>', type=int,
help='enable multi-sampled anti-aliasing at specified level (must be a non-negative power of 2); default is 4',
default=4)
parser.add_argument('--resolution', metavar='<width>x<height>',
help='OpenGL viewport resolution, e.g. 960x680',
default='960x680')
parser.add_argument('--fullscreen',
help='create fullscreen window',
action='store_true')
parser.add_argument('-o', "--ode",
help="use ODE for physics simulation instead of the default event-based physics engine",
action="store_true")
parser.add_argument("-c", "--collision-model", metavar='<name of collision model>',
help="set the ball-to-ball collision model to use (this parameter only applies to the event-based physics engine)",
default='simple')
# parser.add_argument('-q', '--use-quartic-solver',
# help="solve for collision times using the internal quartic solver instead of numpy.roots",
# action='store_true')
parser.add_argument('-s', '--sound-device', metavar='<device ID>',
help="enable sound using the specified device",
default=None)
parser.add_argument('-l', '--list-sound-devices',
help="list the available sound devices",
action="store_true")
parser.add_argument('--cube-map',
help='enable cube-mapped environmental texture',
action='store_true')
parser.add_argument('--glyphs',
help='render velocity and angular velocity glyphs',
action='store_true')
parser.add_argument('--speed', metavar='<factor>',
help='time speed-up/slow-down factor (default is 1.0, normal speed)',
default=1.0, type=float)
parser.add_argument('-r', '--realtime',
action='store_true',
help='enable the realtime version (intended for interactive usage) of the event-based physics engine')
parser.add_argument('--collision-search-time-forward', metavar='<time duration>',
help='''time into the future in seconds to calculate events for
before yielding to render a new frame - using this option enables the realtime engine''')
parser.add_argument('--collision-search-time-limit', metavar='<time duration>',
help='''maximum time in seconds to spend calculating events
before yielding to render a new frame - using this option enables the realtime engine''')
parser.add_argument('--balls-on-table', metavar='<list of ball numbers>',
help='comma-separated list of balls on table',
default=','.join(str(n) for n in range(16)))
parser.add_argument('--render-method', metavar='<render method name>',
help='OpenGL rendering method/style to use, one of: "ega", "lambert", "billboards", "raycast"',
default='raycast')
args = parser.parse_args()
args.msaa = int(args.msaa)
args.balls_on_table = [int(n) for n in args.balls_on_table.split(',')]
args.resolution = [int(x) for x in args.resolution.split('x')]
if args.collision_search_time_limit is not None:
collision_search_time_limit = float(args.collision_search_time_limit)
else:
collision_search_time_limit = None  # the realtime and default branches were identical, so they collapse into one else
args.collision_search_time_limit = collision_search_time_limit
if args.collision_search_time_forward is not None:
collision_search_time_forward = float(args.collision_search_time_forward)
elif args.realtime:
collision_search_time_forward = 4.0/90
else:
collision_search_time_forward = None
args.collision_search_time_forward = collision_search_time_forward
return args
def main():
args = parse_args()
if args.verbose:
logging.basicConfig(format=_DEBUG_LOGGING_FORMAT, level=logging.DEBUG)
else:
logging.basicConfig(format=_LOGGING_FORMAT, level=logging.WARNING)
if args.list_sound_devices:
from .sound import list_sound_devices
list_sound_devices()
exit(0)
if args.sound_device:
start_sound(args.sound_device)
import poolvr.app
poolvr.app.main(novr=args.novr,
ball_collision_model=args.collision_model,
use_ode=args.ode,
multisample=args.msaa,
cube_map=args.cube_map,
speed=args.speed,
glyphs=args.glyphs,
balls_on_table=args.balls_on_table,
render_method=args.render_method,
# use_quartic_solver=args.use_quartic_solver,
use_quartic_solver=True,
collision_search_time_forward=args.collision_search_time_forward,
collision_search_time_limit=args.collision_search_time_limit,
fullscreen=args.fullscreen,
window_size=args.resolution)
def start_sound(sound_device):
try:
sound_device = int(sound_device)
try:
import poolvr.sound
try:
poolvr.sound.set_output_sound_device(sound_device)
except Exception as err:
_logger.error('could not set output sound device:\n%s', err)
except Exception as err:
_logger.error('could not import poolvr.sound:\n%s', err)
except Exception as err:
_logger.error('could not parse parameter "--sound-device %s":\n%s', sound_device, err)
if __name__ == "__main__":
main()
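# Hypothetical invocations, assuming this module is the package entry point:
#   python -m poolvr --novr --resolution 1280x720 --msaa 8
#   python -m poolvr --list-sound-devices
#   python -m poolvr --realtime --balls-on-table 0,1,9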
# --- id: 94696 ---
import re
import pandas as pd
def update_simulation_date_time(lines, start_line, new_datetime):
"""
replace both the analysis and reporting start date and times
"""
new_date = new_datetime.strftime("%m/%d/%Y")
new_time = new_datetime.strftime("%H:%M:%S")
# dates are written as MM/DD/YYYY by the strftime call above; the old pattern
# (backslash-separated, 2-digit year) could never match a date line
lines[start_line] = re.sub(r'\d{2}/\d{2}/\d{4}', new_date,
lines[start_line])
lines[start_line+1] = re.sub(r'\d{2}:\d{2}:\d{2}', new_time,
lines[start_line+1])
lines[start_line+2] = re.sub(r'\d{2}/\d{2}/\d{4}', new_date,
lines[start_line+2])
lines[start_line+3] = re.sub(r'\d{2}:\d{2}:\d{2}', new_time,
lines[start_line+3])
return lines
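# For example, given .inp lines like the following (format assumed from the strftime
# patterns above), the two re.sub calls rewrite the date and time in place:
#   "START_DATE           01/01/2019"  ->  "START_DATE           07/15/2020"
#   "START_TIME           00:00:00"    ->  "START_TIME           12:30:00"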
def update_process_model_file(inp_file, new_date_time, hs_file):
with open(inp_file, 'r') as tmp_file:
lines = tmp_file.readlines()
# update date and times
date_section_start, date_section_end = find_section(lines, "START_DATE")
update_simulation_date_time(lines, date_section_start, new_date_time)
# update to use hotstart file
file_section_start, file_section_end = find_section(lines, "[FILES]")
new_hotstart_string = get_file_section_string(hs_file)
lines = update_section(lines, new_hotstart_string, file_section_start,
file_section_end)
with open(inp_file, 'w') as tmp_file:
tmp_file.writelines(lines)
def find_section(lines, section_name):
start_line = None
end_line = None
for i, l in enumerate(lines):
if l.startswith("{}".format(section_name)):
start_line = i
for j, ll in enumerate(lines[i+1:]):
if ll.startswith("["):
end_line = j + i
break
break  # stop at the first match instead of silently taking the last one
if end_line is None:  # 'if not end_line' also fired when end_line == 0
end_line = len(lines)
return start_line, end_line
def update_section(lines, new_lines, old_section_start=None,
old_section_end=None):
"""
lines: list of strings; text of .inp file read into list of strings
new_lines: list of strings; list of strings for replacing old section
old_section_start: int; position of line where replacing should start
(will append to end of file if 'None' and section end
is 'None' passed as argument)
old_section_end: int; position of line where replacing should end
"""
if old_section_start is not None and old_section_end is not None:  # explicit None checks; a section starting at line 0 is falsy
del lines[old_section_start: old_section_end]
else:
old_section_start = len(lines)
lines[old_section_start: old_section_start] = new_lines
return lines
def get_file_section_string(hs_filename):
new_lines = ["[FILES] \n"]
new_lines.append('USE HOTSTART "{}"\n \n'.format(hs_filename))
return new_lines
def get_control_rule_string(control_time_step, policies):
"""
Write control rules from the policies.
"""
new_lines = ["[CONTROLS]\n"]
rule_number = 0
# control_time_step is in seconds. convert to hours
control_time_step_hours = control_time_step/3600.
for structure_id in policies:
structure_type = structure_id.split()[0]
for i, policy_step in enumerate(policies[structure_id]):
l1 = "RULE R{}\n".format(rule_number)
l2 = "IF SIMULATION TIME < {:.3f}\n".format(
(i+1) * control_time_step_hours)
# check the structure type to write 'SETTINGS' or 'STATUS'
if structure_type == 'ORIFICE' or structure_type == 'WEIR':
sttg_or_status = 'SETTING'
elif structure_type == 'PUMP':
sttg_or_status = 'STATUS'
l3 = "THEN {} {} = {}\n".format(structure_id, sttg_or_status,
policy_step)
l4 = "\n"
new_lines.extend([l1, l2, l3, l4])
rule_number += 1
return new_lines
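# With control_time_step=900 (15 min) and policies={'ORIFICE R1': [0.5, 1.0]}, the
# generated section would look like (a sketch, based on the format strings above):
#   [CONTROLS]
#   RULE R0
#   IF SIMULATION TIME < 0.250
#   THEN ORIFICE R1 SETTING = 0.5
#
#   RULE R1
#   IF SIMULATION TIME < 0.500
#   THEN ORIFICE R1 SETTING = 1.0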
def update_controls_and_hotstart(inp_file, control_time_step, policies,
hs_file=None):
"""
control_time_step: number; in seconds
policies: dict; structure id (e.g., ORIFICE R1) as key, list of settings
as value;
"""
with open(inp_file, 'r') as inpfile:
lines = inpfile.readlines()
control_line, end_control_line = find_section(lines, "[CONTROLS]")
control_rule_string = get_control_rule_string(control_time_step, policies)
updated_lines = update_section(lines, control_rule_string, control_line,
end_control_line)
if hs_file:
file_section_start, file_section_end = find_section(updated_lines,
"[FILES]")
hs_lines = get_file_section_string(hs_file)
updated_lines = update_section(updated_lines, hs_lines,
file_section_start,
file_section_end)
with open(inp_file, 'w') as inpfile:
inpfile.writelines(updated_lines)
def update_controls_with_policy(inp_file, policy_file):
policy_df = pd.read_csv(policy_file)
control_time_step = get_control_time_step(policy_df)
policy_columns = [col for col in policy_df.columns if "setting" in col]
policy_dict = {}
for policy_col in policy_columns:
structure_id = policy_col.split("_")[-1]
policy_dict[structure_id] = policy_df[policy_col].tolist()
update_controls_and_hotstart(inp_file, control_time_step, policy_dict)
def remove_control_section(inp_file):
with open(inp_file, 'r') as inpfile:
lines = inpfile.readlines()
control_line, end_control_line = find_section(lines, "[CONTROLS]")
if control_line is not None and end_control_line is not None:
del lines[control_line: end_control_line]
with open(inp_file, 'w') as inpfile:
inpfile.writelines(lines)
def read_hs_filename(inp_file):
with open(inp_file, 'r') as f:
for line in f:
if line.startswith("USE HOTSTART"):
hs_filename = line.split()[-1].replace('"', '')
return hs_filename
def get_control_time_step(df, dt_col="datetime"):
times = (pd.to_datetime(df[dt_col]))
delta_times = times.diff()
time_step = delta_times.mean().seconds
if not time_step % 60:
return time_step
# if it's only off by 1 or two seconds then round down to nearest minute
elif time_step % 60 < 3:
time_step -= time_step % 60
return time_step
else:
raise Exception("The time step in your file is in between minutes")
# --- id: 94734 ---
from __future__ import print_function
import pytest
import sys
from operator import add
import findspark
findspark.init()
from pyspark import SparkContext, SparkConf, SQLContext, Row
import os, subprocess, json, riak, time
import pyspark_riak
import timeout_decorator
import datetime
import tzlocal
import pytz
import math
from pyspark_tests_fixtures import *
from random import randint
#### Notes ####
'''
Saving ints to Riak TS preserves the value of the timestamp.
Querying ints with the riak client is simple in this case: just query the int range.
When saving datetimes to Riak TS, the datetimes are treated as local time and then converted to GMT.
The riak client can only query by int, so you must convert your local datetime to a UTC int.
With ts_get you can query using a local datetime; it is converted to UTC automatically before the query.
Reading a datetime from TS with the Spark timestamp option converts it back to a local datetime.
'''
###### FUNCTIONS #######
def setup_table(client):
riak_ts_table_name = 'spark-riak-%d' % int(time.time())
riak_ts_table = client.table(riak_ts_table_name)
create_sql = """CREATE TABLE %(table_name)s (
field1 varchar not null,
field2 varchar not null,
datetime timestamp not null,
data sint64,
PRIMARY KEY ((field1, field2, quantum(datetime, 24, h)), field1, field2, datetime))
""" % ({'table_name': riak_ts_table_name})
return riak_ts_table_name, create_sql, riak_ts_table
def setup_kv_obj(client, bucket_name, key, content_type, data):
bucket = client.bucket(bucket_name)
obj = riak.RiakObject(client, bucket, key)
obj.content_type = content_type
obj.data = data
return obj
def setup_ts_obj(ts_table, data):
return ts_table.new(data)
def unix_time_seconds(dt):
td = dt - datetime.datetime.utcfromtimestamp(0)
return int(td.total_seconds())
def unix_time_millis(dt):
seconds = unix_time_seconds(dt)
return int(seconds * 1000.0)
def make_data_long(start_date, N, M):
data = []
one_second = datetime.timedelta(seconds=1)
one_day = datetime.timedelta(days=1)
for i in range(M):
for j in range(N):
data.append(['field1_val', 'field2_val', unix_time_millis(start_date + i*one_day + j*one_second), i+j])
end_date = start_date + (M-1)*one_day + (N-1)*one_second
return data, start_date, end_date
def make_data_timestamp(start_date, N, M):
timestamp_data = []
long_data = []
one_second = datetime.timedelta(seconds=1)
one_day = datetime.timedelta(days=1)
local_start_date = convert_to_local_dt(start_date)
for i in range(M):
for j in range(N):
cur_local_timestamp = local_start_date + i*one_day + j*one_second
timestamp_data.append(['field1_val', 'field2_val', cur_local_timestamp, i+j])
long_data.append(['field1_val', 'field2_val', unix_time_millis(convert_dt_to_gmt_dt(cur_local_timestamp)), i+j])
start_timestamp = convert_dt_to_gmt_dt(timestamp_data[0][2])
end_timestamp = convert_dt_to_gmt_dt(timestamp_data[-1][2])
start_long = long_data[0][2]
end_long = long_data[-1][2]
return timestamp_data, start_timestamp, end_timestamp, long_data, start_long, end_long
def convert_dt_to_gmt_dt(dt):
gmt_dt_with_tzinfo = pytz.utc.normalize(dt)
year = gmt_dt_with_tzinfo.year
month = gmt_dt_with_tzinfo.month
day = gmt_dt_with_tzinfo.day
hour = gmt_dt_with_tzinfo.hour
minute = gmt_dt_with_tzinfo.minute
second = gmt_dt_with_tzinfo.second
gmt_dt = datetime.datetime(year, month, day, hour, minute, second)
return gmt_dt
def convert_to_local_dt(dt):
# local_tz = tzlocal.get_localzone()
local_tz = pytz.utc
local_dt = local_tz.localize(dt)
return local_dt
def convert_local_dt_to_gmt_dt(dt):
local_dt = convert_to_local_dt(dt)
return convert_dt_to_gmt_dt(local_dt)
def make_table_with_data(N, M, useLong, spark_context, riak_client):
riak_ts_table_name, create_sql, riak_ts_table = setup_table(riak_client)
riak_ts_table.query(create_sql)
seed_date = datetime.datetime(2016, 1, 1, 12, 0, 0)
if useLong:
test_data, start, end = make_data_long(seed_date, N, M)
test_rdd = spark_context.parallelize(test_data)
else:
timestamp_data, start_timestamp, end_timestamp, long_data, start_long, end_long = make_data_timestamp(seed_date, N, M)
test_rdd = spark_context.parallelize(timestamp_data)
test_df = test_rdd.toDF(['field1', 'field2', 'datetime', 'data'])
test_df.write.format('org.apache.spark.sql.riak').mode('Append').save(riak_ts_table_name)
if useLong:
return start, end, riak_ts_table_name, test_df, test_rdd, test_data, riak_ts_table
else:
return start_timestamp, end_timestamp, riak_ts_table_name, test_df, test_rdd, timestamp_data, long_data, start_long, end_long, riak_ts_table
def make_kv_data(N, spark_context):
source_data = []
test_data = []
keys = []
bad_keys = []
for i in range(N):
keys.append(str(u'key'+str(i)))
source_data.append({str(u'key'+str(i)) : {u'data' : i}})
test_data.append( (str(u'key'+str(i)),{u'data' : i}))
bad_keys.append(str(i))
source_rdd = spark_context.parallelize(source_data)
return source_rdd, source_data, test_data, keys, bad_keys
def make_kv_data_2i(N, test_bucket_name, riak_client):
bucket = riak_client.bucket_type('default').bucket(test_bucket_name)
test_data = []
string2i = []
integer2i = []
partitions = []
bad_partitions = []
for i in range(N):
obj = riak.RiakObject(riak_client, bucket, str(u'key'+str(i)))
obj.content_type = 'application/json'
obj.data = {u'data' : i}
obj.add_index('string_index_bin', 'string_val_'+str(i))
obj.add_index('integer_index_int', i)
obj.store()
test_data.append((str('key'+str(i)),{u'data' : i}))
string2i.append('string_val_'+str(i))
integer2i.append(i)
partitions.append((i,i))
bad_partitions.append((N+i,N+i))
return test_data, string2i, integer2i, partitions, bad_partitions
def make_filter(useLong, start, end):
if useLong:
temp_filter = """datetime >= %(start_date)s
AND datetime <= %(end_date)s
AND field1 = '%(field1)s'
AND field2 = '%(field2)s'
""" % ({'start_date': unix_time_millis(start), 'end_date': unix_time_millis(end), 'field1': 'field1_val', 'field2': 'field2_val'})
else:
temp_filter = """datetime >= CAST(%(start_date)s AS TIMESTAMP)
AND datetime <= CAST(%(end_date)s AS TIMESTAMP)
AND field1 = '%(field1)s'
AND field2 = '%(field2)s'
""" % ({'start_date': start, 'end_date': end, 'field1': 'field1_val', 'field2': 'field2_val'})
return temp_filter
def make_ts_query(riak_ts_table_name, start, end):
fmt = """
select * from {table_name}
where datetime >= {start_date}
AND datetime <= {end_date}
AND field1 = '{field1}'
AND field2 = '{field2}'
"""
query = fmt.format(table_name=riak_ts_table_name, start_date=unix_time_millis(start), end_date=unix_time_millis(end), field1='field1_val', field2='field2_val')
return query
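# A rendered query for a hypothetical table and a one-day range would look like:
#   select * from spark-riak-1451649600
#   where datetime >= 1451649600000
#   AND datetime <= 1451736000000
#   AND field1 = 'field1_val'
#   AND field2 = 'field2_val'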
###### TESTS #######
# def _test_connection(spark_context, riak_client, sql_context):
#
# riak_client.ping()
#
# obj = setup_kv_obj(riak_client, 'temp_bucket', 'temp_key', 'text/plain', 'temp_data')
#
# obj.store()
#
# result = riak_client.bucket('temp_bucket').get('temp_key')
#
# assert result.data == 'temp_data'
#
# riak_ts_table_name, create_sql, riak_ts_table = setup_table(riak_client)
#
# riak_ts_table.query(create_sql)
#
# time.sleep(5)
#
# ts_obj = setup_ts_obj(riak_ts_table, [['field1_val', 'field2_val', unix_time_millis(datetime.datetime(2015, 1, 1, 12, 0, 0)), 0]])
#
# ts_obj.store()
#
# result = riak_client.ts_get(riak_ts_table_name, ['field1_val', 'field2_val', unix_time_millis(datetime.datetime(2015, 1, 1, 12, 0, 0))])
#
# assert result.rows == [['field1_val', 'field2_val', unix_time_millis(datetime.datetime(2015, 1, 1, 12, 0, 0)), 0]]
###### Riak TS Test #######
def _test_spark_df_ts_write_use_long(N, M, spark_context, riak_client, sql_context):
useLong=True
start, end, riak_ts_table_name, test_df, test_rdd, test_data, riak_ts_table = make_table_with_data(N, M, useLong, spark_context, riak_client)
query = make_ts_query(riak_ts_table_name, start, end)
result = riak_ts_table.query(query)
assert sorted(result.rows, key=lambda x: x[2]) == sorted(test_rdd.collect(), key=lambda x: x[2])
def _test_spark_df_ts_write_use_timestamp(N, M, spark_context, riak_client, sql_context):
useLong=False
start_timestamp, end_timestamp, riak_ts_table_name, test_df, test_rdd, timestamp_data, long_data, start_long, end_long, riak_ts_table = make_table_with_data(N, M, useLong, spark_context, riak_client)
query = make_ts_query(riak_ts_table_name, start_timestamp, end_timestamp)
result = riak_ts_table.query(query)
assert sorted(result.rows, key=lambda x: x[2]) == sorted(spark_context.parallelize(long_data).collect(), key=lambda x: x[2])
def _test_spark_df_ts_read_use_long(N, M, spark_context, riak_client, sql_context):
useLong=True
start, end, riak_ts_table_name, test_df, test_rdd, test_data, riak_ts_table = make_table_with_data(N, M, useLong, spark_context, riak_client)
temp_filter = make_filter(useLong, start, end)
result = sql_context.read.format("org.apache.spark.sql.riak").option("spark.riakts.bindings.timestamp", "useLong").load(riak_ts_table_name).filter(temp_filter)
assert sorted(result.collect(), key=lambda x: x[2]) == sorted(test_df.collect(), key=lambda x: x[2])
def _test_spark_df_ts_read_use_long_ts_quantum(N, M, spark_context, riak_client, sql_context):
useLong=True
start, end, riak_ts_table_name, test_df, test_rdd, test_data, riak_ts_table = make_table_with_data(N, M, useLong, spark_context, riak_client)
temp_filter = make_filter(useLong, start, end)
result = sql_context.read.format("org.apache.spark.sql.riak") \
.option("spark.riakts.bindings.timestamp", "useLong") \
.option("spark.riak.partitioning.ts-quantum", "24h") \
.load(riak_ts_table_name).filter(temp_filter)
assert sorted(result.collect(), key=lambda x: x[2]) == sorted(test_df.collect(), key=lambda x: x[2])
def _test_spark_df_ts_read_use_timestamp(N, M, spark_context, riak_client, sql_context):
useLong=False
start_timestamp, end_timestamp, riak_ts_table_name, test_df, test_rdd, timestamp_data, long_data, start_long, end_long, riak_ts_table = make_table_with_data(N, M, useLong, spark_context, riak_client)
temp_filter = make_filter(useLong, unix_time_seconds(start_timestamp), unix_time_seconds(end_timestamp))
result = sql_context.read.format("org.apache.spark.sql.riak").option("spark.riakts.bindings.timestamp", "useTimestamp").load(riak_ts_table_name).filter(temp_filter)
assert sorted(result.collect(), key=lambda x: x[2]) == sorted(test_df.collect(), key=lambda x: x[2])
def _test_spark_df_ts_read_use_timestamp_ts_quantum(N, M, spark_context, riak_client, sql_context):
useLong=False
start_timestamp, end_timestamp, riak_ts_table_name, test_df, test_rdd, timestamp_data, long_data, start_long, end_long, riak_ts_table = make_table_with_data(N, M, useLong, spark_context, riak_client)
temp_filter = make_filter(useLong, unix_time_seconds(start_timestamp), unix_time_seconds(end_timestamp))
result = sql_context.read.format("org.apache.spark.sql.riak").option("spark.riakts.bindings.timestamp", "useTimestamp").option("spark.riak.partitioning.ts-quantum", "24h").load(riak_ts_table_name).filter(temp_filter)
assert sorted(result.collect(), key=lambda x: x[2]) == sorted(test_df.collect(), key=lambda x: x[2])
def _test_spark_df_ts_range_query_input_split_count_use_long(N, M, S,spark_context, riak_client, sql_context):
useLong=True
start, end, riak_ts_table_name, test_df, test_rdd, test_data, riak_ts_table = make_table_with_data(N, M, useLong, spark_context, riak_client)
time.sleep(1)
temp_filter = make_filter(useLong, start, end)
result = sql_context.read.format("org.apache.spark.sql.riak") \
.option("spark.riakts.bindings.timestamp", "useLong") \
.option("spark.riak.input.split.count", str(S)) \
.option("spark.riak.partitioning.ts-range-field-name", "datetime") \
.load(riak_ts_table_name).filter(temp_filter)
assert sorted(result.collect(), key=lambda x: x[2]) == sorted(test_df.collect(), key=lambda x: x[2])
assert result.rdd.getNumPartitions() == S
def _test_spark_df_ts_range_query_input_split_count_use_long_ts_quantum(N, M, S,spark_context, riak_client, sql_context):
useLong=True
start, end, riak_ts_table_name, test_df, test_rdd, test_data, riak_ts_table = make_table_with_data(N, M, useLong, spark_context, riak_client)
temp_filter = make_filter(useLong, start, end)
result = sql_context.read.format("org.apache.spark.sql.riak") \
.option("spark.riakts.bindings.timestamp", "useLong") \
.option("spark.riak.partitioning.ts-quantum", "24h") \
.option("spark.riak.input.split.count", str(S)) \
.option("spark.riak.partitioning.ts-range-field-name", "datetime") \
.load(riak_ts_table_name).filter(temp_filter)
assert sorted(result.collect(), key=lambda x: x[2]) == sorted(test_df.collect(), key=lambda x: x[2])
assert result.rdd.getNumPartitions() == S
def _test_spark_df_ts_range_query_input_split_count_use_timestamp(N, M, S,spark_context, riak_client, sql_context):
useLong=False
start_timestamp, end_timestamp, riak_ts_table_name, test_df, test_rdd, timestamp_data, long_data, start_long, end_long, riak_ts_table = make_table_with_data(N, M, useLong, spark_context, riak_client)
temp_filter = make_filter(useLong, unix_time_seconds(start_timestamp), unix_time_seconds(end_timestamp))
result = sql_context.read.format("org.apache.spark.sql.riak") \
.option("spark.riakts.bindings.timestamp", "useTimestamp") \
.option("spark.riak.input.split.count", str(S)) \
.option("spark.riak.partitioning.ts-range-field-name", "datetime") \
.load(riak_ts_table_name).filter(temp_filter)
assert sorted(result.collect(), key=lambda x: x[2]) == sorted(test_df.collect(), key=lambda x: x[2])
assert result.rdd.getNumPartitions() == S
def _test_spark_df_ts_range_query_input_split_count_use_timestamp_ts_quantum(N, M, S,spark_context, riak_client, sql_context):
useLong=False
start_timestamp, end_timestamp, riak_ts_table_name, test_df, test_rdd, timestamp_data, long_data, start_long, end_long, riak_ts_table = make_table_with_data(N, M, useLong, spark_context, riak_client)
temp_filter = make_filter(useLong, unix_time_seconds(start_timestamp), unix_time_seconds(end_timestamp))
result = sql_context.read.format("org.apache.spark.sql.riak") \
.option("spark.riakts.bindings.timestamp", "useTimestamp") \
.option("spark.riak.partitioning.ts-quantum", "24h") \
.option("spark.riak.input.split.count", str(S)) \
.option("spark.riak.partitioning.ts-range-field-name", "datetime") \
.load(riak_ts_table_name).filter(temp_filter)
assert sorted(result.collect(), key=lambda x: x[2]) == sorted(test_df.collect(), key=lambda x: x[2])
assert result.rdd.getNumPartitions() == S
###### Riak KV Tests ######
def _test_spark_rdd_write_kv(N, spark_context, riak_client, sql_context):
test_bucket_name = "test-bucket-"+str(randint(0,100000))
source_rdd, source_data, test_data, keys, bad_keys = make_kv_data(N, spark_context)
source_rdd.saveToRiak(test_bucket_name, "default")
test_data = [{x.key: x.data} for x in riak_client.bucket(test_bucket_name).multiget(keys)]
assert sorted(source_data) == sorted(test_data)
def _test_spark_rdd_kv_read_query_all(N, spark_context, riak_client, sql_context):
test_bucket_name = "test-bucket-"+str(randint(0,100000))
source_rdd, source_data, test_data, keys, bad_keys = make_kv_data(N, spark_context)
source_rdd.saveToRiak(test_bucket_name, "default")
result = spark_context.riakBucket(test_bucket_name).queryAll()
assert sorted(result.collect(), key=lambda x: x[0]) == sorted(test_data, key=lambda x: x[0])
def _test_spark_rdd_kv_read_query_bucket_keys(N, spark_context, riak_client, sql_context):
test_bucket_name = "test-bucket-"+str(randint(0,100000))
source_rdd, source_data, test_data, keys, bad_keys = make_kv_data(N, spark_context)
source_rdd.saveToRiak(test_bucket_name)
result = spark_context.riakBucket(test_bucket_name).queryBucketKeys(*keys)
assert sorted(result.collect(), key=lambda x: x[0]) == sorted(test_data, key=lambda x: x[0])
result = spark_context.riakBucket(test_bucket_name).queryBucketKeys(*bad_keys)
assert sorted(result.collect(), key=lambda x: x[0]) == sorted([], key=lambda x: x[0])
def _test_spark_rdd_kv_read_query_2i_keys(N, spark_context, riak_client, sql_context):
test_bucket_name = "test-bucket-"+str(randint(0,100000))
test_data, string2i, integer2i, partitions, bad_partitions = make_kv_data_2i(N, test_bucket_name, riak_client)
result = spark_context.riakBucket(test_bucket_name).query2iKeys('string_index', *string2i)
assert sorted(result.collect(), key=lambda x: x[0]) == sorted(test_data, key=lambda x: x[0])
result = spark_context.riakBucket(test_bucket_name).query2iKeys('integer_index', *integer2i)
assert sorted(result.collect(), key=lambda x: x[0]) == sorted(test_data, key=lambda x: x[0])
def _test_spark_rdd_kv_read_query2iRange(N, spark_context, riak_client, sql_context):
test_bucket_name = "test-bucket-"+str(randint(0,100000))
test_data, string2i, integer2i, partitions, bad_partitions = make_kv_data_2i(N, test_bucket_name, riak_client)
result = spark_context.riakBucket(test_bucket_name).query2iRange('integer_index', integer2i[0], integer2i[-1])
assert sorted(result.collect(), key=lambda x: x[0]) == sorted(test_data, key=lambda x: x[0])
result = spark_context.riakBucket(test_bucket_name).query2iRange('integer_index', N, 2*N)
assert sorted(result.collect(), key=lambda x: x[0]) == sorted([], key=lambda x: x[0])
def _test_spark_rdd_kv_read_partition_by_2i_range(N, spark_context, riak_client, sql_context):
test_bucket_name = "test-bucket-"+str(randint(0,100000))
test_data, string2i, integer2i, partitions, bad_partitions = make_kv_data_2i(N, test_bucket_name, riak_client)
result = spark_context.riakBucket(test_bucket_name).partitionBy2iRanges('integer_index', *partitions)
assert sorted(result.collect(), key=lambda x: x[0]) == sorted(test_data, key=lambda x: x[0])  # '==', not ',': the comma made this assert an always-true tuple
assert result.getNumPartitions() == N
result = spark_context.riakBucket(test_bucket_name).partitionBy2iRanges('integer_index', *bad_partitions)
assert sorted(result.collect(), key=lambda x: x[0]) == sorted([], key=lambda x: x[0])
assert result.getNumPartitions() == N
def _test_spark_rdd_kv_read_partition_by_2i_keys(N, spark_context, riak_client, sql_context):
test_bucket_name = "test-bucket-"+str(randint(0,100000))
test_data, string2i, integer2i, partitions, bad_partitions = make_kv_data_2i(N, test_bucket_name, riak_client)
result = spark_context.riakBucket(test_bucket_name).partitionBy2iKeys('string_index', *string2i)
assert sorted(result.collect(), key=lambda x: x[0]) == sorted(test_data, key=lambda x: x[0])  # same comma-vs-'==' fix as above
assert result.getNumPartitions() == N
bad_strings = ['no', 'nein', 'net']
result = spark_context.riakBucket(test_bucket_name).partitionBy2iKeys('string_index', *bad_strings)
assert sorted(result.collect(), key=lambda x: x[0]) == sorted([], key=lambda x: x[0])
assert result.getNumPartitions() == len(bad_strings)
###### Run Tests ######
# def test_con(spark_context, riak_client, sql_context):
# _test_connection(spark_context, riak_client, sql_context)
###### KV Tests #######
@pytest.mark.riakkv
def test_kv_write(spark_context, riak_client, sql_context):
_test_spark_rdd_write_kv(10, spark_context, riak_client, sql_context)
@pytest.mark.riakkv
def test_kv_query_all(spark_context, riak_client, sql_context):
_test_spark_rdd_kv_read_query_all(10, spark_context, riak_client, sql_context)
@pytest.mark.riakkv
def test_kv_query_bucket_keys(spark_context, riak_client, sql_context):
_test_spark_rdd_kv_read_query_bucket_keys(10, spark_context, riak_client, sql_context)
@pytest.mark.riakkv
def test_kv_query_2i_keys(spark_context, riak_client, sql_context):
_test_spark_rdd_kv_read_query_2i_keys(10, spark_context, riak_client, sql_context)
@pytest.mark.riakkv
def test_kv_query_2i_range(spark_context, riak_client, sql_context):
_test_spark_rdd_kv_read_query2iRange(10, spark_context, riak_client, sql_context)
@pytest.mark.riakkv
def test_kv_query_partition_by_2i_range(spark_context, riak_client, sql_context):
_test_spark_rdd_kv_read_partition_by_2i_range(10, spark_context, riak_client, sql_context)
@pytest.mark.riakkv
def test_kv_query_partition_by_2i_keys(spark_context, riak_client, sql_context):
_test_spark_rdd_kv_read_partition_by_2i_keys(10, spark_context, riak_client, sql_context)
#
# if object values are JSON objects with more than 4 keys exception happens
# https://github.com/basho/spark-riak-connector/issues/206
@pytest.mark.regression
@pytest.mark.riakkv
def test_read_JSON_value_with_more_then_4_fields(spark_context, riak_client):
bucket = riak_client.bucket("test-bucket-"+str(randint(0,100000)))
item = bucket.new("test-key")
item.data = {'field1': 'abc',
'field2': 'def',
'field3': 'ABC123',
'field4': 'home',
'field5': '10',
'field6': '10.0.0.1',
'field7': '1479398907',
'field8': '1479398907',
'field9': 'DEF456,GHI789',
'field11': 'JKL000',
'field12': 'abc'}
item.store()
result = spark_context.riakBucket(bucket.name).queryBucketKeys("test-key").collect()
#
# if object value is a JSON object that contains a List of values, exception raised
# https://bashoeng.atlassian.net/browse/SPARK-275
#
@pytest.mark.regression
@pytest.mark.riakkv
def test_read_JSON_value_with_an_empty_list (spark_context, riak_client):
bucket = riak_client.bucket("test-bucket-"+str(randint(0,100000)))
item = bucket.new("test-key")
item.data = {u'client_ip': u'172.16.58.3',
u'created_time': 1481562884357,
u'event_keys': []}
item.store()
result = spark_context.riakBucket(bucket.name).queryBucketKeys("test-key").collect()
#
# if object value is a JSON object that contains a List of values, exception raised
# https://bashoeng.atlassian.net/browse/SPARK-275
#
@pytest.mark.regression
@pytest.mark.riakkv
def test_read_JSON_value_with_not_empty_list (spark_context, riak_client):
bucket = riak_client.bucket("test-bucket-"+str(randint(0,100000)))
item = bucket.new("test-key")
item.data = {"session_ids":["t_sess_1401"],
"last_active_time":1481562896697,
"ecompany":"test.riak.ecompany.com.1"}
item.store()
result = spark_context.riakBucket(bucket.name).queryBucketKeys("test-key").collect()
#
# if object value is a JSON object that contains an empty Object, exception raised
# https://bashoeng.atlassian.net/browse/SPARK-281
#
@pytest.mark.regression
@pytest.mark.riakkv
def test_read_JSON_value_with_an_empty_map (spark_context, riak_client):
bucket = riak_client.bucket("test-bucket-"+str(randint(0,100000)))
item = bucket.new("test-key-empty-object")
item.data = {u'client_ip': u'172.16.58.3',
u'created_time': 1481562884357,
u'event_keys': {}}
item.store()
result = spark_context.riakBucket(bucket.name).queryBucketKeys("test-key-empty-object").collect()
###### TS Tests #######
@pytest.mark.riakts
def test_ts_df_write_use_timestamp(spark_context, riak_client, sql_context):
_test_spark_df_ts_write_use_timestamp(10, 5, spark_context, riak_client, sql_context)
@pytest.mark.riakts
def test_ts_df_write_use_long(spark_context, riak_client, sql_context):
_test_spark_df_ts_write_use_long(10, 5, spark_context, riak_client, sql_context)
@pytest.mark.riakts
def test_ts_df_read_use_timestamp(spark_context, riak_client, sql_context):
_test_spark_df_ts_read_use_timestamp(10, 5, spark_context, riak_client, sql_context)
@pytest.mark.riakts
def test_ts_df_read_use_long(spark_context, riak_client, sql_context):
_test_spark_df_ts_read_use_long(10, 5, spark_context, riak_client, sql_context)
@pytest.mark.riakts
def test_ts_df_read_use_timestamp_ts_quantum(spark_context, riak_client, sql_context):
_test_spark_df_ts_read_use_timestamp_ts_quantum(10, 5, spark_context, riak_client, sql_context)
@pytest.mark.riakts
def test_ts_df_read_use_long_ts_quantum(spark_context, riak_client, sql_context):
_test_spark_df_ts_read_use_long_ts_quantum(10, 5, spark_context, riak_client, sql_context)
@pytest.mark.riakts
def test_ts_df_range_query_input_split_count_use_timestamp(spark_context, riak_client, sql_context):
_test_spark_df_ts_range_query_input_split_count_use_timestamp(10, 5, 3, spark_context, riak_client, sql_context)
@pytest.mark.riakts
def test_ts_df_range_query_input_split_count_use_long(spark_context, riak_client, sql_context):
_test_spark_df_ts_range_query_input_split_count_use_long(10, 5, 3, spark_context, riak_client, sql_context)
@pytest.mark.riakts
def test_ts_df_range_query_input_split_count_use_timestamp_ts_quantum(spark_context, riak_client, sql_context):
_test_spark_df_ts_range_query_input_split_count_use_timestamp_ts_quantum(10, 5, 3, spark_context, riak_client, sql_context)
@pytest.mark.riakts
def test_ts_df_range_query_input_split_count_use_long_ts_quantum(spark_context, riak_client, sql_context):
_test_spark_df_ts_range_query_input_split_count_use_long_ts_quantum(10, 5, 3, spark_context, riak_client, sql_context)
# --- id: 94736 ---
from stochastic.processes.continuous import *
from stochastic.processes.diffusion import *
from stochastic.processes.discrete import *
from stochastic.processes.noise import *
# --- id: 94763 ---
import six
try:
from collections.abc import MutableMapping, Sequence  # Python 3.3+
except ImportError:
from collections import MutableMapping, Sequence  # Python 2 fallback
from chef.base import ChefObject
from chef.exceptions import ChefError
class NodeAttributes(MutableMapping):
"""A collection of Chef :class:`~chef.Node` attributes.
Attributes can be accessed like a normal python :class:`dict`::
print node['fqdn']
node['apache']['log_dir'] = '/srv/log'
When writing to new attributes, any dicts required in the hierarchy are
created automatically.
.. versionadded:: 0.1
"""
def __init__(self, search_path=[], path=None, write=None):
if not isinstance(search_path, Sequence):
search_path = [search_path]
self.search_path = search_path
self.path = path or ()
self.write = write
def __iter__(self):
keys = set()
for d in self.search_path:
keys |= set(six.iterkeys(d))
return iter(keys)
def __len__(self):
return sum(1 for _ in self)
def __getitem__(self, key):
for d in self.search_path:
if key in d:
value = d[key]
break
else:
raise KeyError(key)
if not isinstance(value, dict):
return value
new_search_path = []
for d in self.search_path:
new_d = d.get(key, {})
if not isinstance(new_d, dict):
# Structural mismatch
new_d = {}
new_search_path.append(new_d)
return self.__class__(new_search_path, self.path+(key,), write=self.write)
def __setitem__(self, key, value):
if self.write is None:
raise ChefError('This attribute is not writable')
dest = self.write
for path_key in self.path:
dest = dest.setdefault(path_key, {})
dest[key] = value
def __delitem__(self, key):
if self.write is None:
raise ChefError('This attribute is not writable')
dest = self.write
for path_key in self.path:
dest = dest.setdefault(path_key, {})
del dest[key]
def has_dotted(self, key):
"""Check if a given dotted key path is present. See :meth:`.get_dotted`
for more information on dotted paths.
.. versionadded:: 0.2
"""
try:
self.get_dotted(key)
except KeyError:
return False
else:
return True
def get_dotted(self, key):
"""Retrieve an attribute using a dotted key path. A dotted path
is a string of the form `'foo.bar.baz'`, with each `.` separating
hierarchy levels.
Example::
node.attributes['apache']['log_dir'] = '/srv/log'
print node.attributes.get_dotted('apache.log_dir')
"""
value = self
for k in key.split('.'):
if not isinstance(value, NodeAttributes):
raise KeyError(key)
value = value[k]
return value
def set_dotted(self, key, value):
"""Set an attribute using a dotted key path. See :meth:`.get_dotted`
for more information on dotted paths.
Example::
node.attributes.set_dotted('apache.log_dir', '/srv/log')
"""
dest = self
keys = key.split('.')
last_key = keys.pop()
for k in keys:
if k not in dest:
dest[k] = {}
dest = dest[k]
if not isinstance(dest, NodeAttributes):
raise ChefError
dest[last_key] = value
def to_dict(self):
merged = {}
for d in reversed(self.search_path):
merged.update(d)
return merged
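# A small usage sketch (hypothetical attribute data):
#   attrs = NodeAttributes([{'apache': {'log_dir': '/var/log/apache2'}}], write={})
#   attrs.get_dotted('apache.log_dir')    # -> '/var/log/apache2'
#   attrs.set_dotted('apache.port', 80)   # creates intermediate dicts in the write dict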
class Node(ChefObject):
"""A Chef node object.
The Node object can be used as a dict-like object directly, as an alias for
the :attr:`.attributes` data::
>>> node = Node('name')
>>> node['apache']['log_dir']
'/var/log/apache2'
.. versionadded:: 0.1
.. attribute:: attributes
:class:`~chef.node.NodeAttributes` corresponding to the composite of all
precedence levels. This only uses the stored data on the Chef server,
it does not merge in attributes from roles or environments on its own.
::
>>> node.attributes['apache']['log_dir']
'/var/log/apache2'
.. attribute:: run_list
The run list of the node. This is the unexpanded list in ``type[name]``
format.
::
>>> node.run_list
['role[base]', 'role[app]', 'recipe[web]']
.. attribute:: chef_environment
The name of the Chef :class:`~chef.Environment` this node is a member
of. This value will still be present, even if communicating with a Chef
0.9 server, but will be ignored.
.. versionadded:: 0.2
.. attribute:: default
:class:`~chef.node.NodeAttributes` corresponding to the ``default``
precedence level.
.. attribute:: normal
:class:`~chef.node.NodeAttributes` corresponding to the ``normal``
precedence level.
.. attribute:: override
:class:`~chef.node.NodeAttributes` corresponding to the ``override``
precedence level.
.. attribute:: automatic
:class:`~chef.node.NodeAttributes` corresponding to the ``automatic``
precedence level.
"""
url = '/nodes'
attributes = {
'default': NodeAttributes,
'normal': lambda d: NodeAttributes(d, write=d),
'override': NodeAttributes,
'automatic': NodeAttributes,
'run_list': list,
'chef_environment': str
}
def has_key(self, key):
return self.attributes.has_dotted(key)
def get(self, key, default=None):
return self.attributes.get(key, default)
def __getitem__(self, key):
return self.attributes[key]
def __setitem__(self, key, value):
self.attributes[key] = value
def _populate(self, data):
if not self.exists:
# Make this exist so the normal<->attributes cross-link will
# function correctly
data['normal'] = {}
data.setdefault('chef_environment', '_default')
super(Node, self)._populate(data)
self.attributes = NodeAttributes((data.get('automatic', {}),
data.get('override', {}),
data['normal'], # Must exist, see above
data.get('default', {})), write=data['normal'])
def cookbooks(self, api=None):
api = api or self.api
return api[self.url + '/cookbooks']
# --- id: 94784 ---
from __future__ import absolute_import
from __future__ import print_function
import logging
import numpy as np
from numpy.lib.recfunctions import append_fields
from sklearn.cluster import DBSCAN
from lmatools.coordinateSystems import GeographicSystem
from lmatools.flashsort.flash_stats import calculate_flash_stats, Flash
def gen_stream(vec, IDs):  # pair each point vector with its unique index
for v, vi in zip(vec, IDs):
yield (v, vi)
def reset_buffer():
buf = []
return buf, buf.append
def gen_chunks(stream, start_time, max_duration, t_idx=-1):
""" Generator function that consumes a stream of points, one at a
time, and their unique index. These points are bundled together
into a chunks of length max_duration along the time coordinate.
For each point vector v, the time coordinate is given by v[t_idx]
"""
next_time = start_time + max_duration
v_buffer, append = reset_buffer() # slight optimization since attr lookup is avoided
i_buffer, append_idx = reset_buffer()
for v, vi in stream:
append(v)
append_idx(vi)
t = v[t_idx]
if t >= next_time:
yield (np.asarray(v_buffer), np.asarray(i_buffer))
v_buffer, append = reset_buffer()
i_buffer, append_idx = reset_buffer()
next_time = t+max_duration
yield (np.asarray(v_buffer), np.asarray(i_buffer))
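# Sketch of how the generators compose (hypothetical point vectors with time in the
# last column); chunks of up to max_duration seconds are emitted as (points, ids) pairs:
#   stream = gen_stream(points, np.arange(len(points)))
#   for chunk, ids in gen_chunks(stream, start_time=points[0][-1], max_duration=600):
#       ...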
class ChunkedFlashSorter(object):
"""
Sort LMA data from points to flashes using many small chunks
of points. Allows for algorithms that do not scale efficiently with
large numbers of points.
The __init__ and geo_to_cartesian
methods are more generally useful, and could be factored out into a
generic flash sorting class.
The actual clustering algorithm must be implemented in identify_clusters.
A prototype method is provided below which indicates the necessary call
signature.
"""
def __init__(self, params, min_points=1, **kwargs):
"""
params: dictionary of parameters used to perform data QC and clustering
min_points: the minimum number of points allowed in a cluster
"""
self.logger = logging.getLogger('FlashAutorunLogger')
self.logger.info('%s', params)
self.params = params
self.min_points = min_points
self.ctr_lat, self.ctr_lon, self.ctr_alt = (
params['ctr_lat'], params['ctr_lon'], 0.0)
def geo_to_cartesian(self, lon, lat, alt):
""" Convert lat, lon in degrees and altitude in meters to
Earth-centered, Earth-fixed cartesian coordinates. Translate
to coordinate center location. Returns X,Y,Z in meters.
"""
geoCS = GeographicSystem()
X,Y,Z = geoCS.toECEF(lon, lat, alt)
Xc, Yc, Zc = geoCS.toECEF(self.ctr_lon, self.ctr_lat, self.ctr_alt)
X, Y, Z = X-Xc, Y-Yc, Z-Zc
return (X, Y, Z)
def identify_clusters(self, data):
""" For data with shape (N, D) in D dimensions, return
a vector of labels of length N.
min_points is the minimum number of points required to form a
a cluster. For the DBSCAN algorithm, this is min_samples for
a core cluster.
This function adopts the convention that clusters labeled
with an ID of -1 are singleton points not belonging to a
cluster, consistent with the convention of sklearn.cluster.DBSCAN
"""
err = "Please create a new subclass and implement this method"
raise NotImplementedError(err)
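# A concrete implementation sketch (an assumption for illustration, not part of this
# class): a subclass could implement identify_clusters with the DBSCAN import above, e.g.
#   db = DBSCAN(eps=self.params['distance'], min_samples=self.min_points, metric='euclidean')
#   return db.fit(data).labels_
# where params['distance'] is an assumed clustering radius; DBSCAN already labels
# singletons with -1, matching the convention documented above.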
def gen_cluster_chunk_pairs(self, stream):
""" Generator function that consumes a stream of chunks of data,
and processes overlapping pairs. The stream is to consist of
tuples of (chunk, pt_id), where pt_id is a unique index for
each vector in chunk.
Chunk is of shape (N, D) for N point vectors in D dimensions
pt_id has shape (N,)
Calls self.identify_clusters, which returns a vector N labels.
The labels are presumed to adopt the convention that clusters labeled
with an ID of -1 are singleton points not belonging to a
cluster, consistent with the convention of sklearn.cluster.DBSCAN
"""
chunk1, id1 = next(stream)
for chunk2, id2 in stream:
len1 = chunk1.shape[0]
len2 = chunk2.shape[0]
if len2 == 0:
conc = chunk1
concID = id1
chunk2 = chunk1[0:0,:]
id2 = id1[0:0]
elif len1 == 0:
conc = chunk2
concID = id2
chunk1 = chunk2[0:0,:]
id1 = id2[0:0]
else:
print(id1.shape, id2.shape)
conc = np.vstack((chunk1, chunk2))
concID = np.concatenate((id1, id2))
# do stuff with chunk 1 and 2
labels = self.identify_clusters(conc)
# defer sending these in one bundle ... need to ensure all labels
# from this run of clustering stay together
# clustered_output_target.send((chunk1, labels[:len1])) IS BAD
# pull data out of chunk2 that was clustered as part of chunk 1
chunk1_labelset = set(labels[:len1])
if -1 in chunk1_labelset:
chunk1_labelset.remove(-1) # remove the singleton cluster ID - we want to retain these from chunk 2.
clustered_in_chunk2 = np.fromiter((label in chunk1_labelset for label in labels[len1:]), dtype=bool)
clustered_in_chunk1 = np.ones(chunk1.shape[0], dtype = bool)
clustered_mask = np.hstack((clustered_in_chunk1, clustered_in_chunk2))
bundle_chunks = conc[clustered_mask,:]
bundle_IDs = concID[clustered_mask]
bundle_labels = np.concatenate((labels[:len1], labels[len1:][clustered_in_chunk2]))
assert bundle_chunks.shape[0] == bundle_labels.shape[0]
yield (bundle_chunks, bundle_labels, bundle_IDs)
del bundle_chunks, bundle_labels
# clustered_output_target.send((chunk2[clustered_in_chunk2], labels[len1:][clustered_in_chunk2]))
residuals = conc[clustered_mask==False,:]
# Because we pull some points from chunk2 and combine them with
# flashes that started in chunk1, the data are now out of their
# original order. Therefore, send along the data IDs that go with the
# pulled points so that the original order is still tracked.
residualIDs = concID[clustered_mask==False]
# optimization TODO: pull clusters out of chunk 2 whose final point is greater
# than the distance threshold from the end of the second chunk interval. They're already clustered
# and don't need to be clustered again.
# prepare for another chunk
if len(residuals) == 0:
residuals = chunk1[0:0,:] # empty array that preserves the number of dimensions in the data vector - no obs.
residualIDs = id1[0:0]
del chunk1, id1
chunk1 = np.asarray(residuals)
id1 = np.asarray(residualIDs)
del residuals, residualIDs
if chunk1.shape[0] != 0:
labels = self.identify_clusters(chunk1)
yield (chunk1, labels, id1)
def aggregate_ids(self, stream):
""" Final step in streamed clustering: consume clustered output from
one or more chunks of data, ensuring that the IDs increment
across chunk boundaries.
"""
# TODO: remove v from loop below; not needed.
unique_labels = set([-1])
total = 0
point_labels = []
all_IDs = []
# all_v = []
# n_last = 0
for (v, orig_labels, IDs) in stream:
labels = np.atleast_1d(orig_labels).copy()
if len(unique_labels) > 0:
# Only add those labels that represent valid clusters (nonnegative) to the unique set.
# Make sure labels increment continuously across all chunks received
nonsingleton = (labels >= 0)
labels[nonsingleton] = labels[nonsingleton] + (max(unique_labels) + 1)
for l in set(labels):
unique_labels.add(l)
all_IDs.append(np.asarray(IDs))
point_labels.append(labels)
total += v.shape[0]
del v, orig_labels, labels, IDs
print("done with {0} total points".format(total))
if total == 0:
point_labels = np.asarray(point_labels, dtype=int)
all_IDs = np.asarray(all_IDs, dtype=int)
else:
point_labels = np.concatenate(point_labels)
all_IDs = np.concatenate(all_IDs)
print("returning {0} total points".format(total))
return (unique_labels, point_labels, all_IDs)
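# Worked example of the offsetting rule above (illustrative values): if the
# running label set so far is {-1, 0, 1} and a new chunk arrives with labels
# [0, 1, 1, -1], the nonnegative entries are shifted by max+1 = 2 to give
# [2, 3, 3, -1]; singletons (-1) are never shifted.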
def create_flash_objs(self, lma, good_data, unique_labels, point_labels, all_IDs):
""" lma is an LMADataset object. Its data instance gets overwritten
with the qc'd, flash_id'd data, and it gains a flashes attribute
with a list of flash objects resulting from flash sorting.
all_IDs gives the index in the original data array to
which each point_label corresponds.
unique_labels is the set of all labels produced by previous stages
in the flash sorting algorithm, including a -1 ID for all singleton flashes.
"""
logger = self.logger
# add flash_id column
empty_labels = np.empty_like(point_labels)
# placing good_data in a list due to this bug when good_data has length 1
# http://stackoverflow.com/questions/36440557/typeerror-when-appending-fields-to-a-structured-array-of-size-one
if 'flash_id' not in good_data.dtype.names:
data = append_fields([good_data], ('flash_id',), (empty_labels,))
else:
data = good_data.copy()
# all_IDs gives the index in the original data array to
# which each point_label corresponds
data['flash_id'][all_IDs] = point_labels
# In the case of no data, lma.data.shape will have
# length zero, i.e., a 0-d array
if (len(data.shape) == 0) or (data.shape[0] == 0): # or short-circuits so 0-d arrays are not indexed
# No data
flashes = []
else:
# work first with non-singleton flashes
# to have strictly positive flash ids
print(data.shape)
singles = (data['flash_id'] == -1)
non_singleton = data[ np.logical_not(singles) ]
print(non_singleton['flash_id'].shape)
order = np.argsort(non_singleton['flash_id'])
ordered_data = non_singleton[order]
flid = ordered_data['flash_id']
if (flid.shape[0]>0):
max_flash_id = flid[-1]
else:
max_flash_id = 0
try:
assert max_flash_id == max(unique_labels)
except AssertionError:
print("Max flash ID {0} is not as expected from unique labels {1}".format(max_flash_id, max(unique_labels)))
boundaries, = np.where(flid[1:]-flid[:-1]) # indices where consecutive flash ids differ
boundaries = np.hstack(([0], boundaries+1))
max_idx = len(flid) #- 1
slice_lower_edges = tuple(boundaries)
slice_upper_edges = slice_lower_edges[1:] + (max_idx,)
slices = list(zip(slice_lower_edges, slice_upper_edges))
flashes = [ Flash(ordered_data[slice(*sl)]) for sl in slices ]
print("finished non-singletons")
# Now deal with the singleton points.
# Each singleton point will have a high flash_id,
# starting with the previous maximum flash id.
singleton = data[singles]
n_singles = singleton.shape[0]
# this operation works on a view of the original data array,
# so it modifies the original data array
singleton['flash_id'] += max_flash_id + 1 + np.arange(n_singles, dtype=int)
singleton_flashes = [ Flash(singleton[i:i+1]) for i in range(n_singles)]
data[singles] = singleton
print("finished singletons")
flashes += singleton_flashes
logtext = "Calculating flash initation, centroid, area, etc. for %d flashes" % (len(flashes), )
logger.info(logtext)
# print flashes[0].points.dtype
for fl in flashes:
# header = ''.join(lma.header)
fl.metadata = lma.metadata #FlashMetadata(header)
calculate_flash_stats(fl)
# logger.info(fl.points.shape[0])
logger.info('finished setting flash metadata')
lma.raw_data = lma.data
lma.data = data
assert (lma.data['flash_id'].min() >=0) # this should be true since the singletons were modified in the original data array above
lma.flashes = flashes
def perform_chunked_clustering(self, XYZT, ptIDs, chunk_duration):
""" Perform clustering of a 4D data vector in overlapping chunks of
data,
XYZT: (N,4) array of N 4D point vectors
ptIDs: array of N unique identifiers of each point vector.
chunk_duration: duration of chunk in the units along the T coordinate
"""
if XYZT.shape[0] < 1:
# no data, so minimum time is zero. Assume nothing is done with the
# data, so that time doesn't matter. No flashes can result.
t_min = 0
else:
t_min = XYZT[:,-1].min()
point_stream = gen_stream(XYZT.astype('float64'), ptIDs)
chunk_stream = gen_chunks(point_stream, t_min, chunk_duration)
cluster_stream = self.gen_cluster_chunk_pairs(chunk_stream)
unique_labels, point_labels, all_IDs = self.aggregate_ids(cluster_stream)
return unique_labels, point_labels, all_IDs
def cluster(self, dataset, **kwargs):
""" Cluster an lmatools LMADataset provided in the dataset argument.
Basic filtering of the data is performed by calling the filter_data
method of the dataset, which returns a filtered data array. The
params are provided by the argument used to initialize this class.
This method modifies dataset as a side effect.
"""
data = dataset.filter_data(self.params)
print("sorting {0} total points".format(data.shape[0]))
# Transform to cartesian coordinates
X, Y, Z = self.geo_to_cartesian(data['lon'], data['lat'], data['alt'])
# Assemble a normalized data vector using flash sorting parameters
D_max, t_max = (self.params['distance'], # meters
self.params['thresh_critical_time']) # seconds
duration_max = self.params['thresh_duration'] # seconds
IDs = np.arange(X.shape[0]) # Vector of unique point IDs
X_vector = np.hstack((X[:,None],Y[:,None],Z[:,None])) / D_max
T_vector = data['time'][:,None] / t_max
XYZT = np.hstack((X_vector, T_vector))
# Perform chunked clustering of the data
normed_chunk_duration = duration_max/t_max
unique_labels, point_labels, all_IDs = self.perform_chunked_clustering(XYZT, IDs, normed_chunk_duration)
# Calculate flash metadata and store it in flash objects
# This should be factored out into something that modifies the data table
# and something that creates the flash objects themselves
self.create_flash_objs(dataset, data, unique_labels, point_labels, all_IDs)
class DBSCANFlashSorter(ChunkedFlashSorter):
def identify_clusters(self, data):
""" For data with shape (N, D) in D dimensions, return
a vector of labels of length N.
min_points is the minimum number of points required to form
a cluster. For the DBSCAN algorithm, this is min_samples for
a core cluster.
This function adopts the convention that clusters labeled
with an ID of -1 are singleton points not belonging to a
cluster, consistent with the convention of sklearn.cluster.DBSCAN
"""
db = DBSCAN(eps=1.0, min_samples=self.min_points, metric='euclidean')
clusters = db.fit(data)
labels = clusters.labels_.astype(int)
return labels
|
94791
|
from . import matplotlib_fix
from . import ipynb
from . import colormap
from . import plot
from .context_manager import LatexContextManager
from .context_manager import figure_context
from .context_manager import axes_context
from .module_facet_grid import facet_grid
from .module_facet_grid import facet_grid_zero_space_time_frequency_plot
from .plot import time_series
from .display_pdf import PDF
|
94794
|
import numpy as np
import quaternion
def from_tqs_to_matrix(translation, quater, scale):
"""
(T(3), Q(4), S(3)) -> 4x4 Matrix
:param translation: 3 dim translation vector (np.array or list)
:param quater: 4 dim rotation quaternion (np.array or list)
:param scale: 3 dim scale vector (np.array or list)
:return: 4x4 transformation matrix
"""
q = np.quaternion(quater[0], quater[1], quater[2], quater[3])
T = np.eye(4)
T[0:3, 3] = translation
R = np.eye(4)
R[0:3, 0:3] = quaternion.as_rotation_matrix(q)
S = np.eye(4)
S[0:3, 0:3] = np.diag(scale)
M = T.dot(R).dot(S)
return M
def apply_transform(points, *args):
"""
points = points x args[0] x args[1] x args[2] x ... args[-1]
:param points: np.array N x (3|4)
:param args: array of transformations. May be 4x4 np.arrays, or dict {
'translation': [t1, t2, t3],
'rotation': [q1, q2, q3, q4],
'scale': [s1, s2, s3],
}
:return: transformed points
"""
# save original dimensionality and add fourth coordinate if needed
initial_dim = points.shape[-1]
if initial_dim == 3:
points = add_fourth_coord(points)
# transform each transformation to 4x4 matrix
transformations = []
for transform in args:
if type(transform) == dict:
transformations.append(from_tqs_to_matrix(
translation=transform['translation'],
quater=transform['rotation'],
scale=transform['scale']
))
else:
transformations.append(transform)
# main loop
for transform in transformations:
points = points @ transform.T
# back to original dimensionality if needed
if initial_dim == 3:
points = points[:, :3]
return points
def apply_inverse_transform(points, *args):
"""
points = points x args[0] x args[1] x args[2] x ... args[-1]
:param points: np.array N x (3|4)
:param args: array of transformations. May be 4x4 np.arrays, or dict {
'translation': [t1, t2, t3],
'rotation': [q1, q2, q3, q4],
'scale': [s1, s2, s3],
}
:return: transformed points
"""
# save original dimensionality and add fourth coordinate if needed
initial_dim = points.shape[-1]
if initial_dim == 3:
points = add_fourth_coord(points)
# transform each transformation to 4x4 matrix
transformations = []
for transform in args:
if type(transform) == dict:
t = from_tqs_to_matrix(
translation=transform['translation'],
quater=transform['rotation'],
scale=transform['scale']
)
t = np.linalg.inv(t)
transformations.append(t)
else:
t = np.linalg.inv(transform)
transformations.append(t)
# main loop
for transform in transformations:
points = points @ transform.T
# back to original dimensionality if needed
if initial_dim == 3:
points = points[:, :3]
return points
def add_fourth_coord(points):
"""fourth coordinate is constant 1"""
return np.hstack((points, np.ones((len(points), 1))))
def make_M_from_tqs(t, q, s):
q = np.quaternion(q[0], q[1], q[2], q[3])
T = np.eye(4)
T[0:3, 3] = t
R = np.eye(4)
R[0:3, 0:3] = quaternion.as_rotation_matrix(q)
S = np.eye(4)
S[0:3, 0:3] = np.diag(s)
M = T.dot(R).dot(S)
return M
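# A minimal usage sketch (hedged: the point values are illustrative and the
# quaternion is assumed to be in (w, x, y, z) order, matching np.quaternion
# above): build a transform from translation/rotation/scale, apply it, undo it.
if __name__ == "__main__":
demo_points = np.array([[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0]])
demo_tqs = {
'translation': [1.0, 2.0, 3.0],
'rotation': [1.0, 0.0, 0.0, 0.0], # identity rotation
'scale': [1.0, 1.0, 1.0],
}
moved = apply_transform(demo_points, demo_tqs)
restored = apply_inverse_transform(moved, demo_tqs)
assert np.allclose(demo_points, restored)
print(moved) # [[2. 2. 3.] [1. 4. 3.]]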
|
94823
|
import math
import json
from colormath.color_objects import RGBColor
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from common.models import UserBase, ResultBase
from common.utils import save_obj_attr_base64_image, get_content_tuple, \
get_opensurfaces_storage
from shapes.models import Shape, MaterialShape, MaterialShapeLabelBase
BSDF_VERSIONS = ("wd", ) # just Ward for now
STORAGE = get_opensurfaces_storage()
class EnvironmentMap(UserBase):
""" Environment map used with a BRDF """
name = models.CharField(max_length=128, unique=True)
# Tonemapping parameters for [Reinhard 2002, Equation 4]
# The log_average luminance is baked into the scale as a
# precomputation.
# scale: key / log_average luminance
# white: values higher than this will be set to pure white
tonemap_scale = models.FloatField()
tonemap_white = models.FloatField()
class ShapeBsdfLabelBase(MaterialShapeLabelBase):
""" Base class of BSDF labels"""
# json-encoded dictionary of counts (where each count is the number of
# times a UI element was adjusted)
edit_dict = models.TextField(blank=True)
# sum of the values in edit_dict
edit_sum = models.IntegerField(default=0)
# number of nonzero values in edit_dict
edit_nnz = models.IntegerField(default=0)
# environment map used to light the blob
envmap = models.ForeignKey(EnvironmentMap, null=True, blank=True)
# screenshot
image_blob = models.ImageField(
upload_to='blobs', null=True, blank=True, max_length=255,
storage=STORAGE)
# option to give up
give_up = models.BooleanField(default=False)
give_up_msg = models.TextField(blank=True)
# reverse generic relationship for quality votes
qualities = generic.GenericRelation(
'ShapeBsdfQuality', content_type_field='content_type',
object_id_field='object_id')
# first voting stage: color matches?
color_correct = models.NullBooleanField()
# further from 0: more confident in assignment of color_correct
color_correct_score = models.FloatField(null=True, blank=True)
# second voting stage: gloss matches?
gloss_correct = models.NullBooleanField()
# further from 0: more confident in assignment of gloss_correct
gloss_correct_score = models.FloatField(null=True, blank=True)
# The method by which the reflectance widget was initialized
INIT_METHODS = (
('KM', 'k-means color, middle value gloss'),
('KR', 'k-means color, random gloss')
)
init_method_to_str = dict(INIT_METHODS)
init_method = models.CharField(max_length=2, choices=INIT_METHODS)
# L*a*b* colorspace for matching blobs
color_L = models.FloatField(blank=True, null=True)
color_a = models.FloatField(blank=True, null=True)
color_b = models.FloatField(blank=True, null=True)
def better_than(self, other):
if self is other:
return False
elif not other:
return True
elif self.invalid != other.invalid:
return not self.invalid
elif bool(self.color_correct) != bool(other.color_correct):
return bool(self.color_correct)
elif bool(self.gloss_correct) != bool(other.gloss_correct):
return bool(self.gloss_correct)
else:
try:
return (self.color_correct_score + self.gloss_correct_score >
other.color_correct_score + other.gloss_correct_score)
except TypeError:
return True
def get_entry_dict(self):
return {'id': self.id, 'shape': self.shape.get_entry_dict()}
def mark_invalid(self, *args, **kwargs):
self.color_correct = False
self.gloss_correct = False
super(ShapeBsdfLabelBase, self).mark_invalid(*args, **kwargs)
class Meta:
abstract = True
ordering = ['-edit_nnz', '-time_ms']
#@classmethod
#def mturk_needs_more(cls, instance):
#""" Return True if more of this object should be scheduled """
#correct_list = cls.objects \
#.filter(shape=instance.shape) \
#.values_list('color_correct', 'gloss_correct')
## only schedule more if all were graded and all were rejected
#return ((not correct_list) or
#all((c[0] is False or c[1] is False) for c in correct_list))
#@classmethod
#def mturk_badness(cls, mturk_assignment):
#""" Return fraction of bad responses for this assignment """
#labels = cls.objects.filter(mturk_assignment=mturk_assignment)
#if (not labels or any((l.color_correct_score is None and
#l.gloss_correct_score is None) for l in labels)):
#return None
#bad = sum(1 for l in labels if
#l.admin_score <= -2 or
#l.time_ms is None or
#l.edit_nnz <= 1 or
#(l.color_correct_score is not None and
#l.color_correct_score < -0.5 and l.time_ms < 60000) or
#(l.gloss_correct_score is not None and
#l.gloss_correct_score < -0.5 and l.time_ms < 60000))
#if bad > 0 or any(l.color_correct_score < 0 or l.gloss_correct_score < 0 for l in labels):
#return float(bad) / float(len(labels))
## reward good rectifications
#return sum(-1.0 for l in labels if
#l.color_correct_score > 0.5 and
#l.gloss_correct_score > 0.5 and
#l.time_ms > 10000)
class ShapeBsdfLabel_mf(ShapeBsdfLabelBase):
"""
Microfacet BSDF model
** CURRENTLY UNUSED **
"""
shape = models.ForeignKey(MaterialShape, related_name='bsdfs_mf')
BSDF_TYPES = (('P', 'plastic'), ('C', 'conductor'))
bsdf_type_to_str = {k: v for k, v in BSDF_TYPES}
str_to_bsdf_type = {v: k for k, v in BSDF_TYPES}
# plastic or conductor
bsdf_type = models.CharField(max_length=1, choices=BSDF_TYPES)
alpha_index = models.IntegerField() # integer index into roughness table
specular = models.FloatField() # specular weight
color_sRGB = models.CharField(max_length=6) # "RRGGBB" hex
def type_name(self):
return ShapeBsdfLabel_mf.bsdf_type_to_str[self.bsdf_type]
@staticmethod
def version():
return 'mf'
def __unicode__(self):
return '%s alpha_index=%s color=%s' % (
self.bsdf_type, self.alpha_index, self.color_sRGB)
def get_thumb_template(self):
return 'bsdf_mf_shape_thumb.html'
@staticmethod
def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
mturk_assignment=None, **kwargs):
""" Add new instances from a mturk HIT after the user clicks [submit] """
if unicode(version) != u'1.0':
raise ValueError("Unknown version: '%s'" % version)
if not hit_contents:
return {}
raise NotImplementedError("TODO")
class ShapeBsdfLabel_wd(ShapeBsdfLabelBase):
"""
Ward BSDF model.
Note: This is the "balanced" ward-duel model with energy balance at all angles
from [<NAME>., and <NAME>. A new ward brdf model with bounded
albedo. In Computer Graphics Forum (2010), vol. 29, Wiley Online Library,
pp. 1391-1398.]. We use the implementation from Mitsuba available at
http://www.mitsuba-renderer.org.
"""
shape = models.ForeignKey(MaterialShape, related_name='bsdfs_wd')
# c in [0, 1]
contrast = models.FloatField()
# d in [0, 15] discretized alpha
doi = models.IntegerField()
# true if the 'rho_s only' was selected, false if traditional ward
metallic = models.BooleanField(default=False)
# color in "#RRGGBB" sRGB hex format
color = models.CharField(max_length=7)
@staticmethod
def version():
return 'wd'
def __unicode__(self):
return 'ward sRGB=%s' % (self.color)
def get_thumb_template(self):
return 'bsdf_wd_shape_thumb.html'
def c(self):
return self.contrast
def d(self):
return 1 - (0.001 + (15 - self.doi) * 0.2 / 15)
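# Worked example of the mapping above: doi=0 gives d = 1 - 0.201 = 0.799
# and doi=15 gives d = 1 - 0.001 = 0.999, so alpha = 1 - d (the Ward
# roughness) shrinks as doi grows.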
def d_edits(self):
return json.loads(self.edit_dict)['doi']
def c_edits(self):
return json.loads(self.edit_dict)['contrast']
def alpha(self):
return 1 - self.d()
def rho_s(self):
rho_s = self.rho()[1]
return '%0.3f, %0.3f, %0.3f' % rho_s
def rho_d(self):
rho_d = self.rho()[0]
return '%0.3f, %0.3f, %0.3f' % rho_d
def rho(self):
if not hasattr(self, '_rho'):
rgb = self.colormath_rgb()
v = self.v()
# approximate cielab_inverse_f.
# we have V instead of L, so the same inverse formula doesn't
# apply anyway.
finv = v ** 3
if self.metallic:
rho_s = finv
s = rho_s / (v * 255.0) if v > 0 else 0
self._rho = (
(0, 0, 0),
(s * rgb.rgb_r, s * rgb.rgb_g, s * rgb.rgb_b),
)
else:
rho_d = finv
t = self.contrast + (rho_d * 0.5) ** (1.0 / 3.0)
rho_s = t ** 3 - rho_d * 0.5
rho_t = rho_s + rho_d
if rho_t > 1:
rho_s /= rho_t
rho_d /= rho_t
s = rho_d / (v * 255.0) if v > 0 else 0
self._rho = (
(s * rgb.rgb_r, s * rgb.rgb_g, s * rgb.rgb_b),
(rho_s, rho_s, rho_s)
)
return self._rho
def v(self):
""" Return the V component of HSV, in the range [0, 1] """
rgb = self.colormath_rgb()
return max(rgb.rgb_r, rgb.rgb_b, rgb.rgb_g) / 255.0
def colormath_rgb(self):
if not hasattr(self, '_colormath_rgb'):
self._colormath_rgb = RGBColor()
self._colormath_rgb.set_from_rgb_hex(self.color)
return self._colormath_rgb
def colormath_lab(self):
if not hasattr(self, '_colormath_lab'):
self._colormath_lab = self.colormath_rgb().convert_to('lab')
return self._colormath_lab
def color_distance(self, bsdf):
return math.sqrt((self.color_L - bsdf.color_L) ** 2 +
(self.color_a - bsdf.color_a) ** 2 +
(self.color_b - bsdf.color_b) ** 2)
def gloss_distance(self, bsdf):
return math.sqrt((self.c() - bsdf.c()) ** 2 +
(1.78 * (self.d() - bsdf.d())) ** 2)
def save(self, *args, **kwargs):
if (self.color_L is None) or (self.color_a is None) or (self.color_b is None):
c = RGBColor()
c.set_from_rgb_hex(self.color)
c = c.convert_to('lab')
self.color_L = c.lab_l
self.color_a = c.lab_a
self.color_b = c.lab_b
super(ShapeBsdfLabel_wd, self).save(*args, **kwargs)
@staticmethod
def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
experiment, mturk_assignment=None, **kwargs):
""" Add new instances from a mturk HIT after the user clicks [submit] """
if unicode(version) != u'1.0':
raise ValueError("Unknown version: '%s'" % version)
if not hit_contents:
return {}
new_objects = {}
for shape in hit_contents:
d = results[unicode(shape.id)]
shape_time_ms = time_ms[unicode(shape.id)]
shape_time_active_ms = time_active_ms[unicode(shape.id)]
edit_dict = d[u'edit']
edit_sum = sum(int(edit_dict[k]) for k in edit_dict)
edit_nnz = sum(int(int(edit_dict[k]) > 0) for k in edit_dict)
init_method = 'KR'
envmap = EnvironmentMap.objects.get(
id=json.loads(experiment.variant)['envmap_id'])
doi = int(d[u'doi'])
contrast = float(d[u'contrast'])
metallic = (int(d[u'type']) == 1)
color = d['color']
give_up = d[u'give_up']
give_up_msg = d[u'give_up_msg']
bsdf, bsdf_created = shape.bsdfs_wd.get_or_create(
user=user,
mturk_assignment=mturk_assignment,
time_ms=shape_time_ms,
time_active_ms=shape_time_active_ms,
doi=doi,
contrast=contrast,
metallic=metallic,
color=color,
give_up=give_up,
give_up_msg=give_up_msg,
edit_dict=json.dumps(edit_dict),
edit_sum=edit_sum,
edit_nnz=edit_nnz,
envmap=envmap,
init_method=init_method,
)
if bsdf_created:
new_objects[get_content_tuple(shape)] = [bsdf]
if ((not bsdf.image_blob) and 'screenshot' in d and d['screenshot'].startswith('data:image/')):
save_obj_attr_base64_image(bsdf, 'image_blob', d['screenshot'])
return new_objects
class ShapeBsdfQuality(ResultBase):
""" Vote on whether or not a BSDF matches its shape. The foreign key to
the BSDF is generic since there are multiple BSDF models. """
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField(db_index=True)
bsdf = generic.GenericForeignKey('content_type', 'object_id')
color_correct = models.NullBooleanField()
gloss_correct = models.NullBooleanField()
canttell = models.NullBooleanField()
def __unicode__(self):
if self.canttell:
return "can't tell"
else:
if self.has_color():
if self.has_gloss():
return 'gloss: %s, color: %s' % (
self.gloss_correct, self.color_correct,
)
return 'color: %s' % self.color_correct
elif self.has_gloss():
return 'gloss: %s' % self.gloss_correct
else:
return 'INVALID LABEL'
def get_thumb_template(self):
return 'bsdf_%s_shape_label_thumb.html' % (
self.content_type.model_class().version())
def has_color(self):
return self.color_correct is not None
def has_gloss(self):
return self.gloss_correct is not None
class Meta:
verbose_name = "BSDF quality vote"
verbose_name_plural = "BSDF quality votes"
#@classmethod
#def mturk_badness(cls, mturk_assignment):
#""" Return fraction of bad responses for this assignment """
#labels = cls.objects.filter(mturk_assignment=mturk_assignment)
#if not labels:
#return None
#if any((l.color_correct is not None and
#l.bsdf.color_correct_score is None) or
#(l.gloss_correct is not None and
#l.bsdf.gloss_correct_score is None)
#for l in labels):
#return None
#bad = sum(1 for l in labels if
#(l.color_correct is not None and
#l.color_correct != l.bsdf.color_correct and
#abs(l.bsdf.color_correct_score) > 0.5) or
#(l.gloss_correct is not None and
#l.gloss_correct != l.bsdf.gloss_correct and
#abs(l.bsdf.color_correct_score) > 0.5))
#return float(bad) / float(len(labels))
#@classmethod
#def mturk_badness_reason(cls, mturk_assignment):
#labels = cls.objects.filter(mturk_assignment=mturk_assignment)
#T = sum(1 for l in labels if
#(l.color_correct is True and l.bsdf.color_correct is False) or
#(l.gloss_correct is True and l.bsdf.gloss_correct is False))
#F = sum(1 for l in labels if
#(l.color_correct is False and l.bsdf.color_correct is True) or
#(l.gloss_correct is False and l.bsdf.gloss_correct is True))
#if T > F * 1.5:
#return 'T'
#elif F > T * 1.5:
#return 'F'
#return None
@staticmethod
def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
experiment, mturk_assignment=None, **kwargs):
""" Add new instances from a mturk HIT after the user clicks [submit] """
if unicode(version) != u'1.0':
raise ValueError("Unknown version: '%s'" % version)
if not hit_contents:
return {}
# best we can do is average
avg_time_ms = time_ms / len(hit_contents)
avg_time_active_ms = time_active_ms / len(hit_contents)
new_objects = {}
for bsdf in hit_contents:
selected = (str(results[unicode(bsdf.id)]['selected']).lower()
== 'true')
canttell = (str(results[unicode(bsdf.id)]['canttell']).lower()
== 'true')
color_correct = None
gloss_correct = None
if 'color' in experiment.slug:
color_correct = selected
elif 'gloss' in experiment.slug:
gloss_correct = selected
content_tuple = get_content_tuple(bsdf)
new_obj, created = ShapeBsdfQuality.objects.get_or_create(
content_type=ContentType.objects.get_for_id(content_tuple[0]),
object_id=content_tuple[1],
user=user,
mturk_assignment=mturk_assignment,
time_ms=avg_time_ms,
time_active_ms=avg_time_active_ms,
color_correct=color_correct,
gloss_correct=gloss_correct,
canttell=canttell
)
if created:
new_objects[content_tuple] = [new_obj]
return new_objects
|
94855
|
from django.apps import AppConfig
class Config(AppConfig):
name = "paywall"
verbose_name = "paywall simulator"
label = "paywall"
|
94860
|
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import CosineMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestCosineMatrixAttention(AllenNlpTestCase):
def test_can_init_cosine(self):
legacy_attention = MatrixAttention.from_params(Params({"type": "cosine"}))
assert isinstance(legacy_attention, CosineMatrixAttention)
def test_cosine_similarity(self):
# example use case: a batch of size 2.
# With a time element component (e.g. sentences of length 2) each word is a vector of length 3.
# It is comparing this with another input of the same type
output = CosineMatrixAttention()(
torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
# For the first batch there is
# no correlation between the first words of the input matrix
# but perfect correlation for the second word
# For the second batch there is
# negative correlation for the first words
# correlation for the second word
assert_almost_equal(
output.numpy(), numpy.array([[[0, 0], [0.97, 1]], [[-1, -0.99], [0.99, 1]]]), decimal=2
)
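# A hedged aside (plain math, independent of allennlp): the expected values
# can be reproduced by hand, e.g. cos([4,5,6],[1,2,3]) = 32/(sqrt(77)*sqrt(14))
# ~= 0.97 and cos([-7,-8,-9],[7,8,9]) = -1. The zero-vector row comes out 0
# rather than NaN, which assumes the implementation guards the normalization
# against division by zero.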
|
94872
|
from forge.blade import lib
class Recipe:
def __init__(self, *args, amtMade=1):
self.amtMade = amtMade
self.blueprint = lib.MultiSet()
for i in range(0, len(args), 2):
inp = args[i]
amt = args[i+1]
self.blueprint.add(inp, amt)
|
94906
|
import tkinter as tk
from PIL import ImageTk, Image
from os import listdir
import cv2
import numpy as np
root=tk.Tk()
root.title("DataX: Team OST, Plastic Part Matching")
#Init database
path=r"C:\Users\tobias.grab\IWK_data\test"
files=listdir(path)
nrOfFiles=len(files)
bf = cv2.BFMatcher()
fast=1
if fast==1:
img_database=np.load(r"C:\Users\tobias.grab\IWK_data\savedArrays\img_database.npy")
img_database=[img_database[i,:,:] for i in range(len(files))]
else:
img_database=[cv2.imread(path+'\\'+file,0) for file in files]
img_database_pillow=[ImageTk.PhotoImage(Image.fromarray(img).resize((320, 240),Image.ANTIALIAS)) for img in img_database]
def open_file():
from tkinter.filedialog import askopenfilename
file_path = askopenfilename(title=u'select file')
name=file_path.split("/")[-1]
img_to_match=cv2.imread(file_path,0)
img_to_match_pillow=ImageTk.PhotoImage(Image.fromarray(img_to_match).resize((320, 240),Image.ANTIALIAS))
if v.get()==1:
ALG=cv2.xfeatures2d.SIFT_create()
elif v.get()==2:
ALG=cv2.xfeatures2d.SURF_create()
elif v.get()==3:
ALG=cv2.BRISK_create()
elif v.get()==4:
ALG=cv2.AKAZE_create()
elif v.get()==5:
ALG=cv2.KAZE_create()
img_database_fts=[ALG.detectAndCompute(img, None) for img in img_database]
draw_database=[ImageTk.PhotoImage(Image.fromarray(
cv2.drawKeypoints(img_from_database, img_database_fts[nr][0],None)).resize((320, 240),Image.ANTIALIAS)
) for nr, img_from_database in enumerate(img_database)]
(kps1, descs1) = ALG.detectAndCompute(img_to_match, None)
layout2=tk.Label(root)
layout2.place(relx=0.5,rely=0, relwidth=1, relheight=1,anchor='n')
label_img_to_match=tk.Label(layout2,image=img_to_match_pillow)
label_img_to_match.image=img_to_match_pillow
label_img_to_match.place(relx=0.3,rely=0.1, width=320, height=240,anchor='n')
label_img_database=tk.Label(layout2,image=draw_database[0])
label_img_database.image=draw_database[0]
label_img_database.place(relx=0.7,rely=0.1, width=320, height=240,anchor='n')
label_img_matched=tk.Label(layout2,image=draw_database[0])
label_img_matched.image=draw_database[0]
label_img_matched.place(relx=0.5,rely=0.5, width=640, height=240,anchor='n')
nrOfGoodPerImage=np.zeros([nrOfFiles,1])
image_list_matched=[]
def calc(j):
if j<nrOfFiles-1:
bf = cv2.BFMatcher()
kps2=img_database_fts[j][0]
descs2=img_database_fts[j][1]
matches = bf.knnMatch(descs1,descs2,k=2)
matchesMask = [[0,0] for i in range(len(matches))]
for i,(m,n) in enumerate(matches):
if m.distance < 0.75*n.distance:
matchesMask[i]=[1,0]
good = []
for m,n in matches:
if m.distance < 0.75*n.distance:
good.append([m])
nrOfGoodPerImage[j]=np.sum(matchesMask[:])
img3 = cv2.drawMatchesKnn(img_to_match,kps1,img_database[j],kps2,good,None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
img3_pillow=ImageTk.PhotoImage(Image.fromarray(img3).resize((640, 240),Image.ANTIALIAS))
image_list_matched.append(img3_pillow)
calc(j+1) # recurse synchronously so results are ready before ranking below
calc(0)
idx = (-np.squeeze(nrOfGoodPerImage)).argsort()[:3]
def matching(i):
if i<nrOfFiles-1:
label_img_database.config(image=draw_database[i])
label_img_matched.config(image=image_list_matched[i])
root.after(DELAY, lambda: matching(i+1))
elif i==nrOfFiles-1:
# my_label4=tk.Label(root,bg='#80c1ff')
my_label4=tk.Label(root,bg="LightSteelBlue1")
my_label4.place(relx=0.5,rely=0, relwidth=1, relheight=1,anchor='n')
org_label=tk.Label(my_label4,text="Image to match:\n"+name)
org_label.place(relx=0.15,rely=0.5,anchor='e')
org=tk.Label(my_label4,image=img_to_match_pillow)
org.place(relx=0.15,rely=0.5, width=320, height=240,anchor='w')
best_match_label=tk.Label(my_label4,text="Best Match:\n"+files[idx[0]])
best_match_label.place(relx=0.8,rely=0.2,anchor='w')
best_match=tk.Label(my_label4,image=img_database_pillow[idx[0]])
best_match.place(relx=0.8,rely=0.2, width=320, height=240,anchor='e')
best_match2_label=tk.Label(my_label4,text="Second Best Match:\n"+files[idx[1]])
best_match2_label.place(relx=0.8,rely=0.5,anchor='w')
best_match2=tk.Label(my_label4,image=img_database_pillow[idx[1]])
best_match2.place(relx=0.8,rely=0.5, width=320, height=240,anchor='e')
best_match3_label=tk.Label(my_label4,text="Third Best Match:\n"+files[idx[2]])
best_match3_label.place(relx=0.8,rely=0.8,anchor='w')
best_match3=tk.Label(my_label4,image=img_database_pillow[idx[2]])
best_match3.place(relx=0.8,rely=0.8, width=320, height=240,anchor='e')
# my_title2=tk.Label(my_label4,text="Matching finished! Displaying results...",font=("Helvetica",20), bg='#80c1ff')
my_title2=tk.Label(my_label4,text="Matching finished! Displaying results...",font=("Helvetica",20), bg="LightSteelBlue1")
my_title2.place(relx=0.5,rely=0.0, relwidth=0.4, relheight=0.05,anchor='n')
matching(0)
DELAY=10
HEIGHT=900
WIDTH=1400
canvas=tk.Canvas(root,height=HEIGHT, width=WIDTH)
canvas.pack()
# button_quit= tk.Button(root, text="Exit Program", command=root.quit)
# button_quit.pack()
my_title=tk.Label(root,text="Choose the algorithm you want to use",font=("Helvetica",16))
my_title.place(relx=0.5,rely=0.0, relwidth=0.4, relheight=0.1,anchor='n')
v = tk.IntVar()
v.set(1) # initialize the choice to SIFT
languages = [
("SIFT",1),
("SURF",2),
("BRISK",3),
("AKAZE",4),
("KAZE",5)
]
for txt, val in languages:
tk.Radiobutton(root,
text=txt,
padx = 10, pady=10,
variable=v,
value=val).place(relx=0.5,rely=0.1+val/40, relwidth=0.1, relheight=0.025,anchor='n')
my_title1=tk.Label(root,text="Choose the testimage:",font=("Helvetica",16))
my_title1.place(relx=0.5,rely=0.525, relwidth=0.4, relheight=0.1,anchor='n')
btn = tk.Button(root, text ='Open', command = lambda: open_file(),bg="LightSteelBlue1")
btn.place(relx=0.5,rely=0.6, relwidth=0.6, relheight=0.2,anchor='n')
root.mainloop()
|
94993
|
from wsgiref.simple_server import make_server
from fs.osfs import OSFS
from wsgi import serve_fs
osfs = OSFS('~/')
application = serve_fs(osfs)
httpd = make_server('', 8000, application)
print "Serving on http://127.0.0.1:8000"
httpd.serve_forever()
|
95042
|
from .location import Location, Direction
from .move import Move
__all__ = ['converter', 'Location', 'Move', 'notation_const']
|
95063
|
from functools import reduce
from operator import mul
def product(iterable=(), start=1):
""" kata currently supports only Python 3.4.3 """
return reduce(mul, iterable, start)
# __builtins__.product = product
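# Usage sketch (illustrative values):
# product([2, 3, 4]) == 24
# product() == 1 # empty iterable falls back to the start value
# product([2, 3], 10) == 60 # custom start value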
|
95156
|
import tensorflow as tf
cluster = tf.train.ClusterSpec({"local": ["192.168.122.171:2222", "192.168.122.40:2222"]})
x = tf.constant(2)
with tf.device("/job:local/task:1"):
y2 = x - 66
with tf.device("/job:local/task:0"):
y1 = x + 300
y = y1 + y2
with tf.Session("grpc://192.168.122.40:2222") as sess:
result = sess.run(y)
print(result)
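# Expected output, assuming both workers are reachable: y1 = 2 + 300 = 302 on
# task 0, y2 = 2 - 66 = -64 on task 1, so the session prints 238.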
|
95188
|
import unittest
import pyslow5 as slow5
import time
import numpy as np
"""
Run from root dir of repo after making pyslow5
python3 -m unittest -v python/test.py
"""
#globals
debug = 0 #TODO: make this an argument with -v
class TestBase(unittest.TestCase):
def setUp(self):
self.s5 = slow5.Open('examples/example.slow5','r', DEBUG=debug)
self.read = self.s5.get_read("r1")
def test_class_methods(self):
result = type(self.s5)
self.assertEqual(str(result), "<class 'pyslow5.Open'>")
def test_read_type(self):
self.assertEqual(str(type(self.read)), "<class 'dict'>")
def test_read_id(self):
self.assertEqual(self.read['read_id'], "r1")
def test_read_group(self):
self.assertEqual(self.read['read_group'], 0)
def test_digitisation(self):
self.assertEqual(self.read['digitisation'], 8192.0)
def test_offset(self):
self.assertEqual(self.read['offset'], 23.0)
def test_range(self):
self.assertEqual(self.read['range'], 1467.61)
def test_sampling_rate(self):
self.assertEqual(self.read['sampling_rate'], 4000.0)
def test_len_raw_signal(self):
self.assertEqual(self.read['len_raw_signal'], 59676)
def test_pylen_signal(self):
self.assertEqual(len(self.read['signal']), 59676)
def test_signal(self):
self.assertEqual(sum(self.read['signal'][:10]), sum([1039,588,588,593,586,574,570,585,588,586]))
class TestRandomAccess(unittest.TestCase):
"""
Get data for another individual read via random access; check memory handling
"""
def setUp(self):
self.s5 = slow5.Open('examples/example.slow5','r', DEBUG=debug)
self.read = self.s5.get_read("r1")
self.read = self.s5.get_read("r4", pA=True)
def test_read_id(self):
self.assertEqual(self.read['read_id'], "r4")
def test_read_group(self):
self.assertEqual(self.read['read_group'], 0)
def test_digitisation(self):
self.assertEqual(self.read['digitisation'], 8192.0)
def test_offset(self):
self.assertEqual(self.read['offset'], 23.0)
def test_range(self):
self.assertEqual(self.read['range'], 1467.61)
def test_sampling_rate(self):
self.assertEqual(self.read['sampling_rate'], 4000.0)
def test_len_raw_signal(self):
self.assertEqual(self.read['len_raw_signal'], 59670)
def test_pylen_signal(self):
self.assertEqual(len(self.read['signal']), 59670)
def test_signal(self):
self.assertEqual(sum(self.read['signal'][:10]), sum([190.26, 108.92, 109.46, 109.1, 107.67, 108.39, 108.75, 109.1, 111.07, 108.39]))
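# Hedged aside: pA=True appears to convert raw samples via
# (raw + offset) * range / digitisation; e.g. (1039 + 23.0) * 1467.61 / 8192.0
# ~= 190.26, the first value asserted above.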
class TestAUX(unittest.TestCase):
def setUp(self):
self.s5 = slow5.Open('examples/example2.slow5','r', DEBUG=debug)
self.read = self.s5.get_read("r1", aux=["read_number", "start_mux", "noExistTest"])
def test_read_id(self):
self.assertEqual(self.read['read_id'], "r1")
def test_read_number(self):
self.assertEqual(self.read['read_number'], 2287)
def test_start_mux(self):
self.assertEqual(self.read['start_mux'], 2)
def test_nonExistant_aux(self):
self.assertIs(self.read['noExistTest'], None)
class TestSequentialRead(unittest.TestCase):
def setUp(self):
self.s5 = slow5.Open('examples/example2.slow5','r', DEBUG=debug)
self.reads = self.s5.seq_reads()
def test_seq_reads(self):
results = ['r0', 'r1', 'r2', 'r3', 'r4', 'r5', '0a238451-b9ed-446d-a152-badd074006c4', '0d624d4b-671f-40b8-9798-84f2ccc4d7fc']
for i, read in enumerate(self.reads):
with self.subTest(i=i, read=read['read_id']):
self.assertEqual(read['read_id'], results[i])
class TestYieldRead(unittest.TestCase):
def setUp(self):
self.s5 = slow5.Open('examples/example.slow5','r', DEBUG=debug)
def test_base_reads(self):
read_list = ["r1", "r3", "r5", "r2", "r1"]
results = ["r1", "r3", "r5", "r2", "r1"]
selected_reads = self.s5.get_read_list(read_list)
for i, read in enumerate(selected_reads):
if read is None:
state = None
else:
state = read['read_id']
with self.subTest(i=i, read=state):
if read is None:
self.assertEqual(read, results[i])
else:
self.assertEqual(read['read_id'], results[i])
def test_reads_with_no_exist(self):
read_list = ["r1", "r3", "null_read", "r5", "r2", "r1"]
results = ["r1", "r3", None, "r5", "r2", "r1"]
selected_reads = self.s5.get_read_list(read_list)
for i, read in enumerate(selected_reads):
if read is None:
state = None
else:
state = read['read_id']
with self.subTest(i=i, read=state):
if read is None:
self.assertEqual(read, results[i])
else:
self.assertEqual(read['read_id'], results[i])
class testHeaders(unittest.TestCase):
def setUp(self):
self.s5 = slow5.Open('examples/example.slow5','r', DEBUG=debug)
def test_get_header_names(self):
results = ['asic_id', 'asic_id_eeprom', 'asic_temp', 'auto_update', 'auto_update_source',
'bream_core_version', 'bream_is_standard', 'bream_map_version', 'bream_ont_version',
'bream_prod_version', 'bream_rnd_version', 'device_id', 'exp_script_name', 'exp_script_purpose',
'exp_start_time', 'experiment_kit', 'experiment_type', 'file_version', 'filename', 'flow_cell_id',
'heatsink_temp', 'hostname', 'installation_type', 'local_firmware_file', 'operating_system',
'protocol_run_id', 'protocols_version', 'run_id', 'sample_frequency', 'usb_config',
'user_filename_input', 'version']
names = self.s5.get_header_names()
self.assertEqual(names, results)
def test_get_all_headers(self):
results = ['asic_id', 'asic_id_eeprom', 'asic_temp', 'auto_update', 'auto_update_source',
'bream_core_version', 'bream_is_standard', 'bream_map_version', 'bream_ont_version',
'bream_prod_version', 'bream_rnd_version', 'device_id', 'exp_script_name', 'exp_script_purpose',
'exp_start_time', 'experiment_kit', 'experiment_type', 'file_version', 'filename', 'flow_cell_id',
'heatsink_temp', 'hostname', 'installation_type', 'local_firmware_file', 'operating_system',
'protocol_run_id', 'protocols_version', 'run_id', 'sample_frequency', 'usb_config',
'user_filename_input', 'version']
headers = self.s5.get_all_headers()
self.assertEqual(list(headers.keys()), results)
def test_get_header_value_flow_cell_id(self):
attr = "flow_cell_id"
result = ""
val = self.s5.get_header_value(attr)
self.assertEqual(val, result)
def test_get_header_value_exp_start_time(self):
attr = "exp_start_time"
result = ""
val = self.s5.get_header_value(attr)
self.assertEqual(val, result)
def test_get_header_value_heatsink_temp(self):
attr = "heatsink_temp"
result = ""
val = self.s5.get_header_value(attr)
self.assertEqual(val, result)
def test_get_all_header_values(self):
results = ["3574887596", "0", "29.2145729", "1", "https://mirror.oxfordnanoportal.com/software/MinKNOW/", "172.16.31.10",
"1", "172.16.31.10", "172.16.31.10", "172.16.31.10", "0.1.1", "MN16450", "python/recipes/nc/NC_48Hr_Sequencing_Run_FLO-MIN106_SQK-LSK108.py",
"sequencing_run", "1479433093", "genomic_dna", "customer_qc", "1", "deamernanopore_20161117_fnfab43577_mn16450_sequencing_run_ma_821_r9_4_na12878_11_17_16_88738",
"FAB43577", "33.9921875", "DEAMERNANOPORE", "map", "0", "Windows 6.2", "a4429838-103c-497f-a824-7dffa72cfd81",
"1.1.20", "d6e473a6d513ec6bfc150c60fd4556d72f0e6d18", "4000", "1.0.11_ONT#MinION_fpga_1.0.1#ctrl#Auto",
"ma_821_r9.4_na12878_11_17_16", "1.1.20"]
names = self.s5.get_header_names()
for i, attr in enumerate(names):
with self.subTest(i=i, attr=attr):
val = self.s5.get_header_value(attr)
self.assertEqual(val, results[i])
class testAuxAll(unittest.TestCase):
def setUp(self):
self.s5 = slow5.Open('examples/example2.slow5','r', DEBUG=debug)
def test_get_aux_names(self):
result = ['channel_number', 'median_before', 'read_number', 'start_mux', 'start_time']
aux_names = self.s5.get_aux_names()
self.assertEqual(aux_names, result)
def test_get_aux_types(self):
result = [22, 9, 2, 4, 7]
aux_types = self.s5.get_aux_types()
aux_names = self.s5.get_aux_names()
self.assertEqual(aux_types, result)
def test_get_read_all_aux(self):
results = ['391', 260.557264, 2287, 2, 36886851]
read = self.s5.get_read("0d624d4b-671f-40b8-9798-84f2ccc4d7fc", aux="all")
aux_names = self.s5.get_aux_names()
for i, name in enumerate(aux_names):
with self.subTest(i=i, attr=name):
self.assertEqual(read[name], results[i])
def test_seq_reads_pA_aux_all(self):
results = [['r0', 1106.3899999999999, 78470500],
['r1', 1585.4299999999998, 36886851],
['r2', 1106.3899999999999, 78470500],
['r3', 1585.4299999999998, 36886851],
['r4', 1106.3899999999999, 78470500],
['r5', 1585.4299999999998, 36886851],
['0a238451-b9ed-446d-a152-badd074006c4', 1106.3899999999999, 78470500],
['0d624d4b-671f-40b8-9798-84f2ccc4d7fc', 1585.4299999999998, 36886851]]
reads = self.s5.seq_reads(pA=True, aux='all')
for i, read in enumerate(reads):
with self.subTest(i=i, attr=read['read_id']):
self.assertEqual(read['read_id'], results[i][0])
self.assertEqual(sum(read['signal'][:10]), results[i][1])
self.assertEqual(read['start_time'] ,results[i][2])
# def test_bad_type(self):
# data = "banana"
# with self.assertRaises(TypeError):
# result = sum(data)
if __name__ == '__main__':
unittest.main()
|
95306
|
from yggdrasil.communication import FileComm
class PickleFileComm(FileComm.FileComm):
r"""Class for handling I/O from/to a pickled file on disk.
Args:
name (str): The environment variable where file path is stored.
**kwargs: Additional keywords arguments are passed to parent class.
"""
_filetype = 'pickle'
_schema_subtype_description = ('The file contains one or more pickled '
'Python objects.')
_default_serializer = 'pickle'
_default_extension = '.pkl'
|
95309
|
import pytest
from cleo.io.inputs.token_parser import TokenParser
@pytest.mark.parametrize(
"string, tokens",
[
("", []),
("foo", ["foo"]),
(" foo bar ", ["foo", "bar"]),
('"quoted"', ["quoted"]),
("'quoted'", ["quoted"]),
("'a\rb\nc\td'", ["a\rb\nc\td"]),
("'a'\r'b'\n'c'\t'd'", ["a", "b", "c", "d"]),
("\"quoted 'twice'\"", ["quoted 'twice'"]),
("'quoted \"twice\"'", ['quoted "twice"']),
("\\'escaped\\'", ["'escaped'"]),
('\\"escaped\\"', ['"escaped"']),
("\\'escaped more\\'", ["'escaped", "more'"]),
('\\"escaped more\\"', ['"escaped', 'more"']),
("-a", ["-a"]),
("-azc", ["-azc"]),
("-awithavalue", ["-awithavalue"]),
('-a"foo bar"', ["-afoo bar"]),
('-a"foo bar""foo bar"', ["-afoo barfoo bar"]),
("-a'foo bar'", ["-afoo bar"]),
("-a'foo bar''foo bar'", ["-afoo barfoo bar"]),
("-a'foo bar'\"foo bar\"", ["-afoo barfoo bar"]),
("--long-option", ["--long-option"]),
("--long-option=foo", ["--long-option=foo"]),
('--long-option="foo bar"', ["--long-option=foo bar"]),
('--long-option="foo bar""another"', ["--long-option=foo baranother"]),
("--long-option='foo bar'", ["--long-option=foo bar"]),
("--long-option='foo bar''another'", ["--long-option=foo baranother"]),
("--long-option='foo bar'\"another\"", ["--long-option=foo baranother"]),
("foo -a -ffoo --long bar", ["foo", "-a", "-ffoo", "--long", "bar"]),
("\\' \\\"", ["'", '"']),
],
)
def test_create(string, tokens):
assert TokenParser().parse(string) == tokens
|
95376
|
from typing import Tuple
from os.path import join
import sys
from coreml.utils.logger import color
from coreml.utils.io import read_yml
from coreml.utils.typing import DatasetConfigDict
def read_dataset_from_config(
data_root: str, dataset_config: DatasetConfigDict) -> dict:
"""
Loads and returns the dataset version file corresponding to the
dataset config.
:param data_root: directory where data versions reside
:type data_root: str
:param dataset_config: dict containing `(name, version, mode)`
corresponding to a dataset. Here, `name` stands for the name of the
dataset under the `/data` directory, `version` stands for the version
of the dataset (stored in `/data/name/processed/versions/`) and `mode`
stands for the split to be loaded (train/val/test).
:type dataset_config: DatasetConfigDict
:returns: dict of values stored in the version file
"""
version_fpath = join(
data_root, dataset_config['name'],
'processed/versions', dataset_config['version'] + '.yml')
print(color("=> Loading dataset version file: [{}, {}, {}]".format(
dataset_config['name'], dataset_config['version'],
dataset_config['mode'])))
version_file = read_yml(version_fpath)
return version_file[dataset_config['mode']]
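# Hedged usage sketch (paths and keys are illustrative): with a version file
# at /data/mnist/processed/versions/v1.yml containing train/val/test splits,
# read_dataset_from_config('/data', {'name': 'mnist', 'version': 'v1',
# 'mode': 'train'})
# returns the dict stored under the 'train' key of that YAML file.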
|
95405
|
import numpy as np
from igraph import *
# Define some useful graph functions
def plotGraph(g, name=None):
'''Function that plots a graph. Requires the pycairo library.'''
color_dict = {0: "blue", 1: "#008833"}
visual_style = {}
visual_style["vertex_label_dist"] = 2
visual_style["vertex_size"] = 20
visual_style["vertex_label"] = g.vs["name"]
visual_style["edge_width"] = [1 + 1.5 * (1 - abs(val)) for val in g.es["confounding"]]
visual_style["edge_color"] = [color_dict[abs(val)] for val in g.es["confounding"]]
visual_style["edge_curved"] = [(val) * 0.2 for val in g.es["confounding"]]
visual_style["layout"] = g.layout("circle")
visual_style["bbox"] = (300, 300)
visual_style["margin"] = 40
if name is not None:
return plot(g, name + ".png", **visual_style)
return plot(g, **visual_style)
def get_directed_bidirected_graphs(g):
'''Function that, given a graph, decouples it and returns a graph of
confounded (bidirected) edges and a graph of visible (directed) causations.'''
adj_bidir = np.asarray(g.get_adjacency().data) + np.asarray(g.get_adjacency().data).T
adj_bidir[adj_bidir < 2] = 0
adj_bidir[adj_bidir >= 2] = 1
adj_dir = np.asarray(g.get_adjacency().data) - adj_bidir
g_dir = Graph.Adjacency(adj_dir.tolist())
g_bidir = Graph.Adjacency(adj_bidir.tolist())
g_dir.vs["name"] = g.vs["name"]
g_bidir.vs["name"] = g.vs["name"]
confounding_dir = [0 for edge in g_dir.es]
confounding_bidir = []
for edge in g_bidir.es:
if edge.source_vertex["name"] < edge.target_vertex["name"]:
confounding_bidir.append(-1)
else:
confounding_bidir.append(1)
g_bidir.es["confounding"] = confounding_bidir
g_dir.es["confounding"] = confounding_dir
return g_dir, g_bidir
def get_C_components(g):
'''Function that returns the different C-components of a graph.'''
g_dir, g_bidir = get_directed_bidirected_graphs(g)
g_out = g_bidir.copy()
for e in g_dir.es:
if e.target_vertex.index in g_bidir.subcomponent(e.source_vertex.index):
g_out.add_edge(e.source_vertex.index, e.target_vertex.index)
return g_out.decompose()
def get_vertices_no_parents(g):
'''Function that returns the set of vertices without parents'''
degrees = g.degree(mode="in")
vertices = []
for i in range(len(degrees)):
if degrees[i] == 0:
vertices.append(g.vs[i]["name"])
return set(vertices)
def get_topological_ordering(g):
'''Function that returns a topological ordering of the vertices of a graph'''
g_dir, g_bidir = get_directed_bidirected_graphs(g)
return [g_dir.vs[index]["name"] for index in g_dir.topological_sorting()]
def get_previous_order(v, possible, ordering):
'''Function that returns all vertices preceding an initial vertex, drawn from
a possible set of vertices and an ordering of the graph'''
return set(ordering[:ordering.index(v)]).intersection(possible)
def get_ancestors(g, v_name):
'''Function that returns a set containing all ancestors of a vertex,
including itself'''
if not g.is_dag():
raise ValueError("Graph contains a cycle")
ancestors = []
if type(v_name) == type(set()):
for e in v_name:
ancestors += get_ancestors(g, e)
else:
ancestors = [v_name]
parents = [v_name]
checked = [v_name]
while len(parents) > 0:
name_vertex = parents.pop(0)
checked.append(name_vertex)
new_neighbors = g.neighbors(g.vs.find(name=name_vertex), mode="in")
for i in range(len(new_neighbors)):
if g.vs[new_neighbors[i]]["name"] not in ancestors:
ancestors.append(g.vs[new_neighbors[i]]["name"])
if g.vs[new_neighbors[i]]["name"] not in parents and g.vs[new_neighbors[i]]["name"] not in checked:
parents.append(g.vs[new_neighbors[i]]["name"])
return set(ancestors)
def get_descendants(g, v_name):
'''Function that returns a set containing all descendants of a vertex,
including itself'''
if not g.is_dag():
raise ValueError("Graph contains a cycle")
descendants = []
if type(v_name) == type(set()):
for e in v_name:
descendants += get_descendants(g, e)
else:
descendants = [v_name]
children = [v_name]
checked = [v_name]
while len(children) > 0:
name_vertex = children.pop(0)
checked.append(name_vertex)
new_neighbors = g.neighbors(g.vs.find(name=name_vertex), mode="out")
for i in range(len(new_neighbors)):
if g.vs[new_neighbors[i]]["name"] not in descendants:
descendants.append(g.vs[new_neighbors[i]]["name"])
if g.vs[new_neighbors[i]]["name"] not in children and g.vs[new_neighbors[i]]["name"] not in checked:
children.append(g.vs[new_neighbors[i]]["name"])
return set(descendants)
def graphs_are_equal(g1, g2):
'''Function that checks if two given graphs are equal'''
return check_subgraph(g1, g2) and check_subgraph(g2, g1)
def check_subcomponent(subcomponent, components):
'''Function that checks if a graph is part of a set of graphs'''
for g in components:
if graphs_are_equal(subcomponent, g):
return True
return False
def check_subgraph(g1, g2):
'''Function that checks if a graph g1 is a subgraph of g2'''
# Check that g1<g2
g1_vertices = set(g1.vs["name"])
g2_vertices = set(g2.vs["name"])
if not g1_vertices.issubset(g2_vertices):
return False
if len(g1.es) > len(g2.es):
return False
# check edges g1 included in g2
for edge in g1.es:
source = edge.source_vertex["name"]
target = edge.target_vertex["name"]
outgoing_vertices = g2.vs(g2.neighbors(g2.vs.find(name=source), mode="out"))["name"]
if target not in outgoing_vertices:
return False
return True
def createGraph(list_edges_string, verbose=False):
'''Creates a graph from a list of edges in string-format.'''
vertices = []
edges = []
confounding = []
for e in list_edges_string:
conf = 0
e = e.replace(" ", "")
endpoints = e.replace("<", "").replace("-", "").replace(">", "")
for i in range(1, len(endpoints)):
if endpoints[i].isalpha():
vertex1, vertex2 = endpoints[:i], endpoints[i:]
break
e = e.replace(vertex1, "").replace(vertex2, "")
index1, index2 = -1, -1
if vertex1 in vertices:
index1 = vertices.index(vertex1)
else:
index1 = len(vertices)
vertices.append(vertex1)
if vertex2 in vertices:
index2 = vertices.index(vertex2)
else:
index2 = len(vertices)
vertices.append(vertex2)
if (e[0] == '<'):
conf += 1
edges.append((index2, index1))
if (e[-1] == '>'):
conf += 1
edges.append((index1, index2))
# confounding edge
if (conf == 2):
confounding.append(1)
confounding.append(-1)
else:
confounding.append(0)
if verbose: print(vertices)
if verbose: print(edges)
if verbose: print(confounding)
g = Graph(vertex_attrs={"name": vertices}, edges=edges, directed=True)
g.es["confounding"] = confounding
return g
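# Hedged usage sketch (edge strings are illustrative): "<->" marks a
# confounded pair in the format parsed above.
# g = createGraph(["X->Y", "Z->X", "X<->Y"])
# g_dir, g_bidir = get_directed_bidirected_graphs(g)
# get_ancestors(g_dir, "Y") # {'Y', 'X', 'Z'}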
def to_R_notation(edges):
'''Function that, given a list of strings containing the edges of a graph, returns
the equivalent graph information for causaleffect package in R.'''
# ["X->Y", "X<-A", "X<-E", "X<-V", "Y<-A", "Y<-H_1", "Y<-G", "Y<-V", "Y<-H", "Y<-E", "H->V", "V->E"]
bidirected = []
final_bidirected = []
for i, e in enumerate(edges):
e = e.replace("<", "+")
e = e.replace(">", "+")
edges[i] = e
if e.count("+") == 2:
bidirected.append(e)
directed = [e for e in edges if e not in bidirected]
for e in bidirected:
one = e.replace("+", "", 1)
two = e[::-1].replace("+", "", 1)[::-1]
final_bidirected.append(one)
final_bidirected.append(two)
out = ', '.join(directed + final_bidirected)
return out, len(directed) + 1, len(directed) + len(final_bidirected)
def unobserved_graph(g):
'''Constructs a causal diagram in which each confounded (bidirected) edge
is replaced by an explicit unmeasurable parent node'''
G = g.copy()
vertices = G.vs["name"]
delete_edges = []
add_edges = []
for e in G.es:
if e["confounding"] != 0:
delete_edges.append(e.index)
if e["confounding"] == 1:
new_vertex_name = G.vs[e.source]["name"] + G.vs[e.target]["name"]
if e["confounding"] == -1:
new_vertex_name = G.vs[e.target]["name"] + G.vs[e.source]["name"]
src = -1
if new_vertex_name in vertices:
src = vertices.index(new_vertex_name)
else:
src = len(vertices)
vertices.append(new_vertex_name)
G.add_vertices(1)
G.vs[-1]["name"] = new_vertex_name
add_edges.append((src, e.target))
G.add_edges(add_edges)
G.delete_edges(delete_edges)
G.es["confounding"] = [0 for i in G.es]
return G
def dSep(G, Y, node, cond, verbose=False):
'''Checks if node and the set Y are d-separated, given the whole graph G and the measured variables cond'''
if verbose: print('dSep: dSep of node:', node, ' to set: ', Y)
for y in Y:
if verbose: print('dSep: Path from ', y, ' to ', node)
paths = G.get_all_simple_paths(v=G.vs.find(name_eq=node), to=G.vs.find(name_eq=y), mode='ALL')
if verbose: print('dSep: All possible paths: ', paths)
for p in paths:
if verbose:
p_name = []
for i in p:
p_name.append(G.vs[i]["name"])
print('dSep: Path:', p, p_name)
if not is_path_d_separated(G, p, cond, verbose=verbose):
return False
# If all paths are d-separated, return True
return True
def is_path_d_separated(G, p, cond, verbose=False):
'''Checks if a path is d-separated, given the whole graph G and the measured variables cond'''
if verbose: print('is_path_d_separated: Path ', p, 'Conditional: ', cond)
if G.vs[p[0]]["name"] in cond or G.vs[p[-1]]["name"] in cond:
raise Exception('Source or target nodes in conditional d-separation path')
for i in range(len(p) - 2):
e1, e2 = '', ''
if len(G.es.select(_source=p[i]).select(_target=p[i + 1])):
if verbose: print(G.vs[p[i]]["name"], '->', G.vs[p[i + 1]]["name"])
e1 = 'r'
if len(G.es.select(_source=p[i + 1]).select(_target=p[i])):
if verbose: print(G.vs[p[i]]["name"], '<-', G.vs[p[i + 1]]["name"])
if e1 == 'r':
e1 = 'b'
else:
e1 = 'l'
if len(G.es.select(_source=p[i + 1]).select(_target=p[i + 2])):
if verbose: print(G.vs[p[i + 1]]["name"], '->', G.vs[p[i + 2]]["name"])
e2 = 'r'
if len(G.es.select(_source=p[i + 2]).select(_target=p[i + 1])):
if verbose: print(G.vs[p[i + 1]]["name"], '<-', G.vs[p[i + 2]]["name"])
if e2 == 'r':
e2 = 'b'
else:
e2 = 'l'
if ((e1 == 'r' and e2 == 'r') or (e1 == 'l' and e2 == 'l') or (e1 == 'l' and e2 == 'r') or
(e1 == 'l' and e2 == 'b') or (e1 == 'b' and e2 == 'r')): # -> -> // <- <- // <- -> // <- <-> // <-> ->
if G.vs[p[i + 1]]["name"] in cond:
if verbose: print('is_path_d_separated: Chain or Fork:', G.vs[p[i]]["name"], e1, G.vs[p[i+1]]["name"], e2, G.vs[p[i+2]]["name"])
return True
if ((e1 == 'r' and e2 == 'l') or (e1 == 'r' and e2 == 'b') or (e1 == 'b' and e2 == 'l') or
(e1 == 'b' and e2 == 'b')): # -> <- // -> <-> // <-> <- // <-> <->
G_dir, G_bidir = get_directed_bidirected_graphs(G)
if len(get_descendants(G_dir, G.vs[p[i + 1]]["name"]).intersection(cond)) == 0:
if verbose: print('is_path_d_separated: Collider:', G.vs[p[i]]["name"], e1, G.vs[p[i+1]]["name"], e2, G.vs[p[i+2]]["name"])
return True
if verbose: print('is_path_d_separated: d-connected path ', p, 'Conditional: ', cond)
return False
def printGraph(G):
'''Function that returns a tuple with list of nodes and a list of edges of the graph.'''
edges = []
confounded = []
for edge in G.es:
if edge["confounding"] == -1:
confounded.append(edge.source_vertex["name"] + "<->" + edge.target_vertex["name"])
elif edge["confounding"] == 1:
confounded.append(edge.target_vertex["name"] + "<->" + edge.source_vertex["name"])
else:
edges.append(edge.source_vertex["name"] + "->" + edge.target_vertex["name"])
confounded = list(set(confounded))
nodes = G.vs["name"]
return nodes, edges + confounded
|
95416
|
from tir import Webapp
import unittest
from datetime import datetime
DateSystem = datetime.today().strftime("%d/%m/%Y")
class TRKXFUN(unittest.TestCase):
@classmethod
def setUpClass(self):
self.oHelper = Webapp(autostart=False)
self.oHelper.SetTIRConfig(config_name="User", value="telemarketing")
self.oHelper.SetTIRConfig(config_name="Password", value="1")
def test_TRKXFUN_CT001(self):
self.oHelper.Start()
self.oHelper.Setup("SIGATMK",DateSystem,"T1","D MG 01","13")
self.oHelper.Program("TMKA271")
self.oHelper.AddParameter("MV_FATPROP","D MG 01","O","O","O")
self.oHelper.SetParameters()
self.oHelper.SearchBrowse(f"D MG 01 000020", "Filial+atendimento")
self.oHelper.SetButton("Visualizar")
self.oHelper.SetButton("Outras Ações", "Tracker da Entidade")
self.oHelper.SetButton("Rastrear")
self.oHelper.ClickTree("Atendimento Telemarketing - 000020")
self.oHelper.SetButton("Abandonar")
self.oHelper.SetButton("Confirmar")
self.oHelper.SetButton("X")
self.oHelper.RestoreParameters()
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(self):
self.oHelper.TearDown()
if __name__ == "__main__":
unittest.main()
|
95422
|
def common_ground(s1,s2):
words = s2.split()
return ' '.join(sorted((a for a in set(s1.split()) if a in words),
key=lambda b: words.index(b))) or 'death'
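# Illustrative examples (not in the original source): shared words come back
# in s2's word order, and 'death' is the fallback when nothing is shared.
#   common_ground('I love python', 'python love me')  -> 'python love'
#   common_ground('a b', 'c d')                       -> 'death'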
|
95449
|
from wellcomeml.datasets.conll import _load_data_spacy, load_conll
import pytest
def test_length():
X, Y = _load_data_spacy("tests/test_data/test_conll", inc_outside=True)
assert len(X) == len(Y) and len(X) == 4
def test_entity():
X, Y = _load_data_spacy("tests/test_data/test_conll", inc_outside=False)
start = Y[0][0]["start"]
end = Y[0][0]["end"]
assert X[0][start:end] == "LEICESTERSHIRE"
def test_no_outside_entities():
X, Y = _load_data_spacy("tests/test_data/test_conll", inc_outside=False)
outside_entities = [
entity for entities in Y for entity in entities if entity["label"] == "O"
]
assert len(outside_entities) == 0
def test_load_conll():
X, y = load_conll(dataset="test_conll")
assert isinstance(X, tuple)
assert isinstance(y, tuple)
assert len(X) == 4
assert len(y) == 4
def test_load_conll_raises_KeyError():
with pytest.raises(KeyError):
load_conll(split="wrong_argument")
|
95453
|
from .moc import MOC
from .plot.wcs import World2ScreenMPL
__all__ = [
'MOC',
'World2ScreenMPL'
]
|
95454
|
from chemios.pumps import Chemyx
from chemios.utils import SerialTestClass
#from dummyserial import Serial
import pytest
import logging
#Logging
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
logfile = 'chemios_dev.log'
fileHandler = logging.FileHandler(logfile)
fileHandler.setLevel(logging.DEBUG)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
@pytest.fixture()
def ser():
'''Create a mock serial port'''
ser = SerialTestClass()
my_ser = ser.ser
return my_ser
# @pytest.fixture(scope='module')
# def mock_chemyx():
# mock_responses = {
# 'start\x0D': "Pump start running…",
# 'stop\x0D': "Pump stop!",
# 'set diameter 1.00\x0D': 'diameter = 1.00',
# 'set diameter 4.65\x0D': 'diameter = 4.65',
# 'set rate 10.0\x0D': 'rate = 10.0',
# 'set rate 100\x0D': 'rate = 100',
# 'set units 0\x0D': 'units = 0',
# 'set units 1\x0D': 'units = 1',
# 'set units 2\x0D': 'units = 2',
# 'set units 3\x0D': 'units = 3'
# }
# ser = Serial(port='test',
# ds_responses=mock_responses)
# return ser
@pytest.mark.parametrize('model', ['Fusion 100', 'Fusion 200',
'Fusion 4000', 'Fusion 6000',
'NanoJet', 'OEM'])
def test_init(ser, model):
'''Test initialization of chemyx pump'''
    if model in ['NanoJet', 'OEM']:
ser.baudrate = 38400
Chemyx(model=model, ser=ser)
@pytest.mark.parametrize('model', ['Fusion 100', 'Fusion 200',
'Fusion 4000', 'Fusion 6000',
'NanoJet', 'OEM'])
def test_get_info(ser, model):
'''Test Get info'''
    if model in ['NanoJet', 'OEM']:
ser.baudrate = 38400
C = Chemyx(model=model, ser=ser)
C.get_info()
@pytest.mark.parametrize('model', ['Fusion 100', 'Fusion 4000','OEM'])
@pytest.mark.parametrize('manufacturer', ['terumo-japan', 'terumo','sge'])
@pytest.mark.parametrize('volume', [5.0, 10.0])
def test_set_syringe(ser, model, manufacturer, volume):
    '''Test set_syringe info'''
    if model in ['NanoJet', 'OEM']:
ser.baudrate = 38400
C = Chemyx(model=model, ser=ser,
syringe_manufacturer=manufacturer, syringe_volume=volume)
#Reduce retry time for unit tests
C.retry = 0.01
C.set_syringe(manufacturer=manufacturer, volume=volume)
@pytest.mark.parametrize('model', ['Fusion 100', 'Fusion 4000','OEM'])
def test_rate(ser, model):
'''Test Run info'''
    if model in ['NanoJet', 'OEM']:
ser.baudrate = 38400
C = Chemyx(model=model, ser=ser,
syringe_manufacturer='terumo-japan', syringe_volume=1)
rate = {'value': 20, 'units': 'uL/min'}
C.set_rate(rate, 'INF')
@pytest.mark.parametrize('model', ['Fusion 100', 'Fusion 200',
'Fusion 4000', 'Fusion 6000',
'NanoJet', 'OEM'])
def test_run(ser, model):
'''Test Run info'''
    if model in ['NanoJet', 'OEM']:
ser.baudrate = 38400
C = Chemyx(model=model, ser=ser,
syringe_manufacturer='terumo-japan', syringe_volume=1)
rate = {'value': 20, 'units': 'uL/min'}
C.set_rate(rate, 'INF')
C.run()
@pytest.mark.parametrize('model', ['Fusion 100', 'Fusion 4000','OEM'])
@pytest.mark.parametrize('units', ['mL/min', 'mL/hr', 'uL/min', 'uL/hr'])
def test_set_units(ser, model, units):
'''Test set_units '''
    if model in ['NanoJet', 'OEM']:
ser.baudrate = 38400
C = Chemyx(model=model, ser=ser)
C.set_units(units)
@pytest.mark.parametrize('model', ['Fusion 100', 'Fusion 4000','OEM'])
def test_stop(ser, model):
'''Test stop'''
    if model in ['NanoJet', 'OEM']:
ser.baudrate = 38400
C = Chemyx(model=model, ser=ser)
C.stop()
|
95458
|
import pytest
from paste.deploy.config import ConfigMiddleware
class Bug(Exception):
pass
def app_with_exception(environ, start_response):
def cont():
yield b"something"
raise Bug
start_response('200 OK', [('Content-type', 'text/html')])
return cont()
def test_error():
# This import is conditional due to Paste not yet working on py3k
try:
from paste.fixture import TestApp
except ImportError:
        pytest.skip('unable to import TestApp')
wrapped = ConfigMiddleware(app_with_exception, {'test': 1})
test_app = TestApp(wrapped)
pytest.raises(Bug, test_app.get, '/')
|
95489
|
import pika
import json
credentials = pika.PlainCredentials("cc-dev", "<PASSWORD>")
parameters = pika.ConnectionParameters(
host="127.0.0.1",
port=5672,
virtual_host="cc-dev-ws",
credentials=credentials)
conn = pika.BlockingConnection(parameters)
assert conn.is_open
try:
ch = conn.channel()
assert ch.is_open
headers = {"version": "0.1b", "system": "taxi"}
properties = pika.BasicProperties(content_type="application/json", headers=headers)
message = {"latitude": 0.0, "longitude": -1.0}
message = json.dumps(message)
ch.basic_publish(
exchange="taxi_header_exchange",
body=message,
properties=properties,
routing_key="")
finally:
conn.close()
|
95522
|
from django.contrib import admin
from django.urls import path
import wordcount.views
urlpatterns = [
path('admin/', admin.site.urls),
path('', wordcount.views.home, name="home"),
path('about/', wordcount.views.about, name='about'),
path('result/', wordcount.views.result, name='result'),
]
|
95545
|
import os
import hashlib
from libcard2 import string_mgr
from libcard2.master import MasterData
LANG_TO_FTS_CONFIG = {
"en": "card_fts_cfg_english",
}
TLINJECT_DUMMY_BASE_LANG = "en"
async def add_fts_string(lang: str, key: str, value: str, referent: int, connection):
if not (fts_lang := LANG_TO_FTS_CONFIG.get(lang)):
return
hash = hashlib.sha224(value.encode("utf8"))
await connection.execute(
"""
INSERT INTO card_fts_v2 VALUES ($1, $2, $3, to_tsvector($6, $4), 'dict', $5)
ON CONFLICT (langid, key, origin, referent_id) DO UPDATE
SET terms = excluded.terms WHERE card_fts_v2.dupe != excluded.dupe
""",
lang,
key,
referent,
value,
hash.digest(),
fts_lang,
)
async def update_fts(
lang: str, master: MasterData, dicts: string_mgr.DictionaryAccess, coordinator
):
can_add_dictionary_tls = lang in LANG_TO_FTS_CONFIG
async with coordinator.pool.acquire() as conn, conn.transaction():
for id in master.card_ordinals_to_ids(master.all_ordinals()):
card = master.lookup_card_by_id(id, use_cache=False)
t_set = []
if card.normal_appearance:
t_set.append(card.normal_appearance.name)
if card.idolized_appearance:
t_set.append(card.idolized_appearance.name)
for key in t_set:
await conn.execute(
"""
INSERT INTO card_fts_v2 VALUES ($1, $2, $3, NULL, 'tlinject', NULL)
ON CONFLICT (langid, key, origin, referent_id) DO NOTHING
""",
TLINJECT_DUMMY_BASE_LANG,
key,
card.id,
)
if can_add_dictionary_tls:
strings = dicts.lookup_strings(t_set)
for orig_key, value in strings.items():
await add_fts_string(lang, orig_key, value, card.id, conn)
|
95586
|
import numpy as np
from util.gym import action_size
from util.logger import logger
from motion_planners.sampling_based_planner import SamplingBasedPlanner
class PlannerAgent:
def __init__(
self,
config,
ac_space,
non_limited_idx=None,
        passive_joint_idx=None,
        ignored_contacts=None,
planner_type=None,
goal_bias=0.05,
is_simplified=False,
simplified_duration=0.1,
range_=None,
):
        # Normalize optional list arguments (avoids shared mutable defaults).
        passive_joint_idx = passive_joint_idx if passive_joint_idx is not None else []
        ignored_contacts = ignored_contacts if ignored_contacts is not None else []
        self._config = config
self.planner = SamplingBasedPlanner(
config,
config._xml_path,
action_size(ac_space),
non_limited_idx,
planner_type=planner_type,
passive_joint_idx=passive_joint_idx,
ignored_contacts=ignored_contacts,
contact_threshold=config.contact_threshold,
goal_bias=goal_bias,
is_simplified=is_simplified,
simplified_duration=simplified_duration,
range_=range_,
)
self._is_simplified = is_simplified
self._simplified_duration = simplified_duration
def plan(self, start, goal, timelimit=None, attempts=15):
config = self._config
if timelimit is None:
timelimit = config.timelimit
traj, states, valid, exact = self.planner.plan(start, goal, timelimit)
success = valid and exact
if success:
return traj[1:], success, valid, exact
else:
return traj, success, valid, exact
def get_planner_status(self):
return self.planner.get_planner_status()
def isValidState(self, state):
return self.planner.isValidState(state)
|
95617
|
import os
import math
from pprint import PrettyPrinter
import random
import numpy as np
import torch # Torch must be imported before sklearn and tf
import sklearn
import tensorflow as tf
import better_exceptions
from tqdm import tqdm, trange
import colorlog
import colorful
from utils.etc_utils import set_logger, set_tcmalloc, set_gpus, check_none_gradients
from utils import config_utils, custom_argparsers
from models import MODELS
from modules.checkpoint_tracker import CheckpointTracker
from modules.trainer import run_wow_evaluation, Trainer
from modules.from_parlai import download_from_google_drive, unzip
from data.wizard_of_wikipedia import WowDatasetReader
from data.holle import HolleDatasetReader
better_exceptions.hook()
_command_args = config_utils.CommandArgs()
pprint = PrettyPrinter().pprint
pformat = PrettyPrinter().pformat
BEST_N_CHECKPOINTS = 5
def main():
# Argument passing/parsing
args, model_args = config_utils.initialize_argparser(
MODELS, _command_args, custom_argparsers.DialogArgumentParser)
hparams, hparams_dict = config_utils.create_or_load_hparams(
args, model_args, args.cfg)
pprint(hparams_dict)
# Set environment variables & gpus
set_logger()
set_gpus(hparams.gpus)
set_tcmalloc()
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_visible_devices(gpus, 'GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
# Set random seed
tf.random.set_seed(hparams.random_seed)
np.random.seed(hparams.random_seed)
random.seed(hparams.random_seed)
# For multi-gpu
if hparams.num_gpus > 1:
mirrored_strategy = tf.distribute.MirroredStrategy() # NCCL will be used as default
else:
mirrored_strategy = None
# Download BERT pretrained model
if not os.path.exists(hparams.bert_dir):
os.makedirs(hparams.bert_dir)
fname = 'uncased_L-12_H-768_A-12.zip'
gd_id = '17rfV9CleFBwwfS7m5Yd72vvxdPLWBHl6'
download_from_google_drive(gd_id, os.path.join(hparams.bert_dir, fname))
unzip(hparams.bert_dir, fname)
# Make dataset reader
os.makedirs(hparams.cache_dir, exist_ok=True)
if hparams.data_name == "wizard_of_wikipedia":
reader_cls = WowDatasetReader
elif hparams.data_name == "holle":
reader_cls = HolleDatasetReader
else:
raise ValueError("data_name must be one of 'wizard_of_wikipedia' and 'holle'")
reader = reader_cls(
hparams.batch_size, hparams.num_epochs,
buffer_size=hparams.buffer_size,
bucket_width=hparams.bucket_width,
max_length=hparams.max_length,
max_episode_length=hparams.max_episode_length,
max_knowledge=hparams.max_knowledge,
knowledge_truncate=hparams.knowledge_truncate,
cache_dir=hparams.cache_dir,
bert_dir=hparams.bert_dir,
)
train_dataset, iters_in_train = reader.read('train', mirrored_strategy)
test_dataset, iters_in_test = reader.read('test', mirrored_strategy)
if hparams.data_name == 'wizard_of_wikipedia':
unseen_dataset, iters_in_unseen = reader.read('test_unseen', mirrored_strategy)
vocabulary = reader.vocabulary
# Build model & optimizer & trainer
if mirrored_strategy:
with mirrored_strategy.scope():
model = MODELS[hparams.model](hparams, vocabulary)
optimizer = tf.keras.optimizers.Adam(learning_rate=hparams.init_lr,
clipnorm=hparams.clipnorm)
else:
model = MODELS[hparams.model](hparams, vocabulary)
optimizer = tf.keras.optimizers.Adam(learning_rate=hparams.init_lr,
clipnorm=hparams.clipnorm)
trainer = Trainer(model, optimizer, mirrored_strategy,
hparams.enable_function,
reader_cls.remove_pad)
# misc (tensorboard, checkpoints)
file_writer = tf.summary.create_file_writer(hparams.checkpoint_dir)
file_writer.set_as_default()
global_step = tf.compat.v1.train.get_or_create_global_step()
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
model=model,
optimizer_step=global_step)
checkpoint_manager = tf.train.CheckpointManager(checkpoint,
directory=hparams.checkpoint_dir,
max_to_keep=hparams.max_to_keep)
checkpoint_tracker = CheckpointTracker(
hparams.checkpoint_dir, max_to_keep=BEST_N_CHECKPOINTS)
# Main loop!
train_dataset_iter = iter(train_dataset)
for epoch in range(hparams.num_epochs):
print(hparams.checkpoint_dir)
base_description = f"(Train) Epoch {epoch}, GPU {hparams.gpus}"
train_tqdm = trange(iters_in_train, ncols=120, desc=base_description)
for current_step in train_tqdm:
example = next(train_dataset_iter)
global_step.assign_add(1)
_global_step = int(global_step)
# Train
output_dict = trainer.train_step(example)
# Print model
if _global_step == 1:
model.print_model()
loss_str = str(output_dict['loss'].numpy())
train_tqdm.set_description(f"{base_description}, Loss {loss_str}")
with file_writer.as_default():
if _global_step % int(hparams.logging_step) == 0:
tf.summary.histogram('train/vocab', output_dict['sample_ids'], step=_global_step)
tf.summary.scalar('train/loss', output_dict['loss'], step=_global_step)
tf.summary.scalar('train/gen_loss', output_dict['gen_loss'], step=_global_step)
tf.summary.scalar('train/knowledge_loss', output_dict['knowledge_loss'], step=_global_step)
tf.summary.scalar('train/kl_loss', output_dict['kl_loss'], step=_global_step)
# Test
if _global_step % int(iters_in_train * hparams.evaluation_epoch) == 0:
checkpoint_manager.save(global_step)
test_loop_outputs = trainer.test_loop(test_dataset, iters_in_test, epoch, 'seen')
if hparams.data_name == 'wizard_of_wikipedia':
unseen_loop_outputs = trainer.test_loop(unseen_dataset, iters_in_unseen, epoch, 'unseen')
test_summaries, log_dict = run_wow_evaluation(
test_loop_outputs, hparams.checkpoint_dir, 'seen')
if hparams.data_name == 'wizard_of_wikipedia':
unseen_summaries, unseen_log_dict = run_wow_evaluation(
unseen_loop_outputs, hparams.checkpoint_dir, 'unseen')
# Logging
tqdm.write(colorful.bold_green("seen").styled_string)
tqdm.write(colorful.bold_red(pformat(log_dict)).styled_string)
if hparams.data_name == 'wizard_of_wikipedia':
tqdm.write(colorful.bold_green("unseen").styled_string)
tqdm.write(colorful.bold_red(pformat(unseen_log_dict)).styled_string)
with file_writer.as_default():
for family, test_summary in test_summaries.items():
for key, value in test_summary.items():
tf.summary.scalar(f'{family}/{key}', value, step=_global_step)
if hparams.data_name == 'wizard_of_wikipedia':
for family, unseen_summary in unseen_summaries.items():
for key, value in unseen_summary.items():
tf.summary.scalar(f'{family}/{key}', value, step=_global_step)
if hparams.keep_best_checkpoint:
current_score = log_dict["rouge1"]
checkpoint_tracker.update(current_score, _global_step)
if __name__ == '__main__':
main()
|
95624
|
from qt import *
from qtcanvas import *
from lpathtree_qt import *
class Point:
def __init__(self, *args):
if len(args) == 2 and \
(isinstance(args[0],int) or isinstance(args[0],float)) and \
            (isinstance(args[1],int) or isinstance(args[1],float)):
self.x = float(args[0])
self.y = float(args[1])
elif len(args) == 1 and \
isinstance(args[0],QPoint):
self.x = float(args[0].x())
self.y = float(args[0].y())
else:
raise TypeError("invalid argument type")
def __add__(self, p):
if not isinstance(p,Point):
raise TypeError("invalid argument type")
return Point(self.x+p.x, self.y+p.y)
def __sub__(self, p):
if not isinstance(p,Point):
raise TypeError("invalid argument type")
return Point(self.x-p.x, self.y-p.y)
def __mul__(self, n):
if not isinstance(n,int) and \
not isinstance(n,float):
raise TypeError("invalid argument type")
n = float(n)
return Point(self.x*n,self.y*n)
def __div__(self, n):
if not isinstance(n,int) and \
not isinstance(n,float):
raise TypeError("invalid argument type")
n = float(n)
return Point(self.x/n,self.y/n)
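# Illustrative arithmetic with Point (not in the original source):
#   Point(1, 2) + Point(3, 4)  -> Point(4.0, 6.0)
#   Point(2, 4) * 0.5          -> Point(1.0, 2.0)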
class TreeCanvasNode(QCanvasText):
def __init__(self, node=None, canvas=None):
assert(isinstance(node,LPathTreeModel))
if 'label' in node.data and node.data['label']:
QCanvasText.__init__(self, node.data['label'], canvas)
else:
QCanvasText.__init__(self, '', canvas)
node.gui = self
self.numberWidget = QCanvasText(canvas)
self.numberWidget.setColor(Qt.lightGray)
self.numberHidden = True
self.node = node
self.triangle = QCanvasPolygon(canvas)
self.triangle.setBrush(QBrush(Qt.gray))
def hide(self):
self.numberWidget.hide()
self.triangle.hide()
QCanvasText.hide(self)
def draw(self, painter):
self.updateNumber()
alignment = self.node.lpAlignment()
if alignment == self.node.AlignLeft:
self.setText('^'+self.node.data['label'])
elif alignment == self.node.AlignRight:
self.setText(self.node.data['label']+'$')
elif alignment == self.node.AlignBoth:
self.setText("^%s$" % self.node.data['label'])
elif self.node.data['label']:
self.setText(self.node.data['label'])
else:
self.setText('')
if self.node.collapsed:
dw = self.width() / 2.0
x1 = self.x() + dw
y1 = self.y() + self.height()
pa = QPointArray(3)
pa.setPoint(0, x1,y1)
pa.setPoint(1, x1-dw,y1+self.height())
pa.setPoint(2, x1+dw,y1+self.height())
self.triangle.setPoints(pa)
self.triangle.show()
else:
self.triangle.hide()
QCanvasText.draw(self, painter)
def clear(self):
f = self.font()
f.setUnderline(False)
self.setFont(f)
def width(self):
return self.boundingRect().width()
def height(self):
return self.boundingRect().height()
def intersection(self, item):
p = Point(item.boundingRect().center())
box = self.boundingRect()
c = Point(box.center())
v = p - c
if self == item:
return c
elif v.x != 0:
v = v / abs(v.x)
elif v.y > 0:
return Point(c.x,box.bottom())
else:
return Point(c.x,box.top())
v1 = Point(box.bottomRight() - box.topLeft())
if v1.x > 0.0:
v1 = v1 / v1.x
if abs(v.y) < v1.y:
dx = box.width() / 2.0
x = c.x + dx * v.x
y = c.y + dx * v.y
else:
if v.y != 0:
v = v / abs(v.y)
dy = box.height() / 2.0
x = c.x + dy * v.x
y = c.y + dy * v.y
elif v.x > 0:
x = box.right()
y = c.y
else:
x = box.left()
y = c.y
return Point(x, y)
def connectingLine(self, item):
p1 = self.intersection(item)
p2 = item.intersection(self)
return p1.x,p1.y,p2.x,p2.y
def updateNumber(self):
if self.node.lpIsolated():
self.numberHidden = True
self.numberWidget.hide()
else:
number = self.node.lpScopeDepth()
c = self.canvas()
w = self.numberWidget
c.setChanged(w.boundingRect())
w.setText("%d" % number)
r = self.boundingRect()
wr = w.boundingRect()
wy = r.top() - wr.height()
wx = r.left() + (r.width() - wr.width()) / 2.0
w.move(wx,wy)
c.setChanged(w.boundingRect())
self.numberHidden = False
w.show()
    def getNumber(self):
        return self.node.lpScopeDepth()
def updateTrace(self):
f = self.font()
f.setUnderline(self.node.filterExpression is not None)
self.setFont(f)
self.canvas().update()
if __name__ == "__main__":
from qt import *
app = QApplication([])
c = QCanvas(100,100)
c.setBackgroundColor(Qt.blue)
w = QCanvasView(c)
n = TreeCanvasNode("test",c)
n.setColor(Qt.red)
n.show()
app.setMainWidget(w)
w.show()
app.exec_loop()
|
95643
|
from binascii import hexlify
from crypto.transactions.deserializers.base import BaseDeserializer
class DelegateResignationDeserializer(BaseDeserializer):
def deserialize(self):
self.transaction.parse_signatures(
hexlify(self.serialized).decode(),
self.asset_offset
)
return self.transaction
|
95697
|
from jivago.lang.annotations import Serializable
@Serializable
class MyDto(object):
name: str
age: int
def __init__(self, name: str, age: int):
self.name = name
self.age = age
|
95706
|
import os
from pathlib import Path
import pytest
@pytest.fixture
def example_repo_path():
if 'HEXRD_EXAMPLE_REPO_PATH' not in os.environ:
        pytest.fail('Environment variable HEXRD_EXAMPLE_REPO_PATH not set!')
repo_path = os.environ['HEXRD_EXAMPLE_REPO_PATH']
return Path(repo_path)
|
95723
|
from typing import Iterable, Union
from datahub.emitter.mce_builder import get_sys_time
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.ingestion.api import RecordEnvelope
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Extractor, WorkUnit
from datahub.ingestion.api.workunit import MetadataWorkUnit, UsageStatsWorkUnit
from datahub.metadata.com.linkedin.pegasus2avro.mxe import (
MetadataChangeEvent,
MetadataChangeProposal,
SystemMetadata,
)
from datahub.metadata.schema_classes import UsageAggregationClass
class WorkUnitRecordExtractor(Extractor):
"""An extractor that simply returns the data inside workunits back as records."""
ctx: PipelineContext
def configure(self, config_dict: dict, ctx: PipelineContext) -> None:
self.ctx = ctx
def get_records(
self, workunit: WorkUnit
) -> Iterable[
RecordEnvelope[
Union[
MetadataChangeEvent,
MetadataChangeProposal,
MetadataChangeProposalWrapper,
UsageAggregationClass,
]
]
]:
if isinstance(workunit, MetadataWorkUnit):
if isinstance(workunit.metadata, MetadataChangeEvent):
mce = workunit.metadata
mce.systemMetadata = SystemMetadata(
lastObserved=get_sys_time(), runId=self.ctx.run_id
)
if len(mce.proposedSnapshot.aspects) == 0:
raise AttributeError("every mce must have at least one aspect")
if not workunit.metadata.validate():
raise ValueError(
f"source produced an invalid metadata work unit: {workunit.metadata}"
)
yield RecordEnvelope(
workunit.metadata,
{
"workunit_id": workunit.id,
},
)
elif isinstance(workunit, UsageStatsWorkUnit):
if not workunit.usageStats.validate():
raise ValueError(
f"source produced an invalid usage stat: {workunit.usageStats}"
)
yield RecordEnvelope(
workunit.usageStats,
{
"workunit_id": workunit.id,
},
)
else:
raise ValueError(f"unknown WorkUnit type {type(workunit)}")
def close(self):
pass
|
95738
|
from __future__ import print_function, unicode_literals
import logging
from collections import Counter
from re import compile, escape
from okcli.lexer import ORACLE_KEYWORDS
from prompt_toolkit.completion import Completer, Completion
from .packages.completion_engine import suggest_type
from .packages.parseutils import last_word
from .packages.special.favoritequeries import favoritequeries
_logger = logging.getLogger(__name__)
class SQLCompleter(Completer):
keywords = ORACLE_KEYWORDS
str_functions = ['ASCII', 'ASCIISTR', 'CHR', 'COMPOSE', 'CONCAT', 'CONVERT',
'DECOMPOSE', 'DUMP', 'INITCAP', 'INSTR', 'INSTR2',
'INSTR4', 'INSTRB', 'INSTRC',
'LENGTH', 'LENGTH2', 'LENGTH4',
'LENGTHB', 'LENGTHC', 'LOWER', 'LPAD', 'LTRIM', 'NCHR',
'REGEXP_INSTR', 'REGEXP_REPLACE', 'REGEXP_SUBSTR',
'REPLACE', 'RPAD', 'RTRIM', 'SOUNDEX', 'SUBSTR',
'TRANSLATE', 'TRIM', 'UPPER', 'VSIZE', ]
num_functions = ['ABS', 'ACOS', 'ASIN', 'ATAN', 'ATAN2',
'AVG', 'BITAND', 'CEIL', 'COS', 'COSH', 'COUNT', 'EXP',
'FLOOR', 'GREATEST', 'LEAST', 'LN', 'LOG', 'MAX', 'MEDIAN',
'MIN', 'MOD', 'POWER', 'REGEXP_COUNT', 'REMAINDER',
'ROUND', 'ROWNUM', 'SIGN', 'SIN', 'SINH', 'SQRT', 'SUM',
'TAN', 'TANH', 'TRUNC', ]
date_functions = ['ADD_MONTHS', 'CURRENT_DATE', 'CURRENT_TIMESTAMP',
'DBTIMEZONE', 'EXTRACT', 'LAST_DAY', 'LOCALTIMESTAMP',
'MONTHS_BETWEEN', 'NEW_TIME', 'NEXT_DAY', 'ROUND',
'SESSIONTIMEZONE', 'SYSDATE', 'SYSTIMESTAMP', 'TRUNC',
'TZ_OFFSET', ]
conv_functions = ['BIN_TO_NUM', 'CAST', 'CHARTOROWID', 'FROM_TZ',
'HEXTORAW', 'NUMTODSINTERVAL', 'NUMTOYMINTERVAL',
'RAWTOHEX', 'TO_CHAR', 'TO_CLOB', 'TO_DATE',
'TO_DSINTERVAL', 'TO_LOB', 'TO_MULTI_BYTE', 'TO_NCLOB',
'TO_NUMBER', 'TO_SINGLE_BYTE', 'TO_TIMESTAMP',
'TO_TIMESTAMP_TZ', 'TO_YMINTERVAL', ]
analytic_functions = ['CORR', 'COVAR_POP', 'COVAR_SAMP', 'CUME_DIST',
'DENSE_RANK', 'FIRST_VALUE', 'LAG', 'LAST_VALUE',
'LEAD', 'LISTAGG', 'NTH_VALUE', 'RANK', 'STDDEV',
'VAR_POP', 'VAR_SAMP', 'VARIANCE', ]
    advanced_functions = ['BFILENAME', 'CARDINALITY', 'CASE', 'COALESCE',
                          'DECODE', 'EMPTY_BLOB', 'EMPTY_CLOB', 'GROUP_ID',
                          'LNNVL', 'NANVL', 'NULLIF', 'NVL', 'NVL2',
                          'SYS_CONTEXT', 'UID', 'USER', 'USERENV', ]
    functions = sorted(set(str_functions + num_functions + date_functions +
                           conv_functions + analytic_functions +
                           advanced_functions))
show_items = []
# TODO
change_items = []
users = []
def __init__(self, smart_completion=True, supported_formats=()):
        super(SQLCompleter, self).__init__()
self.smart_completion = smart_completion
self.reserved_words = set()
for x in self.keywords:
self.reserved_words.update(x.split())
_logger.debug('reserved_words {}'.format(self.reserved_words))
self.name_pattern = compile(r"^[_a-z][_a-z0-9\$]*$")
self.special_commands = []
self.table_formats = supported_formats
self.reset_completions()
def escape_name(self, name):
return name
def unescape_name(self, name):
"""Unquote a string."""
if name and name[0] == '"' and name[-1] == '"':
name = name[1:-1]
return name
def escaped_names(self, names):
return [self.escape_name(name) for name in names]
def extend_special_commands(self, special_commands):
# Special commands are not part of all_completions since they can only
# be at the beginning of a line.
self.special_commands.extend(special_commands)
def extend_database_names(self, databases):
        _logger.info('extending databases {}'.format(databases))
self.databases.extend(databases)
def extend_keywords(self, additional_keywords):
self.keywords.extend(additional_keywords)
self.all_completions.update(additional_keywords)
def extend_show_items(self, show_items):
for show_item in show_items:
self.show_items.extend(show_item)
self.all_completions.update(show_item)
def extend_change_items(self, change_items):
for change_item in change_items:
self.change_items.extend(change_item)
self.all_completions.update(change_item)
def extend_users(self, users):
_logger.debug('extending users {}'.format(users))
for user in users:
self.users.extend(user)
self.all_completions.update(user)
def extend_schemata(self, schemas):
_logger.debug('extending schema {}'.format(schemas))
for schema in schemas:
self._extend_schemata(schema)
def _extend_schemata(self, schema):
# dbmetadata.values() are the 'tables' and 'functions' dicts
_logger.debug('extending schema with {}'.format(schema))
schema = schema.upper()
for metadata in self.dbmetadata.values():
metadata[schema] = {}
self.all_completions.update(schema)
def extend_relations(self, data, kind, schema):
"""Extend metadata for tables or views
:param data: list of (rel_name, ) tuples
:param kind: either 'tables' or 'views'
:return:
"""
# 'data' is a generator object. It can throw an exception while being
# consumed. This could happen if the user has launched the app without
# specifying a database name. This exception must be handled to prevent
# crashing.
_logger.info('extending {} with {}'.format(kind, data))
schema = schema.upper()
try:
data = [self.escaped_names(d) for d in data]
except Exception:
_logger.error('Error escaping data {}'.format(data), exc_info=True)
data = []
# dbmetadata['tables'][$schema_name][$table_name] should be a list of
# column names. Default to an asterisk
# TODO
# add schema to data instead of self.dbname
#
metadata = self.dbmetadata[kind]
for relname in data:
name = relname[0]
try:
metadata[schema][name] = ['*']
except KeyError:
_logger.error('%r %r listed in unrecognized schema %r',
kind, name, schema)
self.all_completions.add(name)
def extend_columns(self, column_data, kind, schema):
"""Extend column metadata
:param column_data: list of (rel_name, column_name) tuples
:param kind: either 'tables' or 'views'
:return:
"""
schema = schema.upper()
# 'column_data' is a generator object. It can throw an exception while
# being consumed. This could happen if the user has launched the app
# without specifying a database name. This exception must be handled to
# prevent crashing.
try:
column_data = [self.escaped_names(d) for d in column_data]
except Exception:
column_data = []
metadata = self.dbmetadata[kind]
for relname, column in column_data:
metadata[schema][relname].append(column)
self.all_completions.add(column)
def extend_functions(self, func_data, schema):
# 'func_data' is a generator object. It can throw an exception while
# being consumed. This could happen if the user has launched the app
# without specifying a database name. This exception must be handled to
# prevent crashing.
try:
func_data = [self.escaped_names(d) for d in func_data]
except Exception:
func_data = []
# dbmetadata['functions'][$schema_name][$function_name] should return
# function metadata.
metadata = self.dbmetadata['functions']
for func in func_data:
metadata[schema][func[0]] = None
self.all_completions.add(func[0])
def set_dbname(self, dbname):
self.dbname = dbname.upper()
def reset_completions(self):
self.databases = []
self.users = []
self.show_items = []
self.dbname = ''
self.dbmetadata = {'tables': {}, 'views': {}, 'functions': {}}
self.all_completions = set(self.keywords + self.functions)
@staticmethod
def find_matches(text, collection, start_only=False, fuzzy=True):
"""Find completion matches for the given text.
Given the user's input text and a collection of available
completions, find completions matching the last word of the
text.
If `start_only` is True, the text will match an available
completion only at the beginning. Otherwise, a completion is
considered a match if the text appears anywhere within it.
yields prompt_toolkit Completion instances for any matches found
in the collection of available completions.
"""
text = last_word(text, include='most_punctuations').lower()
completions = []
if fuzzy:
regex = '.*?'.join(map(escape, text))
pat = compile('(%s)' % regex)
for item in sorted(collection):
r = pat.search(item.lower())
if r:
completions.append((len(r.group()), r.start(), item))
else:
match_end_limit = len(text) if start_only else None
for item in sorted(collection):
match_point = item.lower().find(text, 0, match_end_limit)
if match_point >= 0:
completions.append((len(text), match_point, item))
return (Completion(z, -len(text)) for x, y, z in sorted(completions))
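    # Illustrative behaviour (hypothetical inputs, not in the original source):
    # with fuzzy=True, "slct" matches "SELECT" because the pattern
    # 's.*?l.*?c.*?t' does a subsequence match; with start_only=True and
    # fuzzy=False, "sel" matches "SELECT" but not "DESELECT".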
def get_completions(self, document, complete_event, smart_completion=None):
word_before_cursor = document.get_word_before_cursor(WORD=True)
if smart_completion is None:
smart_completion = self.smart_completion
# If smart_completion is off then match any word that starts with
# 'word_before_cursor'.
if not smart_completion:
return self.find_matches(word_before_cursor, self.all_completions,
start_only=True, fuzzy=False)
completions = []
suggestions = suggest_type(document.text, document.text_before_cursor)
for suggestion in suggestions:
_logger.debug('Suggestion type: %r', suggestion['type'])
if suggestion['type'] == 'column':
tables = suggestion['tables']
_logger.debug("Completion column scope: %r", tables)
scoped_cols = self.populate_scoped_cols(tables)
if suggestion.get('drop_unique'):
# drop_unique is used for 'tb11 JOIN tbl2 USING (...'
# which should suggest only columns that appear in more than
# one table
scoped_cols = [
col for (col, count) in Counter(scoped_cols).items()
if count > 1 and col != '*'
]
cols = self.find_matches(word_before_cursor, scoped_cols)
completions.extend(cols)
elif suggestion['type'] == 'function':
# suggest user-defined functions using substring matching
funcs = self.populate_schema_objects(suggestion['schema'],
'functions')
user_funcs = self.find_matches(word_before_cursor, funcs)
completions.extend(user_funcs)
# suggest hardcoded functions using startswith matching only if
# there is no schema qualifier. If a schema qualifier is
# present it probably denotes a table.
# eg: SELECT * FROM users u WHERE u.
if not suggestion['schema']:
predefined_funcs = self.find_matches(word_before_cursor,
self.functions,
start_only=True,
fuzzy=False)
completions.extend(predefined_funcs)
elif suggestion['type'] == 'table':
tables = self.populate_schema_objects(suggestion['schema'],
'tables')
tables = self.find_matches(word_before_cursor, tables)
completions.extend(tables)
elif suggestion['type'] == 'view':
views = self.populate_schema_objects(suggestion['schema'],
'views')
views = self.find_matches(word_before_cursor, views)
completions.extend(views)
elif suggestion['type'] == 'alias':
aliases = suggestion['aliases']
aliases = self.find_matches(word_before_cursor, aliases)
completions.extend(aliases)
elif suggestion['type'] in ('database', 'schema'):
dbs = self.find_matches(word_before_cursor, self.databases)
completions.extend(dbs)
elif suggestion['type'] == 'keyword':
keywords = self.find_matches(word_before_cursor, self.keywords,
start_only=True,
fuzzy=False)
completions.extend(keywords)
elif suggestion['type'] == 'show':
show_items = self.find_matches(word_before_cursor,
self.show_items,
start_only=False,
fuzzy=True)
completions.extend(show_items)
elif suggestion['type'] == 'change':
change_items = self.find_matches(word_before_cursor,
self.change_items,
start_only=False,
fuzzy=True)
completions.extend(change_items)
elif suggestion['type'] == 'user':
users = self.find_matches(word_before_cursor, self.users,
start_only=False,
fuzzy=True)
completions.extend(users)
elif suggestion['type'] == 'special':
special = self.find_matches(word_before_cursor,
self.special_commands,
start_only=True,
fuzzy=False)
completions.extend(special)
elif suggestion['type'] == 'favoritequery':
queries = self.find_matches(word_before_cursor,
favoritequeries.list(),
start_only=False, fuzzy=True)
completions.extend(queries)
elif suggestion['type'] == 'table_format':
formats = self.find_matches(word_before_cursor,
self.table_formats,
start_only=True, fuzzy=False)
completions.extend(formats)
return completions
def populate_scoped_cols(self, scoped_tbls):
"""Find all columns in a set of scoped_tables
:param scoped_tbls: list of (schema, table, alias) tuples
:return: list of column names
"""
columns = []
meta = self.dbmetadata
for tbl in scoped_tbls:
# A fully qualified schema.relname reference or default_schema
# DO NOT escape schema names.
schema = tbl[0] or self.dbname
schema = schema.upper()
relname = tbl[1]
escaped_relname = self.escape_name(tbl[1])
# We don't know if schema.relname is a table or view. Since
# tables and views cannot share the same name, we can check one
# at a time
try:
columns.extend(meta['tables'][schema][relname])
# Table exists, so don't bother checking for a view
continue
except KeyError:
try:
columns.extend(meta['tables'][schema][escaped_relname])
# Table exists, so don't bother checking for a view
continue
except KeyError:
pass
try:
columns.extend(meta['views'][schema][relname])
except KeyError:
pass
return columns
def populate_schema_objects(self, schema, obj_type):
"""Returns list of tables or functions for a (optional) schema"""
metadata = self.dbmetadata[obj_type]
schema = schema or self.dbname
schema = schema.upper()
try:
objects = metadata[schema].keys()
except KeyError:
# schema doesn't exist
objects = []
return objects
|
95745
|
from uliweb.core.commands import Command, CommandManager, get_commands
from optparse import make_option
class DataDictCommand(CommandManager):
#change the name to real command name, such as makeapp, makeproject, etc.
name = 'datadict'
#help information
help = "Data dict tool, create index, validate models' of apps or tables"
#args information, used to display show the command usage message
args = ''
#if True, it'll check the current directory should has apps directory
check_apps_dirs = True
#if True, it'll check args parameters should be valid apps name
check_apps = False
#if True, it'll skip not predefined parameters in options_list, otherwise it'll
#complain not the right parameters of the command, it'll used in subcommands or
#passing extra parameters to a special command
skip_options = True
def get_commands(self, global_options):
import datadict_subcommands as subcommands
cmds = get_commands(subcommands)
return cmds
|
95777
|
import json
import re
import urllib.parse
from typing import Tuple, Dict, Union, List, Any, Optional
from lumigo_tracer.libs import xmltodict
import functools
import itertools
from collections.abc import Iterable
from lumigo_tracer.lumigo_utils import Configuration, get_logger
def safe_get(d: Union[dict, list], keys: List[Union[str, int]], default: Any = None) -> Any:
"""
:param d: Should be list or dict, otherwise return default.
:param keys: If keys[i] is int, then it should be a list index. If keys[i] is string, then it should be a dict key.
:param default: If encountered a problem, return default.
:return: d[keys[0]][keys[1]]...
"""
def get_next_val(prev_result, key):
if isinstance(prev_result, dict) and isinstance(key, str):
return prev_result.get(key, default)
elif isinstance(prev_result, list) and isinstance(key, int):
return safe_get_list(prev_result, key, default)
else:
return default
return functools.reduce(get_next_val, keys, d)
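# Illustrative examples (not in the original module):
#   safe_get({"a": [{"b": 1}]}, ["a", 0, "b"])             -> 1
#   safe_get({"a": [{"b": 1}]}, ["a", 5, "b"], default=0)  -> 0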
def safe_get_list(lst: list, index: Union[int, str], default=None):
"""
This function return the organ in the `index` place from the given list.
If this values doesn't exist, return default.
"""
if isinstance(index, str):
try:
index = int(index)
except ValueError:
return default
if not isinstance(lst, Iterable):
return default
return lst[index] if len(lst) > index else default
def safe_split_get(string: str, sep: str, index: int, default=None) -> str:
"""
This function splits the given string using the sep, and returns the organ in the `index` place.
If such index doesn't exist, returns default.
"""
if not isinstance(string, str):
return default
return safe_get_list(string.split(sep), index, default)
def safe_key_from_json(json_str: bytes, key: object, default=None) -> Union[str, list]:
"""
This function tries to read the given str as json, and returns the value of the desired key.
If the key doesn't found or the input string is not a valid json, returns the default.
"""
try:
return json.loads(json_str).get(key, default)
except json.JSONDecodeError:
return default
def safe_key_from_xml(xml_str: bytes, key: str, default=None):
"""
This function tries to read the given str as XML, and returns the value of the desired key.
If the key doesn't found or the input string is not a valid XML, returns the default.
We accept keys with hierarchy by `/` (i.e. we accept keys with the format `outer/inner`)
If there are some keys with the same name at the same hierarchy, they can be accessed as index in list,
e.g: <a><b>val0</b><b>val1</b></a> will be accessed with "a/b/0" or "a/b/1".
"""
try:
result = functools.reduce(
lambda prev, sub_key: safe_get_list(prev, sub_key)
if isinstance(prev, list)
else prev.get(sub_key, {}),
key.split("/"),
xmltodict.parse(xml_str),
)
return result or default
except xmltodict.expat.ExpatError:
return default
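# Illustrative example (not in the original module):
#   safe_key_from_xml(b"<a><b>val0</b><b>val1</b></a>", "a/b/1")  -> 'val1'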
def safe_key_from_query(body: bytes, key: str, default=None) -> str:
"""
This function assumes that the first row in the body is the url arguments.
We assume that the structure of the parameters is as follow:
* character-escaped using urllib.quote
* values separated with '&'
* each item is <key>=<value>
Note: This function decode the given body, therefore duplicate it's size. Be aware to use only in resources
with restricted body length.
"""
return dict(re.findall(r"([^&]+)=([^&]*)", urllib.parse.unquote(body.decode()))).get(
key, default
)
def parse_trace_id(trace_id_str: str) -> Tuple[str, str, str]:
"""
This function parses the trace_id, and result dictionary the describes the data.
We assume the following format:
* values separated with ';'
* each item is <key>=<value>
:param trace_id_str: The string that came from the environment variables.
"""
if not isinstance(trace_id_str, str):
return "", "", ""
trace_id_parameters = dict(re.findall(r"([^;]+)=([^;]*)", trace_id_str))
root = trace_id_parameters.get("Root", "")
root_end_index = trace_id_str.find(";")
suffix = trace_id_str[root_end_index:] if ";" in trace_id_str else trace_id_str
return root, safe_split_get(root, "-", 2, default=""), suffix
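# Illustrative example (hypothetical trace id):
#   parse_trace_id("Root=1-aaaa-bbbb;Parent=cccc")
#   -> ("1-aaaa-bbbb", "bbbb", ";Parent=cccc")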
def recursive_json_join(d1: Optional[dict], d2: Optional[dict]):
"""
This function return the recursive joint dictionary, which means that for every (item, key) in the result
dictionary it holds that:
* if key in d1 and is not dictionary, then the value is d1[key]
* if key in d2 and is not dictionary, then the value is d2[key]
* otherwise, join d1[key] and d2[key]
"""
if d1 is None or d2 is None:
return d1 or d2
d = {}
for key in set(itertools.chain(d1.keys(), d2.keys())):
value = d1.get(key, d2.get(key))
if isinstance(value, dict):
d[key] = recursive_json_join(d1.get(key), d2.get(key)) # type: ignore
else:
d[key] = value
return d
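# Illustrative example (not in the original module):
#   recursive_json_join({"a": 1, "b": {"c": 2}}, {"b": {"d": 3}, "e": 4})
#   -> {"a": 1, "b": {"c": 2, "d": 3}, "e": 4}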
def should_scrub_domain(url: str) -> bool:
if url and Configuration.domains_scrubber:
for regex in Configuration.domains_scrubber:
if regex.match(url):
return True
return False
def str_to_list(val: str) -> Optional[List[str]]:
try:
if val:
return val.split(",")
except Exception as e:
get_logger().debug("Error while convert str to list", exc_info=e)
return None
def str_to_tuple(val: str) -> Optional[Tuple]:
try:
if val:
return tuple(val.split(","))
except Exception as e:
get_logger().debug("Error while convert str to tuple", exc_info=e)
return None
def recursive_get_key(d: Union[List, Dict[str, Union[Dict, str]]], key, depth=None, default=None):
if depth is None:
depth = Configuration.get_key_depth
if depth == 0:
return default
    if isinstance(d, dict) and key in d:
        return d[key]
if isinstance(d, list):
for v in d:
recursive_result = recursive_get_key(v, key, depth - 1, default)
if recursive_result:
return recursive_result
if isinstance(d, dict):
for v in d.values():
if isinstance(v, (list, dict)):
recursive_result = recursive_get_key(v, key, depth - 1, default)
if recursive_result:
return recursive_result
return default
def extract_function_name_from_arn(arn: str) -> str:
return safe_split_get(arn, ":", 6)
|
95795
|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from astroid import MANAGER, Class, Instance, Function, Arguments, Pass
def transform_model_class(cls):
if cls.is_subtype_of('django.db.models.base.Model'):
core_exceptions = MANAGER.ast_from_module_name('django.core.exceptions')
# add DoesNotExist exception
DoesNotExist = Class('DoesNotExist', None)
DoesNotExist.bases = core_exceptions.lookup('ObjectDoesNotExist')[1]
cls.locals['DoesNotExist'] = [DoesNotExist]
# add MultipleObjectsReturned exception
MultipleObjectsReturned = Class('MultipleObjectsReturned', None)
MultipleObjectsReturned.bases = core_exceptions.lookup(
'MultipleObjectsReturned')[1]
cls.locals['MultipleObjectsReturned'] = [MultipleObjectsReturned]
# add objects manager
if 'objects' not in cls.locals:
try:
Manager = MANAGER.ast_from_module_name(
'django.db.models.manager').lookup('Manager')[1][0]
QuerySet = MANAGER.ast_from_module_name(
'django.db.models.query').lookup('QuerySet')[1][0]
except IndexError:
pass
else:
if isinstance(Manager.body[0], Pass):
# for django >= 1.7
for func_name, func_list in QuerySet.locals.items():
if (not func_name.startswith('_') and
func_name not in Manager.locals):
func = func_list[0]
if (isinstance(func, Function) and
'queryset_only' not in func.instance_attrs):
f = Function(func_name, None)
f.args = Arguments()
Manager.locals[func_name] = [f]
cls.locals['objects'] = [Instance(Manager)]
# add id field
if 'id' not in cls.locals:
try:
AutoField = MANAGER.ast_from_module_name(
'django.db.models.fields').lookup('AutoField')[1][0]
except IndexError:
pass
else:
cls.locals['id'] = [Instance(AutoField)]
|
95822
|
import functools
import hashlib
import json
import os
import time
import typing
from collections import namedtuple
from cauldron import environ
from cauldron.session import definitions as file_definitions
from cauldron.session import writing
from cauldron.session.caching import SharedCache
from cauldron.session.projects import definitions
from cauldron.session.projects import steps
from cauldron.session.report import Report
DEFAULT_SCHEME = 'S{{##}}-{{name}}.{{ext}}'
StopCondition = namedtuple('StopCondition', ['aborted', 'halt'])
class Project:
"""..."""
def __init__(
self,
source_directory: str,
results_path: str = None,
shared: typing.Union[dict, SharedCache] = None
):
"""
:param source_directory:
:param results_path:
[optional] The path where the results files for the project will
be saved. If omitted, the default global results path will be
used.
:param shared:
[optional] The shared data cache used to store project data when
run
"""
source_directory = environ.paths.clean(source_directory)
if os.path.isfile(source_directory):
source_directory = os.path.dirname(source_directory)
self.source_directory = source_directory
self.steps = [] # type: typing.List[steps.ProjectStep]
self._results_path = results_path # type: str
self._current_step = None # type: steps.ProjectStep
self.last_modified = None
self.remote_source_directory = None # type: str
def as_shared_cache(source):
if source and not hasattr(source, 'fetch'):
return SharedCache().put(**source)
return source or SharedCache()
self.stop_condition = StopCondition(False, False) # type: StopCondition
self.shared = as_shared_cache(shared)
self.settings = SharedCache()
self.refresh()
@property
def uuid(self) -> str:
"""
The unique identifier for the project among all other projects, which
is based on a hashing of the project's source path to prevent naming
collisions when storing project information from multiple projects in
the same directory (e.g. common results directory).
"""
return hashlib.sha1(self.source_path.encode()).hexdigest()
@property
def is_remote_project(self) -> bool:
"""Whether or not this project is remote"""
project_path = environ.paths.clean(self.source_directory)
return project_path.find('cd-remote-project') != -1
@property
def library_directories(self) -> typing.List[str]:
"""
The list of directories to all of the library locations
"""
def listify(value):
return [value] if isinstance(value, str) else list(value)
# If this is a project running remotely remove external library
# folders as the remote shared libraries folder will contain all
# of the necessary dependencies
is_local_project = not self.is_remote_project
folders = [
f
for f in listify(self.settings.fetch('library_folders', ['libs']))
if is_local_project or not f.startswith('..')
]
# Include the remote shared library folder as well
folders.append('../__cauldron_shared_libs')
# Include the project directory as well
folders.append(self.source_directory)
return [
environ.paths.clean(os.path.join(self.source_directory, folder))
for folder in folders
]
@property
def asset_directories(self):
"""..."""
def listify(value):
return [value] if isinstance(value, str) else list(value)
folders = listify(self.settings.fetch('asset_folders', ['assets']))
return [
environ.paths.clean(os.path.join(self.source_directory, folder))
for folder in folders
]
@property
def has_error(self):
"""..."""
for s in self.steps:
if s.error:
return True
return False
@property
def title(self) -> str:
out = self.settings.fetch('title')
if out:
return out
out = self.settings.fetch('name')
if out:
return out
return self.id
@title.setter
def title(self, value: str):
self.settings.title = value
@property
def id(self) -> str:
return self.settings.fetch('id', 'unknown')
@property
def naming_scheme(self) -> str:
return self.settings.fetch('naming_scheme', None)
@naming_scheme.setter
def naming_scheme(self, value: typing.Union[str, None]):
self.settings.put(naming_scheme=value)
@property
def current_step(self) -> typing.Union['steps.ProjectStep', None]:
if len(self.steps) < 1:
return None
step = self._current_step
return step if step else self.steps[0]
@current_step.setter
def current_step(self, value: typing.Union[Report, None]):
self._current_step = value
@property
def source_path(self) -> typing.Union[None, str]:
directory = self.source_directory
return os.path.join(directory, 'cauldron.json') if directory else None
@property
def results_path(self) -> str:
"""The path where the project results will be written"""
def possible_paths():
yield self._results_path
yield self.settings.fetch('path_results')
yield environ.configs.fetch('results_directory')
yield environ.paths.results(self.uuid)
return next(p for p in possible_paths() if p is not None)
@results_path.setter
def results_path(self, value: str):
self._results_path = environ.paths.clean(value)
@property
def url(self) -> str:
"""
Returns the URL that will open this project results file in the browser
:return:
"""
return 'file://{path}?id={id}'.format(
path=os.path.join(self.results_path, 'project.html'),
id=self.uuid
)
@property
def baked_url(self) -> str:
"""
Returns the URL that will open this project results file in the browser
with the loading information baked into the file so that no URL
parameters are needed to view it, which is needed on platforms like
windows
"""
return 'file://{path}'.format(
path=os.path.join(self.results_path, 'display.html'),
id=self.uuid
)
@property
def output_directory(self) -> str:
"""
Returns the directory where the project results files will be written
"""
return os.path.join(self.results_path, 'reports', self.uuid, 'latest')
@property
def output_path(self) -> str:
"""
Returns the full path to where the results.js file will be written
:return:
"""
return os.path.join(self.output_directory, 'results.js')
def select_step(
self,
step_name_or_index: typing.Union[str, int, 'steps.ProjectStep']
) -> typing.Optional['steps.ProjectStep']:
"""
Selects the specified step by step object, step name or index if
such a step exists and returns that step if it does.
"""
if isinstance(step_name_or_index, steps.ProjectStep):
step = (
step_name_or_index
if step_name_or_index in self.steps
else None
)
elif isinstance(step_name_or_index, int):
index = min(len(self.steps) - 1, step_name_or_index)
step = self.steps[index]
else:
step = self.get_step(step_name_or_index or '')
if not step:
return None
for s in self.steps:
s.is_selected = (s == step)
return step
def make_remote_url(self, host: str = None):
"""..."""
clean_host = (host or '').rstrip('/')
return '{}/view/project.html?id={}'.format(clean_host, self.uuid)
def kernel_serialize(self):
"""..."""
return dict(
uuid=self.uuid,
stop_condition=self.stop_condition._asdict(),
last_modified=self.last_modified,
remote_source_directory=self.remote_source_directory,
source_directory=self.source_directory,
source_path=self.source_path,
output_directory=self.output_directory,
output_path=self.output_path,
url=self.url,
remote_slug=self.make_remote_url(),
title=self.title,
id=self.id,
steps=[s.kernel_serialize() for s in self.steps],
naming_scheme=self.naming_scheme
)
def refresh(self, force: bool = False) -> bool:
"""
Loads the cauldron.json definition file for the project and populates
the project with the loaded data. Any existing data will be overwritten,
if the new definition file differs from the previous one.
If the project has already loaded with the most recent version of the
cauldron.json file, this method will return without making any changes
to the project.
:param force:
If true the project will be refreshed even if the project file
modified timestamp doesn't indicate that it needs to be refreshed.
:return:
Whether or not a refresh was needed and carried out
"""
lm = self.last_modified
is_newer = lm is not None and lm >= os.path.getmtime(self.source_path)
if not force and is_newer:
return False
old_definition = self.settings.fetch(None)
new_definition = definitions.load_project_definition(
self.source_directory
)
if not force and old_definition == new_definition:
return False
self.settings.clear().put(**new_definition)
old_step_definitions = old_definition.get('steps', [])
new_step_definitions = new_definition.get('steps', [])
if not force and old_step_definitions == new_step_definitions:
return True
old_steps = self.steps
self.steps = []
for step_data in new_step_definitions:
matches = [s for s in old_step_definitions if s == step_data]
if len(matches) > 0:
index = old_step_definitions.index(matches[0])
self.steps.append(old_steps[index])
else:
self.add_step(step_data)
self.last_modified = time.time()
return True
def get_step(self, name: str) -> typing.Optional['steps.ProjectStep']:
"""Returns the step by name or None if no such step is found."""
for s in self.steps:
if s.definition.name == name:
return s
return None
def get_step_by_reference_id(
self,
reference_id: str
) -> typing.Union['steps.ProjectStep', None]:
"""Returns the step by its ID or None if no such step is found."""
for s in self.steps:
if s.reference_id == reference_id:
return s
return None
def index_of_step(self, name) -> typing.Union[int, None]:
"""
:param name:
:return:
"""
name = name.strip('"')
for index, s in enumerate(self.steps):
if s.definition.name == name:
return int(index)
return None
def add_step(
self,
step_data: typing.Union[str, dict],
index: int = None
) -> typing.Union['steps.ProjectStep', None]:
"""
:param step_data:
:param index:
:return:
"""
fd = file_definitions.FileDefinition(
data=step_data,
project=self,
project_folder=functools.partial(
self.settings.fetch,
'steps_folder'
)
)
if not fd.name:
self.last_modified = 0
return None
ps = steps.ProjectStep(self, fd)
if index is None:
self.steps.append(ps)
else:
if index < 0:
index %= len(self.steps)
self.steps.insert(index, ps)
if fd.name.endswith('.py'):
for i in range(self.steps.index(ps) + 1, len(self.steps)):
self.steps[i].mark_dirty(True)
self.last_modified = time.time()
return ps
def remove_step(self, name) -> typing.Union['steps.ProjectStep', None]:
"""
:param name:
:return:
"""
step = None
for ps in self.steps:
if ps.definition.name == name:
step = ps
break
if step is None:
return None
if step.definition.name.endswith('.py'):
for i in range(self.steps.index(step) + 1, len(self.steps)):
self.steps[i].mark_dirty(True)
self.steps.remove(step)
return step
def save(self, path: str = None):
"""
:param path:
:return:
"""
if not path:
path = self.source_path
self.settings.put(
steps=[ps.definition.serialize() for ps in self.steps]
)
data = self.settings.fetch(None)
with open(path, 'w+') as f:
json.dump(data, f, indent=2, sort_keys=True)
self.last_modified = time.time()
def write(self) -> str:
"""..."""
writing.save(self)
return self.url
def status(self) -> dict:
return dict(
id=self.id,
steps=[s.status() for s in self.steps],
stop_condition=self.stop_condition._asdict(),
last_modified=self.last_modified,
remote_slug=self.make_remote_url()
)
|
95856
|
from typing import List
import ast
import logging
from ..module import Module, SafeFilenameModule
from .path import ImportPath
logger = logging.getLogger(__name__)
class DependencyAnalyzer:
"""
Analyzes a set of Python modules for imports between them.
Args:
modules: list of all SafeFilenameModules that make up the package.
package: the Python package that contains all the modules.
Usage:
analyzer = DependencyAnalyzer(modules)
import_paths = analyzer.determine_import_paths()
"""
def __init__(self, modules: List[SafeFilenameModule], package: Module) -> None:
self.modules = modules
self.package = package
def determine_import_paths(self) -> List[ImportPath]:
"""
Return a list of the ImportPaths for all the modules.
"""
import_paths: List[ImportPath] = []
for module in self.modules:
import_paths.extend(
self._determine_import_paths_for_module(module)
)
return import_paths
def _determine_import_paths_for_module(self, module: SafeFilenameModule) -> List[ImportPath]:
"""
Return a list of all the ImportPaths for which a given Module is the importer.
"""
import_paths: List[ImportPath] = []
imported_modules = self._get_imported_modules(module)
for imported_module in imported_modules:
import_paths.append(
ImportPath(
importer=module,
imported=imported_module
)
)
return import_paths
def _get_imported_modules(self, module: SafeFilenameModule) -> List[Module]:
"""
Statically analyses the given module and returns a list of Modules that it imports.
Note: this method only analyses the module in question and will not load any other code,
so it relies on self.modules to deduce which modules it imports. (This is because you
can't know whether "from foo.bar import baz" is importing a module called `baz`,
or a function `baz` from the module `bar`.)
"""
imported_modules = []
with open(module.filename) as file:
module_contents = file.read()
ast_tree = ast.parse(module_contents)
for node in ast.walk(ast_tree):
if isinstance(node, ast.ImportFrom):
# Parsing something in the form 'from x import ...'.
assert isinstance(node.level, int)
if node.level == 0:
# Absolute import.
# Let the type checker know we expect node.module to be set here.
assert isinstance(node.module, str)
if not node.module.startswith(self.package.name):
# Don't include imports of modules outside this package.
continue
module_base = node.module
elif node.level >= 1:
# Relative import. The level corresponds to how high up the tree it goes;
# for example 'from ... import foo' would be level 3.
importing_module_components = module.name.split('.')
# TODO: handle level that is too high.
# Trim the base module by the number of levels.
if module.filename.endswith('__init__.py'):
# If the scanned module is an __init__.py file, we don't want
# to go up an extra level.
number_of_levels_to_trim_by = node.level - 1
else:
number_of_levels_to_trim_by = node.level
if number_of_levels_to_trim_by:
module_base = '.'.join(
importing_module_components[:-number_of_levels_to_trim_by]
)
else:
module_base = '.'.join(importing_module_components)
if node.module:
module_base = '.'.join([module_base, node.module])
# node.names corresponds to 'a', 'b' and 'c' in 'from x import a, b, c'.
for alias in node.names:
full_module_name = '.'.join([module_base, alias.name])
imported_modules.append(Module(full_module_name))
elif isinstance(node, ast.Import):
# Parsing a line in the form 'import x'.
for alias in node.names:
if not alias.name.startswith(self.package.name):
# Don't include imports of modules outside this package.
continue
imported_modules.append(Module(alias.name))
else:
# Not an import statement; move on.
continue
imported_modules = self._trim_each_to_known_modules(imported_modules)
return imported_modules
def _trim_each_to_known_modules(self, imported_modules: List[Module]) -> List[Module]:
known_modules = []
for imported_module in imported_modules:
if imported_module in self.modules:
known_modules.append(imported_module)
else:
# The module isn't in the known modules. This is because it's something *within*
# a module (e.g. a function): the result of something like 'from .subpackage
# import my_function'. So we trim the components back to the module.
components = imported_module.name.split('.')[:-1]
trimmed_module = Module('.'.join(components))
if trimmed_module in self.modules:
known_modules.append(trimmed_module)
else:
# TODO: we may want to warn the user about this.
logger.debug('{} not found in modules.'.format(trimmed_module))
return known_modules
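if __name__ == "__main__":
# A minimal, standalone demonstration (not part of the analyzer itself) of the
# ast behaviour relied on above: `level` counts the leading dots of a relative
# import, and `module` is None for a bare "from ... import foo".
demo_tree = ast.parse("from ...subpackage import foo")
demo_node = next(n for n in ast.walk(demo_tree) if isinstance(n, ast.ImportFrom))
print(demo_node.level, demo_node.module, [a.name for a in demo_node.names])
# -> 3 subpackage ['foo']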
|
95899
|
import functools
import warnings
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability import bijectors as tfb
from tensorflow_probability import distributions as tfd
tf.enable_v2_behavior()
warnings.filterwarnings('ignore')
def probabilistic_pca(data_dim, latent_dim, num_datapoints, stddv_datapoints, w):
# w = yield tfd.Normal(loc=tf.zeros([data_dim, latent_dim]),
# scale=2.0 * tf.ones([data_dim, latent_dim]),
# name="w")
z = yield tfd.Normal(loc=tf.zeros([latent_dim, num_datapoints]),
scale=tf.ones([latent_dim, num_datapoints]),
name="z")
x = yield tfd.Normal(loc=tf.matmul(w, z),
scale=stddv_datapoints,
name="x")
num_datapoints = 500
data_dim = 2
latent_dim = 1
stddv_datapoints = 0.5
# w = tf.Variable(np.random.normal(size=[data_dim, latent_dim]).astype(np.float32))
w_true = tf.Variable(np.array([[5.], [5.]]).astype(np.float32))
concrete_ppca_model = functools.partial(probabilistic_pca,
data_dim=data_dim,
latent_dim=latent_dim,
num_datapoints=num_datapoints,
stddv_datapoints=stddv_datapoints,
w=w_true)
model = tfd.JointDistributionCoroutineAutoBatched(concrete_ppca_model)
actual_z, x_train = model.sample()
w = tf.Variable(tf.random.normal([data_dim, latent_dim]))
print(w)
concrete_ppca_model = functools.partial(probabilistic_pca,
data_dim=data_dim,
latent_dim=latent_dim,
num_datapoints=num_datapoints,
stddv_datapoints=stddv_datapoints,
w=w)
model = tfd.JointDistributionCoroutineAutoBatched(concrete_ppca_model)
target_log_prob_fn = lambda z: model.log_prob((z, x_train))
# qw_mean = tf.Variable(tf.random.normal([data_dim, latent_dim]))
qz_mean = tf.Variable(tf.random.normal([latent_dim, num_datapoints]))
# qw_stddv = tfp.util.TransformedVariable(1e-4 * tf.ones([data_dim, latent_dim]),
# bijector=tfb.Softplus())
qz_stddv = tfp.util.TransformedVariable(
1e-4 * tf.ones([latent_dim, num_datapoints]),
bijector=tfb.Softplus())
def factored_normal_variational_model():
# qw = yield tfd.Normal(loc=qw_mean, scale=qw_stddv, name="qw")
qz = yield tfd.Normal(loc=qz_mean, scale=qz_stddv, name="qz")
surrogate_posterior = tfd.JointDistributionCoroutineAutoBatched(
factored_normal_variational_model)
losses = tfp.vi.fit_surrogate_posterior(
target_log_prob_fn,
surrogate_posterior=surrogate_posterior,
optimizer=tf.optimizers.Adam(learning_rate=0.05),
num_steps=1000)
print(w)
plt.scatter(x_train.numpy()[0, :], x_train.numpy()[1, :])
plt.axis([-20, 20, -20, 20])
plt.show()
import ipdb; ipdb.set_trace()
|
95900
|
import vlc
import time
vlc_instance = vlc.Instance('--input-repeat=-1')
player = vlc_instance.media_player_new()
media = vlc_instance.media_new(r"main\tracks\TRACK_1.mp3")
player.set_media(media)
player.play()
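# play() is asynchronous and returns immediately; the sleep below keeps the
# process alive so playback can actually be heard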
time.sleep(10)
|
95901
|
from django.shortcuts import render, get_object_or_404
from django.views.generic.base import View
from .models import Pages
class Page(View):
"""Вывод страниц"""
def get(self, request, slug=None):
if slug is not None:
page = get_object_or_404(Pages, slug=slug, published=True)
else:
page = get_object_or_404(Pages, slug__isnull=True, published=True)
return render(request, page.template, {"page": page})
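# A possible URLconf wiring for this view (hypothetical paths, shown only for
# illustration; the real routes live in the project's urls.py):
# from django.urls import path
# from .views import Page
# urlpatterns = [
#     path("", Page.as_view(), name="home"),
#     path("<slug:slug>/", Page.as_view(), name="page"),
# ]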
|
95917
|
import joblib
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data import TensorDataset, DataLoader
from torch import nn, optim
import matplotlib.pyplot as plt
X_train = joblib.load('ch08/X_train.joblib')
y_train = joblib.load('ch08/y_train.joblib')
X_train = torch.from_numpy(X_train.astype(np.float32)).clone()
y_train = torch.from_numpy(y_train.astype(np.int64)).clone()
X_valid = joblib.load('ch08/X_valid.joblib')
y_valid = joblib.load('ch08/y_valid.joblib')
# move the validation tensors to the GPU as well; net(X_valid) below would otherwise mix CPU and GPU tensors
X_valid = torch.from_numpy(X_valid.astype(np.float32)).clone().to('cuda:0')
y_valid = torch.from_numpy(y_valid.astype(np.int64)).clone().to('cuda:0')
X_test = joblib.load('ch08/X_test.joblib')
y_test = joblib.load('ch08/y_test.joblib')
X_test = torch.from_numpy(X_test.astype(np.float32)).clone()
y_test = torch.from_numpy(y_test.astype(np.int64)).clone()
X = X_train
y = y_train
X = X.to('cuda:0')
y = y.to('cuda:0')
ds = TensorDataset(X, y)
loss_fn = nn.CrossEntropyLoss()
batchSize = [1, 2, 4, 8]
for bs in batchSize:
# re-initialize the model and optimizer for each batch size so every run starts from the same untrained state
net = nn.Linear(X.size()[1], 4)
net = net.to('cuda:0')
optimizer = optim.SGD(net.parameters(), lr=0.01)
loader = DataLoader(ds, batch_size=bs, shuffle=True)
train_losses = []
valid_losses = []
train_accs = []
valid_accs = []
for epoc in tqdm(range(100)):
train_running_loss = 0.0
valid_running_loss = 0.0
for xx, yy in loader:
y_pred = net(xx)
loss = loss_fn(y_pred, yy)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_running_loss += loss.item()
valid_running_loss += loss_fn(net(X_valid), y_valid).item()
joblib.dump(net.state_dict(), f'ch08/state_dict_{epoc}.joblib')
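# note: the running losses above are sums over len(loader) mini-batches, so
# curves for different batch sizes are on different scales; dividing by
# len(loader) would make them directly comparable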
train_losses.append(train_running_loss)
valid_losses.append(valid_running_loss)
_, y_pred_train = torch.max(net(X), 1)
train_accs.append((y_pred_train == y).sum().item() / len(y))
_, y_pred_valid = torch.max(net(X_valid), 1)
valid_accs.append((y_pred_valid == y_valid).sum().item() / len(y_valid))
plt.plot(train_losses, label='train loss')
plt.plot(valid_losses, label='valid loss')
plt.legend()
plt.show()
plt.plot(train_accs, label='train acc')
plt.plot(valid_accs, label='valid acc')
plt.legend()
plt.show()
|
95921
|
import random
from time import sleep
from celery import chain, chord, shared_task, states
from django.core.files.base import ContentFile
from django.db import transaction
from django.utils import timezone
from eulxml import xmlmap
from extras.tasks import CurrentUserTaskMixin
from ows_client.request_builder import CatalogueServiceWeb
from registry.enums.service import HttpMethodEnum, OGCOperationEnum
from registry.models import DatasetMetadata, OperationUrl, Service
from registry.models.harvest import HarvestResult
from registry.xmlmapper.ogc.csw_get_record_response import GetRecordsResponse
MAX_RECORDS_TEST_LIST = [50, 100, 200, 400]
@shared_task(name="async_harvest_service",
bind=True,
base=CurrentUserTaskMixin)
def harvest_service(self,
service,
**kwargs):
_calibrate_step_size = chord(
[get_response_elapsed.s(service, test_max_records, **kwargs)
for test_max_records in MAX_RECORDS_TEST_LIST],
calibrate_step_size.s(**kwargs))
workflow = chain(_calibrate_step_size,
schedule_get_records.s(service, **kwargs))
workflow.apply_async()
return self.job.pk
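# Shape of the workflow built above, for reference:
#   chord([get_response_elapsed(service, 50), ..., get_response_elapsed(service, 400)],
#         calibrate_step_size) | schedule_get_records(service)
# the elapsed-time probes run in parallel, the chord callback picks the best
# step size from their results, and the chain passes that step size on to
# schedule_get_records as its first positional argument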
@shared_task(name="async_schedule_get_records",
bind=True,
base=CurrentUserTaskMixin)
def schedule_get_records(self,
step_size,
service_id,
**kwargs):
db_service = Service.objects.get(pk=service_id)
get_records_url = OperationUrl.objects.values_list("url", flat=True).get(service__id=service_id,
operation=OGCOperationEnum.GET_RECORDS.value,
method=HttpMethodEnum.GET.value)
remote_service = CatalogueServiceWeb(base_url=get_records_url,
version=db_service.service_version)
request = remote_service.get_get_records_request(**{remote_service.TYPE_NAME_QP: "gmd:MD_Metadata",
remote_service.OUTPUT_SCHEMA_QP: "http://www.isotc211.org/2005/gmd",
remote_service.RESULT_TYPE_QP: "hits"})
session = db_service.get_session_for_request()
response = session.send(request.prepare())
get_records_xml = xmlmap.load_xmlobject_from_string(string=response.content,
xmlclass=GetRecordsResponse)
max_records = get_records_xml.total_records
round_trips = (max_records // step_size)
if max_records % step_size > 0:
round_trips += 1
progress_step_size = 100 / round_trips
if self.task:
self.task.phase = f"collecting {max_records} records with step size {step_size} in {round_trips} requests: 0/{round_trips}"
self.task.save()
get_record_tasks = []
for round_trip in range(round_trips):
start_position = round_trip * step_size + 1
get_record_tasks.append(
get_records.s(service_id, max_records, step_size, start_position, progress_step_size, **kwargs))
header = get_record_tasks
callback = analyze_results.s(service_id, max_records, **kwargs)
chord(header)(callback)
@shared_task(name="async_calibrate_step_size")
def calibrate_step_size(test_results,
**kwargs):
best_result = None
for _step_size, elapsed_time in test_results:
if elapsed_time == -1:
# this step_size ran into an error, so we can't use it
continue
if not best_result:
best_result = _step_size, elapsed_time
elif elapsed_time >= 2 * best_result[1]:
# a larger step size that takes at least twice as long is not worth it
break
else:
best_result = _step_size, elapsed_time
if best_result is None:
# every probe failed; fall back to the smallest tested step size
best_result = test_results[0]
return best_result[0]
@shared_task(name="async_get_response_elapse",
bind=True,
base=CurrentUserTaskMixin)
def get_response_elapsed(self,
service_id,
test_max_records,
**kwargs):
if self.task:
self.task.status = states.STARTED
self.task.phase = f"Start analyzing elapsing time of the request for maxRecords query parameter '{test_max_records}'"
self.task.save()
db_service = Service.objects.get(pk=service_id)
get_records_url = OperationUrl.objects.values_list("url", flat=True).get(service__id=service_id,
operation=OGCOperationEnum.GET_RECORDS.value,
method=HttpMethodEnum.GET.value)
remote_service = CatalogueServiceWeb(base_url=get_records_url,
version=db_service.service_version)
request = remote_service.get_get_records_request(**{remote_service.TYPE_NAME_QP: "gmd:MD_Metadata",
remote_service.OUTPUT_SCHEMA_QP: "http://www.isotc211.org/2005/gmd",
remote_service.RESULT_TYPE_QP: "results",
remote_service.MAX_RECORDS_QP: test_max_records,
remote_service.START_POSITION_QP: 1})
session = db_service.get_session_for_request()
response = session.send(request.prepare())
get_records_xml = xmlmap.load_xmlobject_from_string(string=response.content,
xmlclass=GetRecordsResponse)
try:
# force evaluation of the parsed attributes: a response that did not parse
# as a valid GetRecords document fails here and is reported as elapsed = -1
assert isinstance(get_records_xml.total_records, int)
assert isinstance(get_records_xml.returned_records, int)
elapsed = response.elapsed.total_seconds()
except Exception:
elapsed = -1
if self.task:
self.task.status = states.SUCCESS
self.task.phase = f"Elapsing time for maxRecords query parameter '{test_max_records}': {elapsed}"
self.task.progress = 100
self.task.save()
return test_max_records, elapsed
@shared_task(name="async_get_records",
bind=True,
base=CurrentUserTaskMixin,
queue="harvest")
def get_records(self,
service_id,
max_records,
step_size,
start_position,
progress_step_size,
**kwargs):
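# small random delay, presumably to stagger the parallel GetRecords requests
# hitting the remote service at the same moment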
sleep(random.uniform(0.1, 0.9))
db_service = Service.objects.get(pk=service_id)
get_records_url = OperationUrl.objects.values_list("url", flat=True).get(service__id=service_id,
operation=OGCOperationEnum.GET_RECORDS.value,
method=HttpMethodEnum.GET.value)
remote_service = CatalogueServiceWeb(base_url=get_records_url,
version=db_service.service_version)
request = remote_service.get_get_records_request(**{remote_service.TYPE_NAME_QP: "gmd:MD_Metadata",
remote_service.OUTPUT_SCHEMA_QP: "http://www.isotc211.org/2005/gmd",
remote_service.RESULT_TYPE_QP: "results",
remote_service.MAX_RECORDS_QP: step_size,
remote_service.START_POSITION_QP: start_position})
session = db_service.get_session_for_request()
response = session.send(request.prepare())
content_type = response.headers.get("content-type")
if "/" in content_type:
content_type = content_type.split("/")[-1]
result = HarvestResult.objects.create(service=Service.objects.get(id=service_id),
job=self.task.job)
result.result_file.save(
name=f'{start_position}_to_{start_position + step_size - 1}_of_{max_records}.{content_type}',
content=ContentFile(response.text))
if self.task:
# CAREFUL: this is a serialization point in parallel execution, because every
# task has to wait for whichever task currently holds the row lock on the
# pending task while it updates progress and phase.
with transaction.atomic():
cls = self.task.__class__
task = cls.objects.select_for_update().get(pk=self.task.pk)
if not task.started_at:
task.started_at = timezone.now()
task.status = states.STARTED
task.progress += progress_step_size / 2
try:
phase = task.phase.split(":")
current_phase = phase[0]
phase_steps = phase[-1].split("/")
task.phase = f"{current_phase}: {int(phase_steps[0]) + 1}/{phase_steps[-1]}"
except Exception:
pass
task.save()
return result.pk
@shared_task(name="async_analyze_records",
bind=True,
base=CurrentUserTaskMixin,
queue="harvest")
def analyze_results(self,
harvest_results,
service_id,
total_records,
**kwargs):
if self.task:
self.task.status = states.STARTED
self.task.phase = f"Persisting downloaded records: 0 / {total_records}"
self.task.save()
service = Service.objects.get(pk=service_id)
results = HarvestResult.objects.filter(id__in=harvest_results)
dataset_list = []
progress_step_size = 100 / total_records / 2
for result in results:
xml = result.parse()
for md_metadata in xml.records:
if self.task:
self.task.progress += progress_step_size
try:
phase = self.task.phase.split(":")
current_phase = phase[0]
phase_steps = phase[-1].split("/")
self.task.phase = f"{current_phase}: {int(phase_steps[0]) + 1}/{phase_steps[-1]}"
except Exception:
pass
self.task.save()
try:
if md_metadata.hierarchy_level == "dataset":
# todo: "tile", "series", ==> dataset
# todo: "service"
# todo: "application"
# todo: "nonGeographicDataset"
dataset = DatasetMetadata.iso_metadata.create_from_parsed_metadata(parsed_metadata=md_metadata,
related_object=service,
origin_url=None)
dataset_list.append(dataset.pk)
except Exception:
# TODO: log the exception
pass
if self.task:
self.task.status = states.SUCCESS
self.task.done_at = timezone.now()
self.task.phase = f'Done. <a href="{DatasetMetadata.get_table_url()}?id__in={",".join(str(pk) for pk in dataset_list)}">dataset metadata</a>'
self.task.save()
return dataset_list
|
95952
|
from pddlgym.parser import PDDLDomainParser, PDDLProblemParser
from pddlgym.structs import LiteralConjunction
import pddlgym
import os
import numpy as np
from itertools import count
np.random.seed(0)
PDDLDIR = os.path.join(os.path.dirname(pddlgym.__file__), "pddl")
I, G, W, P, X, H = range(6)
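# cell codes used by the grids below: I = initial position, G = goal,
# W = water, P = trail, X = open ground, H = hill (hills lie on the trail);
# see create_problem for how each code maps onto the PDDL predicates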
TRAIN_GRID1 = np.array([
[I, P, P, P, X, X, X, W, W, G],
[W, W, X, P, X, W, W, X, X, P],
[W, W, X, P, X, X, W, X, X, P],
[W, W, X, P, X, X, X, X, X, P],
[W, W, X, P, X, W, W, X, X, P],
[W, W, X, P, X, W, W, X, X, P],
[W, W, X, P, X, X, W, X, X, P],
[W, W, X, P, H, P, P, H, P, P],
])
TRAIN_GRID2 = np.array([
[X, X, X, X, X, X, W, W, W, W],
[X, X, X, X, X, X, W, W, W, W],
[P, P, H, P, H, P, P, P, P, P],
[P, X, X, X, X, X, W, W, X, P],
[P, X, X, X, X, X, X, X, W, G],
[P, W, X, X, W, W, X, W, W, X],
[P, X, X, X, W, X, X, X, X, X],
[P, I, W, X, X, X, X, W, X, X],
])
TRAIN_GRID3 = np.array([
[X, P, P, P, P, P, P, P, X, X],
[X, H, X, X, W, W, X, P, X, X],
[X, P, X, X, W, W, X, G, X, X],
[X, P, X, X, X, X, X, X, X, X],
[I, P, X, X, W, W, X, X, X, X],
[X, X, X, X, X, X, X, X, X, X],
[X, X, X, X, W, W, X, X, X, X],
[X, X, X, X, W, W, X, X, X, X],
[X, X, X, X, X, X, X, X, X, X],
])
TRAIN_GRID4 = np.flipud(TRAIN_GRID3)
GRID1 = np.array([
[I, P, P, P, P],
[W, X, W, W, P],
[X, X, X, W, H],
[X, W, X, W, P],
[W, X, X, W, P],
[W, X, W, W, P],
[G, P, P, H, P],
])
GRID2 = np.array([
[P, P, I, X, X],
[P, W, W, W, X],
[P, W, W, X, X],
[H, W, X, X, W],
[P, W, X, X, X],
[P, W, W, W, W],
[P, P, G, W, W],
])
GRID3 = np.array([
[I, P, P, P, P, H, P, P, P, P,],
[X, X, W, W, X, X, X, W, W, P,],
[X, X, X, W, W, X, X, W, W, P,],
[W, X, X, W, W, X, X, X, W, P,],
[W, X, X, W, W, X, W, X, W, H,],
[W, X, X, W, W, X, W, X, W, P,],
[X, X, X, X, X, X, W, X, X, P,],
[X, X, X, W, W, X, W, W, X, P,],
[W, X, W, W, W, X, W, W, W, P,],
[W, X, X, W, W, X, W, W, W, P,],
[W, X, X, W, W, X, G, P, P, P,],
])
GRID4 = np.array([
[X, X, W, X, X, X, X, X, X, X, X, X, X, X, X, X],
[X, X, W, W, X, X, X, X, X, X, W, W, X, X, W, W],
[X, X, X, X, W, X, X, X, X, X, X, W, X, X, W, W],
[X, X, X, X, W, W, W, X, X, X, X, X, X, X, X, X],
[X, X, X, X, W, X, X, X, X, X, X, X, X, X, X, X],
[X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X],
[X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X],
[P, P, P, H, P, P, P, P, P, P, P, H, P, P, P, P],
[P, W, X, X, X, X, W, X, X, W, X, W, X, X, W, P],
[P, W, W, X, X, X, W, X, X, W, X, W, X, X, W, P],
[P, X, X, X, X, X, W, X, W, W, W, W, X, X, W, P],
[I, X, X, X, X, W, W, W, W, W, W, W, X, X, W, G],
])
GRID5 = np.array([
[G, P, P, P, W, W, W, W, W, W, X],
[X, X, X, P, W, W, W, W, W, W, X],
[X, X, X, P, W, W, W, W, W, W, X],
[P, P, P, P, W, W, W, W, W, W, X],
[P, X, X, X, W, W, W, W, W, W, X],
[P, X, X, X, W, W, W, W, W, W, X],
[P, X, X, X, W, W, W, W, W, W, X],
[H, X, X, X, W, W, W, W, W, W, X],
[P, X, X, X, W, W, W, W, W, W, X],
[P, X, X, X, W, W, W, W, W, W, X],
[P, X, X, X, W, W, W, W, W, W, X],
[P, X, X, X, W, W, W, W, W, W, X],
[I, X, X, X, W, W, W, W, W, W, X],
])
TRAIN_GRIDS = [TRAIN_GRID1, TRAIN_GRID2, TRAIN_GRID3, TRAIN_GRID4]
TEST_GRIDS = [GRID1, GRID2, GRID3, GRID4, GRID5]
def create_problem(grid, domain, problem_dir, problem_outfile):
# Create location objects
loc_type = domain.types['loc']
objects = set()
grid_locs = np.empty(grid.shape, dtype=object)
for r in range(grid.shape[0]):
for c in range(grid.shape[1]):
obj = loc_type(f'r{r}_c{c}')
objects.add(obj)
grid_locs[r, c] = obj
initial_state = set()
# Add at, isWater, isHill, isGoal
at = domain.predicates['at']
isWater = domain.predicates['iswater']
isHill = domain.predicates['ishill']
isGoal = domain.predicates['isgoal']
for r in range(grid.shape[0]):
for c in range(grid.shape[1]):
obj = grid_locs[r, c]
if grid[r, c] == I:
initial_state.add(at(obj))
elif grid[r, c] == W:
initial_state.add(isWater(obj))
elif grid[r, c] == H:
initial_state.add(isHill(obj))
elif grid[r, c] == G:
initial_state.add(isGoal(obj))
# Add adjacent
adjacent = domain.predicates['adjacent']
def get_neighbors(r, c):
for dr, dc in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
nr = r + dr
nc = c + dc
if 0 <= nr < grid.shape[0] and 0 <= nc < grid.shape[1]:
yield (nr, nc)
for r in range(grid.shape[0]):
for c in range(grid.shape[1]):
obj = grid_locs[r, c]
for (nr, nc) in get_neighbors(r, c):
nobj = grid_locs[nr, nc]
initial_state.add(adjacent(obj, nobj))
# Add onTrail
onTrail = domain.predicates['ontrail']
# Get the path
path = []
r, c = np.argwhere(grid == I)[0]
while True:
path.append((r, c))
if grid[r, c] == G:
break
for (nr, nc) in get_neighbors(r, c):
if (nr, nc) in path:
continue
if grid[nr, nc] in [P, G, H]:
r, c = nr, nc
break
else:
raise Exception("Trail is broken: no next trail cell found from ({}, {})".format(r, c))
for (r, c), (nr, nc) in zip(path[:-1], path[1:]):
obj = grid_locs[r, c]
nobj = grid_locs[nr, nc]
initial_state.add(onTrail(obj, nobj))
# Goal
goal_rcs = np.argwhere(grid == G)
assert len(goal_rcs) == 1
goal_r, goal_c = goal_rcs[0]
goal_obj = grid_locs[goal_r, goal_c]
goal = LiteralConjunction([at(goal_obj)])
filepath = os.path.join(PDDLDIR, problem_dir, problem_outfile)
PDDLProblemParser.create_pddl_file(
filepath,
objects=objects,
initial_state=initial_state,
problem_name="hiking",
domain_name=domain.domain_name,
goal=goal,
fast_downward_order=True,
)
print("Wrote out to {}.".format(filepath))
def generate_problems():
domain = PDDLDomainParser(os.path.join(PDDLDIR, "hiking.pddl"),
expect_action_preds=False,
operators_as_actions=True)
for problem_idx, grid in enumerate(TRAIN_GRIDS + TEST_GRIDS):
if problem_idx < len(TRAIN_GRIDS):
problem_dir = "hiking"
else:
problem_dir = "hiking_test"
problem_outfile = "problem{}.pddl".format(problem_idx)
create_problem(grid, domain, problem_dir, problem_outfile)
if __name__ == "__main__":
generate_problems()
|
95955
|
import unittest
from msdm.domains import GridWorld
class GridWorldTestCase(unittest.TestCase):
def test_feature_locations(self):
gw = GridWorld([
"cacg",
"sabb"])
fl = gw.feature_locations
lf = gw.location_features
fl2 = {}
for l, f in lf.items():
fl2[f] = fl2.get(f, []) + [l,]
assert all(set(fl[f]) == set(fl2[f]) for f in fl.keys())
def test_reachability(self):
gw = GridWorld([
"....#...g",
"....#....",
"#####....",
"s........",
])
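# 21 reachable cells (the 8 open cells in the walled-off top-left pocket
# cannot be reached from 's') plus the terminal state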
assert len(gw.reachable_states()) == 22 #includes terminal
|
95980
|
from typing import List
from avalanche.evaluation.metric_results import MetricValue
from avalanche.evaluation.metric_utils import stream_type
from avalanche.logging.interactive_logging import InteractiveLogger
from tqdm import tqdm
from avalanche.training import BaseStrategy
from avalanche_rl.logging.strategy_logger import RLStrategyLogger
class TqdmWriteInteractiveLogger(InteractiveLogger, RLStrategyLogger):
"""
Allows printing stats to the console while the progress
bar is updating, without breaking the bar.
"""
def __init__(self, log_every: int = 1):
super().__init__()
self.log_every = log_every
self.step_counter: int = 0
def print_current_metrics(self):
sorted_vals = sorted(self.metric_vals.values(),
key=lambda x: x[0])
for name, x, val in sorted_vals:
val = self._val_to_str(val)
tqdm.write(f'\t{name} = {val}', file=self.file)
def before_training_exp(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
super().before_training_exp(strategy, metric_values, **kwargs)
self._progress.total = strategy.current_experience_steps.value
def after_training_exp(self, strategy: 'BaseStrategy', metric_values: List['MetricValue'], **kwargs):
self._end_progress()
return super().after_training_exp(strategy, metric_values, **kwargs)
def after_training_iteration(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
self._progress.update()
self._progress.refresh()
super().after_update(strategy, metric_values, **kwargs)
if self.step_counter % self.log_every == 0:
self.print_current_metrics()
self.metric_vals = {}
self.step_counter += 1
def before_eval(self, strategy: 'BaseStrategy', metric_values: List['MetricValue'], **kwargs):
self.metric_vals = {}
tqdm.write('\n-- >> Start of eval phase << --', file=self.file)
def before_eval_exp(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
# super().before_eval_exp(strategy, metric_values, **kwargs)
# self._progress.total = strategy.eval_exp_len
action_name = 'training' if strategy.is_training else 'eval'
exp_id = strategy.experience.current_experience
task_id = strategy.experience.task_label
stream = stream_type(strategy.experience)
tqdm.write('-- Starting {} on experience {} (Task {}) from {} stream --'
.format(action_name, exp_id, task_id, stream), file=self.file)
def after_eval_exp(self, strategy: 'BaseStrategy', metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'after_update')
self.print_current_metrics()
exp_id = strategy.experience.current_experience
tqdm.write(f'> Eval on experience {exp_id} (Task '
f'{strategy.experience.task_label}) '
f'from {stream_type(strategy.experience)} stream ended.',
file=self.file)
def after_eval(self, strategy: 'BaseStrategy', metric_values: List['MetricValue'], **kwargs):
tqdm.write('-- >> End of eval phase << --\n', file=self.file)
# self.print_current_metrics()
self.metric_vals = {}
def before_training(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
tqdm.write('-- >> Start of training phase << --', file=self.file)
def after_training(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
tqdm.write('-- >> End of training phase << --', file=self.file)
|
95984
|
import numpy as np
from collections import OrderedDict
import cPickle as pickle
import time
import sys
sys.setrecursionlimit(2000)
import argparse
import RCN
from RCN.preprocessing.tools import EOF
from RCN.preprocessing.tools import shuffleData, splitData, mergeData
from RCN.preprocessing.preprocess import preprocess_iter, preprocess_once
from RCN.utils.queue import OrderedQueue
from multiprocessing import Process, Queue
import os
import string
source_dir = os.path.dirname(RCN.__file__)
dest_dir = source_dir + '/models/exp_shared_conv'
def get_data(mask_MTFL, mask_300W, all_train, **kwargs):
sys.stderr.write("\nloading data ...\n")
Train = OrderedDict()
Valid = OrderedDict()
Test = OrderedDict()
if mask_MTFL:
# getting the MTFL train, valid and AFLW test set
print "using 160by160 MTFL"
MTFL_train = source_dir + '/datasets/MTFL_raw/MTFL_train_160by160.pickle'
MTFL_test = source_dir + '/datasets/MTFL_raw/MTFL_test_160by160.pickle'
# getting the AFW test set
print "using 160by160 AFW"
AFW_test = source_dir + '/datasets/MTFL_raw/AFW_test_160by160.pickle'
sys.stderr.write("\nloading MTFL_train ...\n")
# loading data
with open(MTFL_train, 'rb') as fp:
train_set = pickle.load(fp)
sys.stderr.write("\nloading MTFL_test ...\n")
with open(MTFL_test, 'rb') as fp:
test_set = pickle.load(fp)
sys.stderr.write("\nloading AFW_test ...\n")
with open(AFW_test, 'rb') as fp:
afw_set = pickle.load(fp)
# get sets X and Y
raw_set_x = train_set['X']
raw_set_y = train_set['Y']
test_set_x = test_set['X']
test_set_y = test_set['Y']
afw_set_x = afw_set['X']
afw_set_y = afw_set['Y']
# shuffle the data and split between train and validation sets
shuffled_x, shuffled_y = shuffleData(raw_set_x, raw_set_y)
train_set_x, train_set_y, valid_set_x, valid_set_y = splitData(shuffled_x, shuffled_y, split_size=1000)
if all_train:
print "using all train set for MTFL"
train_set_x = shuffled_x
train_set_y = shuffled_y
# adding data to the sets
Train['MTFL'] = (train_set_x, train_set_y)
Valid['MTFL'] = (valid_set_x, valid_set_y)
MTFL_test = OrderedDict()
MTFL_test['aflw'] = (test_set_x, test_set_y)
MTFL_test['afw'] = (afw_set_x, afw_set_y)
Test['MTFL'] = MTFL_test
if mask_300W:
# getting the 300W train set
print "using 160by160 300W"
train_300W = source_dir + '/datasets/300W/300W_train_160by160.pickle'
test_300W = source_dir + '/datasets/300W/300W_test_160by160.pickle'
# loading data
sys.stderr.write("\nloading 300W_train ...\n")
with open(train_300W, 'rb') as fp:
train_set_300W = pickle.load(fp)
sys.stderr.write("\nloading 300W_test ...\n")
with open(test_300W, 'rb') as fp:
test_set_300W = pickle.load(fp)
# getting sets X and Y
train_set_300W_x = train_set_300W['X']
train_set_300W_y = train_set_300W['Y']
Helen_set = test_set_300W['Helen']
Helen_set_x = Helen_set['X']
Helen_set_y = Helen_set['Y']
ibug_set = test_set_300W['ibug']
ibug_set_x = ibug_set['X']
ibug_set_y = ibug_set['Y']
lfpw_set = test_set_300W['lfpw']
lfpw_set_x = lfpw_set['X']
lfpw_set_y = lfpw_set['Y']
sys.stderr.write("data loaded.\n")
# shuffle the data and split between train and validation sets for 300W
sz_300W = train_set_300W_x.shape[0]/10
shuffled_300W_x, shuffled_300W_y = shuffleData(train_set_300W_x, train_set_300W_y)
train_300W_x, train_300W_y, valid_300W_x, valid_300W_y = splitData(shuffled_300W_x, shuffled_300W_y, split_size=sz_300W)
if all_train:
print "using all train set for 300W"
train_300W_x = shuffled_300W_x
train_300W_y = shuffled_300W_y
# adding data to the sets
Train['300W'] = (train_300W_x, train_300W_y)
Valid['300W'] = (valid_300W_x, valid_300W_y)
W300_test = OrderedDict()
W300_test['ibug'] = (ibug_set_x, ibug_set_y)
W300_test['lfpw'] = (lfpw_set_x, lfpw_set_y)
W300_test['Helen'] = (Helen_set_x, Helen_set_y)
Test['300W'] = W300_test
sets = [Train, Valid, Test]
return sets
def preprocess_data(sets, mask_MTFL, mask_300W, scale_mul,
translate_mul, gray_scale, use_lcn, dist_ratio, target_dim=80, block_img=False,
fixed_block=False, rotation=10):
td = (target_dim, target_dim)
rng_seed = np.random.RandomState(0)
Train, Valid, Test = sets
MTFL_ratio = 3.8/4.0
rotation_set = None
if mask_MTFL:
# getting the data from the OrderedDict
train_set_x, train_set_y = Train['MTFL']
valid_set_x, valid_set_y = Valid['MTFL']
MTFL_test = Test['MTFL']
test_set_x, test_set_y = MTFL_test['aflw']
afw_set_x, afw_set_y = MTFL_test['afw']
######################
# preprocessing data #
######################
#note that set_x before and after pre-processing is a 4D tensor of size (#samples, #rows, #cols, #channels)
# where #channels=1 if the image is gray-scaled
# setting the images to gray-scale and detecting the bounding box
print "\ndoing preprocess_once on train_set"
train_set_x, train_set_y = preprocess_once(train_set_x, train_set_y, gray_scale=gray_scale, dist_ratio=MTFL_ratio)
print "\ndoing preprocess_once on valid_set"
valid_set_x, valid_set_y = preprocess_once(valid_set_x, valid_set_y, gray_scale=gray_scale, dist_ratio=MTFL_ratio)
print "\ndoing preprocess_once on aflw test_set"
test_set_x, test_set_y = preprocess_once(test_set_x, test_set_y, gray_scale=gray_scale, dist_ratio=MTFL_ratio)
#downsampling the valid and test sets
print "\ndoing preprocess_iter on valid_set"
valid_set_x, valid_set_y = preprocess_iter(valid_set_x, valid_set_y, rng_seed, target_dim=td,
jitter=True, scale_mul=scale_mul, translate_mul=translate_mul,
sanity=False, use_lcn=use_lcn, dset='MTFL', block_img=block_img,
fixed_block=fixed_block, rotation=rotation)
print "\ndoing preprocess_iter on aflw test_set"
test_set_x, test_set_y = preprocess_iter(test_set_x, test_set_y, rng_seed, target_dim=td,
jitter=False, scale_mul=0., translate_mul=0., sanity=False,
use_lcn=use_lcn, dset='MTFL', block_img=False,
rotation_set=rotation_set)
# processing the afw test set
print "\ndoing preprocess_once on afw test_set"
afw_set_x, afw_set_y = preprocess_once(afw_set_x, afw_set_y, gray_scale=gray_scale, dist_ratio=MTFL_ratio)
print "\ndoing preprocess_iter on afw test_set"
afw_set_x, afw_set_y = preprocess_iter(afw_set_x, afw_set_y, rng_seed, target_dim=td,
jitter=False, scale_mul=0., translate_mul=0., sanity=False,
use_lcn=use_lcn, dset='MTFL', block_img=False,
rotation_set=rotation_set)
# adding data to the sets
Train['MTFL'] = (train_set_x, train_set_y)
Valid['MTFL'] = (valid_set_x, valid_set_y)
MTFL_test = OrderedDict()
MTFL_test['aflw'] = (test_set_x, test_set_y)
MTFL_test['afw'] = (afw_set_x, afw_set_y)
Test['MTFL'] = MTFL_test
if mask_300W:
train_set_x_300W, train_set_y_300W = Train['300W']
valid_set_x_300W, valid_set_y_300W = Valid['300W']
W300_test = Test['300W']
lfpw_set_x, lfpw_set_y = W300_test['lfpw']
ibug_set_x, ibug_set_y = W300_test['ibug']
Helen_set_x, Helen_set_y = W300_test['Helen']
# 300W set process_once
print "dist_ratio for 300W set is %f" %dist_ratio
print "\ndoing preprocess_once on 300W train_set"
train_set_x_300W, train_set_y_300W = preprocess_once(train_set_x_300W, train_set_y_300W, gray_scale=gray_scale, dist_ratio=dist_ratio)
print "\ndoing preprocess_once on 300W valid_set"
valid_set_x_300W, valid_set_y_300W = preprocess_once(valid_set_x_300W, valid_set_y_300W, gray_scale=gray_scale, dist_ratio=dist_ratio)
print "\ndoing preprocess_once on 300W Helen test_set"
Helen_set_x, Helen_set_y = preprocess_once(Helen_set_x, Helen_set_y, gray_scale=gray_scale, dist_ratio=dist_ratio)
print "\ndoing preprocess_once on 300W ibug test_set"
ibug_set_x, ibug_set_y = preprocess_once(ibug_set_x, ibug_set_y, gray_scale=gray_scale, dist_ratio=dist_ratio)
print "\ndoing preprocess_once on 300W lfpw test_set"
lfpw_set_x, lfpw_set_y = preprocess_once(lfpw_set_x, lfpw_set_y, gray_scale=gray_scale, dist_ratio=dist_ratio)
# 300W set process_iter for valid and test sets
print "\ndoing preprocess_iter on 300W valid_set"
valid_set_x_300W, valid_set_y_300W = preprocess_iter(valid_set_x_300W, valid_set_y_300W, rng_seed, target_dim=td,
jitter=True, scale_mul=scale_mul, translate_mul=translate_mul,
sanity=False, use_lcn=use_lcn, dset='300W', block_img=block_img,
fixed_block=fixed_block, rotation=rotation)
print "\ndoing preprocess_iter on 300W Helen test_set"
Helen_set_x, Helen_set_y = preprocess_iter(Helen_set_x, Helen_set_y, rng_seed, target_dim=td, jitter=False,
scale_mul=0., translate_mul=0., sanity=False, use_lcn=use_lcn, dset='300W', block_img=False)
print "\ndoing preprocess_iter on 300W ibug test_set"
ibug_set_x, ibug_set_y = preprocess_iter(ibug_set_x, ibug_set_y, rng_seed, target_dim=td, jitter=False,
scale_mul=0., translate_mul=0., sanity=False, use_lcn=use_lcn, dset='300W',
block_img=False)
print "\ndoing preprocess_iter on 300W lfpw test_set"
lfpw_set_x, lfpw_set_y = preprocess_iter(lfpw_set_x, lfpw_set_y, rng_seed, target_dim=td, jitter=False, scale_mul=0., translate_mul=0.,
sanity=False, use_lcn=use_lcn, dset='300W', block_img=False)
# adding data to the sets
Train['300W'] = (train_set_x_300W, train_set_y_300W)
Valid['300W'] = (valid_set_x_300W, valid_set_y_300W)
W300_test = OrderedDict()
W300_test['ibug'] = (ibug_set_x, ibug_set_y)
W300_test['lfpw'] = (lfpw_set_x, lfpw_set_y)
W300_test['Helen'] = (Helen_set_x, Helen_set_y)
Test['300W'] = W300_test
sets = [Train, Valid, Test]
return sets
def producer_process(process_ID, data_queue, seed_queue, data_sets, target_dim,
jitter, scale_mul, translate_mul, sanity, use_lcn, masks,
block_img, fixed_block, rotation):
if '300W' in masks:
train_set_x_300W, train_set_y_300W = data_sets['300W']
data_queue_300W = data_queue['300W']
if 'MTFL' in masks:
train_set_x_MTFL, train_set_y_MTFL = data_sets['MTFL']
data_queue_MTFL = data_queue['MTFL']
produce_start_time = time.time()
while True:
dset, SEED = seed_queue.get()
if not isinstance(SEED, EOF):
#sys.stderr.write("process_ID: %i, jittering for seed %i\n" %(process_ID, SEED))
start_time = time.time()
# jittering the train_set for this epoch
seed_rng = np.random.RandomState(SEED)
if dset == 'MTFL':
new_train_set_x, new_train_set_y = preprocess_iter(set_x=train_set_x_MTFL, set_y=train_set_y_MTFL,
seed_rng=seed_rng, target_dim=target_dim, jitter=jitter,
scale_mul=scale_mul, translate_mul=translate_mul, sanity=sanity,
use_lcn=use_lcn, dset=dset, block_img=block_img,
fixed_block=fixed_block, rotation=rotation)
elem = [new_train_set_x, new_train_set_y]
# the size of elem (new_train_set_x, new_train_set_y combined) for MTFL is about 56 Megabyte together
data_queue_MTFL.put(SEED,elem)
#sys.stderr.write('process_ID: %i, added SEED %i for set MTFL. Queue size is now %s\n' % (process_ID, SEED, data_queue_MTFL.qsize()))
elif dset == '300W':
new_train_set_x, new_train_set_y = preprocess_iter(set_x=train_set_x_300W, set_y=train_set_y_300W,
seed_rng=seed_rng, target_dim=target_dim, jitter=jitter,
scale_mul=scale_mul, translate_mul=translate_mul, sanity=sanity,
use_lcn=use_lcn, dset=dset, block_img=block_img,
fixed_block=fixed_block, rotation=rotation)
elem = [new_train_set_x, new_train_set_y]
# the size of elem (new_train_set_x, new_train_set_y combined) for 300W is about 21 Megabyte together
data_queue_300W.put(SEED,elem)
#sys.stderr.write('process_ID: %i, added SEED %i for set 300W. Queue size is now %s\n' % (process_ID, SEED, data_queue_300W.qsize()))
else:
raise Exception('cannot jitter dset %s' %(dset,))
end_time = time.time() # the end of training time
training_time = (end_time - start_time)
#sys.stderr.write('process_ID: %i, jittering took %f minutes\n' % (process_ID, training_time / 60.))
else:
sys.stderr.write('process_ID: %i. Done with producing.\n' %(process_ID))
produce_end_time = time.time()
producing_time = (produce_end_time - produce_start_time)
sys.stderr.write('jittering all data took %f minutes\n' % (producing_time / 60.))
break
def create_producers(Train, mask_MTFL, mask_300W, td, scale_mul,
translate_mul, use_lcn, block_img, fixed_block,
rotation, num_epochs, num_procs):
data_sets = OrderedDict()
data_queue = OrderedDict()
seed_queue = Queue()
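# producer/consumer protocol: each producer pulls (dset, SEED) pairs from
# seed_queue, jitters one epoch of that dataset, and pushes the result onto
# the per-dataset OrderedQueue in data_queue; an EOF sentinel on seed_queue
# shuts a producer down (see producer_process above)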
NUMBER_OF_PROCESSES = num_procs
masks = []
producers = None
if mask_MTFL:
masks.append('MTFL')
# creating queue for the jittered data.
train_set_x_MTFL, train_set_y_MTFL = Train['MTFL']
data_sets['MTFL'] = (train_set_x_MTFL, train_set_y_MTFL)
data_queue_MTFL = OrderedQueue(maxsize=6)
data_queue['MTFL'] = data_queue_MTFL
# initializing the seed_queue
num_queue_elem = np.min((num_epochs, NUMBER_OF_PROCESSES + 1))
for i in xrange(num_queue_elem):
seed_queue.put(('MTFL', i))
if mask_300W:
masks.append('300W')
# creating queue for the jittered data.
train_set_x_300W, train_set_y_300W = Train['300W']
data_sets['300W'] = (train_set_x_300W, train_set_y_300W)
data_queue_300W = OrderedQueue(maxsize=15)
data_queue['300W'] = data_queue_300W
num_queue_elem = np.min((num_epochs, NUMBER_OF_PROCESSES + 9))
for i in xrange(num_queue_elem):
seed_queue.put(('300W', i))
assert len(data_sets) > 0
assert len(data_queue) > 0
assert len(data_queue) == len(data_sets) == len(masks)
sys.stderr.write("\nstarting %d workers\n" % NUMBER_OF_PROCESSES)
producers = [Process(target=producer_process, args=(i, data_queue, seed_queue,
data_sets, td, True, scale_mul, translate_mul, False, use_lcn, masks, block_img,
fixed_block, rotation))
for i in xrange(NUMBER_OF_PROCESSES)]
for pr in producers:
pr.daemon = True
pr.start()
sys.stderr.write("\n\ndone with creating and starting workers.\n\n")
return [producers, data_queue, seed_queue, NUMBER_OF_PROCESSES, num_queue_elem]
def load_preproc_initProcs(mask_MTFL, mask_300W, all_train, scale_mul, translate_mul,
gray_scale, use_lcn, dist_ratio, target_dim, block_img, fixed_block,
rotation, num_epochs, num_procs):
start_time = time.time()
td = (target_dim, target_dim)
print "the target_dim as the model's input is %s" %(td,)
# loading the data #
sets = get_data(mask_MTFL=mask_MTFL, mask_300W=mask_300W,
all_train=all_train)
# preprocessing the data
sets = preprocess_data(sets=sets, mask_MTFL=mask_MTFL, mask_300W=mask_300W,
scale_mul=scale_mul, translate_mul=translate_mul, gray_scale=gray_scale, use_lcn=use_lcn,
dist_ratio=dist_ratio, target_dim=target_dim, block_img=block_img, fixed_block=fixed_block,
rotation=rotation)
Train, Valid, Test = sets
# starting processes
producers, data_queue, seed_queue, NUMBER_OF_PROCESSES, num_queue_elem = create_producers(Train=Train, mask_MTFL=mask_MTFL, mask_300W=mask_300W,
td=td, scale_mul=scale_mul, translate_mul=translate_mul,
use_lcn=use_lcn, block_img=block_img, fixed_block=fixed_block,
rotation=rotation, num_epochs=num_epochs,
num_procs=num_procs)
return [sets, producers, data_queue, seed_queue, NUMBER_OF_PROCESSES, start_time, num_queue_elem]
def train_convNet(nkerns, num_epochs, learning_rate, batch_size, sliding_window_lenght, task_stop_threshold, L2_coef,
L2_coef_out, L2_coef_ful, use_ada_delta, decay, param_path, train_cost, gray_scale, scale_mul,
translate_mul, param_seed, Lambda_coefs, file_suffix, mask_MTFL, mask_300W, use_lcn, dist_ratio,
sw_lenght, all_train, paral_conv, target_dim, bilinear, coarse_conv_size, only_fine_tune_struc,
coarse_mask_branch, block_img, fixed_block, bch_norm, dropout, rotation, denoise_conv,
param_path_cfNet, nMaps_shuffled, use_res_2, use_res_1, extra_fine, large_F_filter,
load_no_output_params, save_no_output_params, train_all_kpts, dropout_kpts, num_model_kpts,
conv_size, param_path_strucNet, weight_per_pixel, no_fine_tune_model, conv_per_kpt,
linear_conv_per_kpt, only_49_kpts, concat_pool_locations, zero_non_pooled, num_procs,
learn_upsampling):
sets, producers, data_queue, seed_queue, NUMBER_OF_PROCESSES, start_time, num_queue_elem = load_preproc_initProcs(
mask_MTFL=mask_MTFL, mask_300W=mask_300W, all_train=all_train,
scale_mul=scale_mul, translate_mul=translate_mul, use_lcn=use_lcn,
dist_ratio=dist_ratio, target_dim=target_dim, block_img=block_img,
fixed_block=fixed_block, rotation=rotation, num_epochs=num_epochs,
gray_scale=gray_scale, num_procs=num_procs)
if gray_scale:
num_img_channels = 1
else:
num_img_channels = 3
parallel_start_time = time.time()
message = ''
if paral_conv:
if paral_conv == 1.0:
sys.stderr.write('training SumNet_MTFL\n')
from RCN.models.SumNet_MTFL import Train
tr = Train(data_queue=data_queue, seed_queue=seed_queue, nkerns=nkerns, num_epochs=num_epochs, learning_rate=learning_rate,
batch_size=batch_size, sliding_window_lenght=sliding_window_lenght, task_stop_threshold=task_stop_threshold,
L2_coef_common=L2_coef, L2_coef_branch=L2_coef_ful, use_ada_delta=use_ada_delta, decay=decay, param_path=param_path,
train_cost=train_cost, file_suffix=file_suffix, num_img_channels=num_img_channels, sets=sets, param_seed=param_seed,
num_procs=NUMBER_OF_PROCESSES, Lambda_coefs=Lambda_coefs, mask_MTFL=mask_MTFL, mask_300W=mask_300W, use_lcn=use_lcn,
producers=producers, sw_lenght=sw_lenght, target_dim=target_dim, bilinear=bilinear, coarse_mask_branch=coarse_mask_branch,
L2_coef_out=L2_coef_out, coarse_conv_size=coarse_conv_size, weight_per_pixel=weight_per_pixel, use_res_2=use_res_2,
conv_per_kpt=conv_per_kpt, linear_conv_per_kpt=linear_conv_per_kpt)
elif paral_conv == 2.0:
sys.stderr.write('training SumNet_300W\n')
from RCN.models.SumNet_300W import Train
tr = Train(data_queue=data_queue, seed_queue=seed_queue, nkerns=nkerns, num_epochs=num_epochs, learning_rate=learning_rate,
batch_size=batch_size, sliding_window_lenght=sliding_window_lenght, task_stop_threshold=task_stop_threshold,
L2_coef_common=L2_coef, L2_coef_branch=L2_coef_ful, use_ada_delta=use_ada_delta, decay=decay, param_path=param_path,
train_cost=train_cost, file_suffix=file_suffix, num_img_channels=num_img_channels, sets=sets, param_seed=param_seed,
num_procs=NUMBER_OF_PROCESSES, Lambda_coefs=Lambda_coefs, mask_MTFL=mask_MTFL, mask_300W=mask_300W, use_lcn=use_lcn,
producers=producers, sw_lenght=sw_lenght, target_dim=target_dim, bilinear=bilinear, bch_norm=bch_norm, dropout=dropout,
num_queue_elem=num_queue_elem, use_res_2=use_res_2, extra_fine=extra_fine, load_no_output_params=load_no_output_params,
coarse_conv_size=coarse_conv_size, conv_per_kpt=conv_per_kpt, linear_conv_per_kpt=linear_conv_per_kpt,
weight_per_pixel=weight_per_pixel)
elif paral_conv == 3.0:
sys.stderr.write('training RCN_MTFL\n')
from RCN.models.RCN_MTFL import Train
tr = Train(data_queue=data_queue, seed_queue=seed_queue, nkerns=nkerns, num_epochs=num_epochs, learning_rate=learning_rate,
batch_size=batch_size, sliding_window_lenght=sliding_window_lenght, task_stop_threshold=task_stop_threshold,
L2_coef_common=L2_coef, L2_coef_branch=L2_coef_ful, use_ada_delta=use_ada_delta, decay=decay, param_path=param_path,
train_cost=train_cost, file_suffix=file_suffix, num_img_channels=num_img_channels, sets=sets, param_seed=param_seed,
num_procs=NUMBER_OF_PROCESSES, Lambda_coefs=Lambda_coefs, mask_MTFL=mask_MTFL, mask_300W=mask_300W, use_lcn=use_lcn,
producers=producers, sw_lenght=sw_lenght, target_dim=target_dim, bilinear=bilinear, bch_norm=bch_norm,
dropout=dropout, num_queue_elem=num_queue_elem, extra_fine=extra_fine, save_no_output_params=save_no_output_params,
coarse_conv_size=coarse_conv_size, use_res_2=use_res_2, use_res_1=use_res_1)
elif paral_conv == 4.0:
sys.stderr.write('training RCN_MTFL_skip\n')
from RCN.models.RCN_MTFL_skip import Train
tr = Train(data_queue=data_queue, seed_queue=seed_queue, nkerns=nkerns, num_epochs=num_epochs, learning_rate=learning_rate,
batch_size=batch_size, sliding_window_lenght=sliding_window_lenght, task_stop_threshold=task_stop_threshold,
L2_coef_common=L2_coef, L2_coef_branch=L2_coef_ful, use_ada_delta=use_ada_delta, decay=decay, param_path=param_path,
train_cost=train_cost, file_suffix=file_suffix, num_img_channels=num_img_channels, sets=sets, param_seed=param_seed,
num_procs=NUMBER_OF_PROCESSES, Lambda_coefs=Lambda_coefs, mask_MTFL=mask_MTFL, mask_300W=mask_300W, use_lcn=use_lcn,
producers=producers, sw_lenght=sw_lenght, target_dim=target_dim, bilinear=bilinear, bch_norm=bch_norm, dropout=dropout,
num_queue_elem=num_queue_elem, extra_fine=extra_fine, save_no_output_params=save_no_output_params,
coarse_conv_size=coarse_conv_size, use_res_2=use_res_2, use_res_1=use_res_1)
elif paral_conv == 5.0:
sys.stderr.write('training RCN_300W\n')
from RCN.models.RCN_300W import Train
tr = Train(data_queue=data_queue, seed_queue=seed_queue, nkerns=nkerns, num_epochs=num_epochs, learning_rate=learning_rate,
batch_size=batch_size, sliding_window_lenght=sliding_window_lenght, task_stop_threshold=task_stop_threshold,
L2_coef_common=L2_coef, L2_coef_branch=L2_coef_ful, use_ada_delta=use_ada_delta, decay=decay, param_path=param_path,
train_cost=train_cost, file_suffix=file_suffix, num_img_channels=num_img_channels, sets=sets, param_seed=param_seed,
num_procs=NUMBER_OF_PROCESSES, Lambda_coefs=Lambda_coefs, mask_MTFL=mask_MTFL, mask_300W=mask_300W, use_lcn=use_lcn,
producers=producers, sw_lenght=sw_lenght, target_dim=target_dim, bilinear=bilinear, bch_norm=bch_norm, dropout=dropout,
num_queue_elem=num_queue_elem, use_res_2=use_res_2, use_res_1=use_res_1, extra_fine=extra_fine, large_F_filter=large_F_filter,
load_no_output_params=load_no_output_params, only_49_kpts=only_49_kpts, concat_pool_locations=concat_pool_locations,
zero_non_pooled=zero_non_pooled, learn_upsampling=learn_upsampling)
elif paral_conv == 6.0:
sys.stderr.write('training RCN_300W_skip\n')
from RCN.models.RCN_300W_skip import Train
tr = Train(data_queue=data_queue, seed_queue=seed_queue, nkerns=nkerns, num_epochs=num_epochs, learning_rate=learning_rate,
batch_size=batch_size, sliding_window_lenght=sliding_window_lenght, task_stop_threshold=task_stop_threshold,
L2_coef_common=L2_coef, L2_coef_branch=L2_coef_ful, use_ada_delta=use_ada_delta, decay=decay, param_path=param_path,
train_cost=train_cost, file_suffix=file_suffix, num_img_channels=num_img_channels, sets=sets, param_seed=param_seed,
num_procs=NUMBER_OF_PROCESSES, Lambda_coefs=Lambda_coefs, mask_MTFL=mask_MTFL, mask_300W=mask_300W, use_lcn=use_lcn,
producers=producers, sw_lenght=sw_lenght, target_dim=target_dim, bilinear=bilinear, bch_norm=bch_norm, dropout=dropout,
num_queue_elem=num_queue_elem)
elif denoise_conv:
if denoise_conv == 1.0:
sys.stderr.write('training denoising_300W model\n')
from RCN.models.Denoising_300W import Train
tr = Train(data_queue=data_queue, seed_queue=seed_queue, nkerns=nkerns, num_epochs=num_epochs, batch_size=batch_size,
L2_coef=L2_coef, param_path=param_path, file_suffix=file_suffix, sets=sets, param_seed=param_seed,
num_procs=NUMBER_OF_PROCESSES, mask_MTFL=mask_MTFL, mask_300W=mask_300W, nMaps_shuffled=nMaps_shuffled,
producers=producers, sw_lenght=sw_lenght, target_dim=target_dim, num_queue_elem=num_queue_elem,
param_path_cfNet=param_path_cfNet, decay=decay, train_all_kpts=train_all_kpts, dropout_kpts=dropout_kpts,
num_model_kpts=num_model_kpts, conv_size=conv_size)
elif denoise_conv == 2.0:
sys.stderr.write('training denoising convnet model\n')
from RCN.models.fine_tune_cfNet_structured import Train
tr = Train(data_queue=data_queue, seed_queue=seed_queue, nkerns=nkerns, num_epochs=num_epochs, batch_size=batch_size,
L2_coef=L2_coef, param_path=param_path, file_suffix=file_suffix, sets=sets, param_seed=param_seed,
num_procs=NUMBER_OF_PROCESSES, mask_MTFL=mask_MTFL, mask_300W=mask_300W, producers=producers,
sw_lenght=sw_lenght, target_dim=target_dim, num_queue_elem=num_queue_elem, use_lcn=use_lcn,
param_path_cfNet=param_path_cfNet, param_path_strucNet=param_path_strucNet, decay=decay,
train_all_kpts=train_all_kpts, dropout_kpts=dropout_kpts, num_model_kpts=num_model_kpts,
conv_size=conv_size, num_img_channels=num_img_channels, no_fine_tune_model=no_fine_tune_model,
only_49_kpts=only_49_kpts, only_fine_tune_struc=only_fine_tune_struc)
message = tr.train()
for pr in producers:
pr.join()
if mask_300W:
data_queue['300W'].close()
if mask_MTFL:
data_queue['MTFL'].close()
end_time = time.time() # the end of training time
training_time = (end_time - start_time)
parallel_time = (end_time - parallel_start_time)
sys.stderr.write('parallel processes took %f minutes\n' % (parallel_time / 60.))
sys.stderr.write('running all program took %f minutes\n' % (training_time / 60.))
return message
def get_nkerns(paral_conv=0.0, denoise_conv=0.0, **kwargs):
SumNet_MTFL = [5, 16, 16, 32, 32, 48, 48, 48, 48, 48, 48]
SumNet_300W = [68, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64]
RCN_MTFL = [5, 16, 32, 48, 48, 48, 48, 32, 32, 16, 16, 48, 48, 48]
RCN_300W = [68, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64]
Denoising_300W = [68, 64, 64, 64]
if paral_conv == 1:
nkerns = SumNet_MTFL
elif paral_conv == 2:
nkerns = SumNet_300W
elif paral_conv in [3, 4]:
nkerns = RCN_MTFL
elif paral_conv in [5, 6]:
nkerns = RCN_300W
if denoise_conv in [1, 2]:
nkerns = Denoising_300W
return nkerns
def get_target_dim(target_dim, bilinear):
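# presumably the bilinear-upsampling variants of the models expect odd input
# dims (41/81) while plain value repetition expects even ones (40/80), so the
# requested dim is nudged to match the chosen upsampling mode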
if bilinear and target_dim == 40:
target_dim = 41
if bilinear and target_dim == 80:
target_dim = 81
if not bilinear and target_dim == 41:
target_dim = 40
if not bilinear and target_dim == 81:
target_dim = 80
return target_dim
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Processing the convNet param list.')
################################################
# getting the parameters from the command line #
################################################
parser.add_argument('--num_epochs', type=int, default=-1)
# the default learning_rate in TCDCN paper is 0.003
parser.add_argument('--learning_rate', type=float, default=0.003)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--sliding_window_lenght', type=int, default=200)
# the default task_stop_threshold in TCDCN paper is 0.5
parser.add_argument('--task_stop_threshold', type=float, default=0.5)
# the default L2_coef in TCDCN paper is 1.0
# L2 coef for all layers except the fully connected layer and the last layer
parser.add_argument('--L2_coef', type=float, default=0.0001)
# L2 coef for the fully-connected layer
parser.add_argument('--L2_coef_ful', type=float, default=0.0001)
# L2 coef for the output layer
parser.add_argument('--L2_coef_out', type=float, default=None)
# the lambda coefficient for the multi-tasking
parser.add_argument('--Lambda', type=str, help='the coefficient for multi-tasking. If one value is given, all tasks get\
the same multiplier, otherwise 4 values should be given separated by commas with no space', default='1.0')
parser.add_argument('--use_ada_delta', action='store_false', default=True)
# the decay coefficient for ada_delta, the default value is 0.95
parser.add_argument('--decay', type=float, default=0.95)
# complete pickle path to the param_file should be given
parser.add_argument('--param_path', type=str, default="")
# complete pickle path to the param_file of first keypoint detection
# convnet model
parser.add_argument('--param_path_cfNet', type=str, default="")
# complete pickle path to the param_file of the structured keypoint detection model
parser.add_argument('--param_path_strucNet', type=str, default="")
parser.add_argument('--nMaps_shuffled', type=int, help='the number of channels to be shuffled for each sample',default=35)
# keypoint locations normalized by the inter-ocular distance
parser.add_argument('--cost', choices=['cost_kpt', 'error_kpt_avg', 'cross_entropy'], default='cost_kpt')
parser.add_argument('--gray_scale', action='store_false', default=True)
parser.add_argument('--file_suffix', type=str, default="")
# the multiplier of face_bounding box for jittering
parser.add_argument('--dist_ratio', type=float, help='the multiplier of face_bounding box for jittering', default=1.3)
# the jittering multiplier for train and valid sets
parser.add_argument('--scale_mul', type=float, default=0.5)
parser.add_argument('--translate_mul', type=float, default=0.5)
# setting the seed for parameters initialization
parser.add_argument('--param_seed', type=int, default=54621)
parser.add_argument('--use_lcn', help='indicates whether to use lcn or not', action='store_true', default=False)
# the lenght of the sliding window for value averaging
parser.add_argument('--sw', type=int, default=-1)
parser.add_argument('--denoise_conv', type=float, help='0.0=no_denoise, 1.0=denoise_conv, 2.0=structured_kpt_dist\
3.0=mlp_denoise_model, 4.0=finetune_both_models, 5.0=DAE_with_RCN_maps, 6.0=fine_tune_cfNet_DAE\
7.0=multi_stage_DAE (only 0.0, 1.0 and 2.0 are accepted here)', choices=[0.0, 1.0, 2.0], default=0.0)
parser.add_argument('--all_train', help='indicates whether to train on all trainset or not', action='store_true', default=False)
parser.add_argument('--paral_conv', type=float, help='indicates whether to use the two branch parallel conv, multi-scale\
conv or none, choices=[0=none, 1=SumNet_MTFL, 2=SumNet_300W, 3=RCN_MTFL,\
4=RCN_MTFL_skip, 5=RCN_300W, 6=RCN_300W_skip]',
choices=[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], default=0.0)
parser.add_argument('--target_dim', type=int, help='dimension of input images to the model; 40 or 80 (adjusted to 41 or 81 when --bilinear is used)', choices=[40, 80], default=80)
parser.add_argument('--bilinear', help='indicates whether to use bilinear interpolation for upsampling or just repeating values ', action='store_true', default=False)
parser.add_argument('--coarse_mask_branch', help='a string of five values (only 0 and 1) separated by commas, starting from F going to S', type=str, default="1,1,1,1,1")
parser.add_argument('--coarse_conv_size', type=int, help='conv_size of the coarset branch of conv models', default=3)
parser.add_argument('--block_img', help='indicates whether to block part of the image as preprocessing or not', action='store_true', default=False)
parser.add_argument('--fixed_block', help='indicates whether to randomly choose the block size or not, default is True', action='store_true', default=False)
parser.add_argument('--bch_norm', help='indicates whether to use batch_normalization in convnet or not', action='store_true', default=False)
parser.add_argument('--dropout', help='indicates whether to use dropout in convnet or not', action='store_true', default=False)
parser.add_argument('--rotation', type=int, help='the max rotation for preprocessing', default=40)
parser.add_argument('--use_res_2', help='indicates whether to use 2by2 branch resolution or not', action='store_true', default=False)
parser.add_argument('--use_res_1', help='indicates whether to use up to 1by1 branch resolution or not', action='store_true', default=False)
parser.add_argument('--extra_fine', help='indicates whether to use extra fine layer in RCN_300W or not', action='store_true', default=False)
parser.add_argument('--large_F_filter', help='indicates whether to use filter size of 5 for fine layers of RCN models or not',
action='store_true', default=False)
parser.add_argument('--load_no_output_params', help='indicates whether to load the model\'s output layer params or not', action='store_true', default=False)
parser.add_argument('--save_no_output_params', help='indicates whether to save the model\'s params excluding the output layer', action='store_true', default=False)
parser.add_argument('--train_all_kpts', help='indicates to train on all keypoints when training denoise_conv model', action='store_true', default=False)
parser.add_argument('--dropout_kpts', help='indicates to masks the random selected keypoint when training the structured model', action='store_true', default=False)
parser.add_argument('--num_model_kpts', help='the number of cfNet keypoints to be used when building the structured model', type=int, default=68)
parser.add_argument('--conv_size', type=int, help='conv_size of the structured kpt model', default=45)
parser.add_argument('--weight_per_pixel', help='indicates whether to use a per-pixel weight (in each branch for each kpt)\
in SumNet model or not', action='store_true', default=False)
parser.add_argument('--conv_per_kpt', help='indicates whether to use a convnet on all branch feature maps per keypoint\
in SumNet model or not', action='store_true', default=False)
parser.add_argument('--linear_conv_per_kpt', help='indicates whether to use a convnet on all branch feature maps per keypoint\
in SumNet model or not, in this case the final convolution in each branch is linear', action='store_true', default=False)
parser.add_argument('--no_fine_tune_model', help='indicates whether to fine_tune the parameters of the two models (CakeNet and denoising)\
in the joint model', action='store_true', default=False)
parser.add_argument('--only_fine_tune_struc', help='if used indicates to only finetune the structured model', action='store_true', default=False)
parser.add_argument('--only_49_kpts', help='indicates whether to evaluate only for 49 keypoints in fine_tune_cfNet_structured model or not\
', action='store_true', default=False)
parser.add_argument('--concat_pool_locations', help=('indicates whether to concatenate pooled locations (as zero-one tensor)'
' or not in RCN model when merging branches'), action='store_true', default=False)
parser.add_argument('--zero_non_pooled', help=('indicates whether to set to zero the non pooled values of the pre-pooled'
' maps in RCN model when concatenating feature maps or not'), action='store_true', default=False)
parser.add_argument('--num_procs', type=int, help='number_of_processors for jittering data', default=2)
parser.add_argument('--learn_upsampling', help=('indicates whether to learn the weights for upsampling or not'),
                    action='store_true', default=False)
args = parser.parse_args()
paral_conv = args.paral_conv
print "paral_conv is %s" %(paral_conv,)
###########################
# printing out the values #
###########################
learning_rate = args.learning_rate
print "learning_rate is %f" %(learning_rate)
batch_size = args.batch_size
print "batch_size is %i" %(batch_size)
sliding_window_lenght = args.sliding_window_lenght
print "sliding_window_lenght is %i" %(sliding_window_lenght)
task_stop_threshold = args.task_stop_threshold
print "task_stop_threshold is %f" %(task_stop_threshold)
Lambda = args.Lambda
Lambda = Lambda.split(',')
if len(Lambda) == 1:
lam = float(Lambda[0])
Lambda_coefs = [lam, lam, lam, lam]
else:
assert len(Lambda) == 4
Lambda = map(float, Lambda)
Lambda_coefs = [Lambda[0], Lambda[1], Lambda[2], Lambda[3]]
print "Lamda_coefs are %s" %(Lambda_coefs,)
print "---------------------------------"
param_seed = args.param_seed
print "SEED for parameters initailization is: %i" %param_seed
gray_scale = args.gray_scale
print "gray_scale is %r" %(gray_scale)
use_ada_delta = args.use_ada_delta
# if learning rate is specified it should use sgd and not ada_delta
if learning_rate != 0.003:
use_ada_delta = False
print "use_ada_delta is %r" %(use_ada_delta)
all_train = args.all_train
print "all_train is %r" %all_train
param_path = args.param_path
print "param_path for loading previously save params is %s" %(param_path)
param_path_cfNet = args.param_path_cfNet
print "param_path_cfNet for loading first convnet model is %s" %(param_path_cfNet)
param_path_strucNet = args.param_path_strucNet
print "param_path_strucNet for loading first convnet model is %s" %(param_path_strucNet)
train_cost = args.cost
print "cost is %s" %train_cost
print "---------------------------------"
####################
# important info #
####################
num_epochs = args.num_epochs
# if num_epochs is given use the given value
# else set it based on the dataset
if num_epochs == -1:
if args.mask_MTFL:
num_epochs = 3000
else:
num_epochs = 10000
print "num_epochs is %i" %(num_epochs)
L2_coef = args.L2_coef
print "L2_coef is %f" %(L2_coef)
L2_coef_ful = args.L2_coef_ful
print "L2_coef for the fully connected layer is %f" %(L2_coef_ful)
L2_coef_out = args.L2_coef_out
if L2_coef_out is None:
L2_coef_out = L2_coef
print "L2_coef for the output layer is %f" %(L2_coef_out)
decay = args.decay
print "the decay coef for adadelta is %f" %decay
scale_mul = args.scale_mul
print "scale_mul is %f" %scale_mul
translate_mul = args.translate_mul
print "translate_mul is %f" %translate_mul
denoise_conv = args.denoise_conv
print "denoise_conv is %s" %(denoise_conv,)
if paral_conv in [2, 5, 6] or denoise_conv in [1, 2]:
    mask_MTFL = 0
    mask_300W = 1
elif paral_conv in [1, 3, 4]:
    mask_300W = 0
    mask_MTFL = 1
else:
    # assumption: fall back to the dataset flags given on the command line
    # (args.mask_MTFL is referenced above; args.mask_300W is assumed to be
    # the symmetric flag), otherwise mask_MTFL and mask_300W are unbound below
    mask_MTFL = args.mask_MTFL
    mask_300W = args.mask_300W
print "mask_MTFL is %f " %mask_MTFL
print "mask_300W is %f " %mask_300W
use_lcn = args.use_lcn
print "use_lcn is %r " %use_lcn
file_suffix = '_' + args.file_suffix
print "file_suffix is %s" %(file_suffix)
dist_ratio = args.dist_ratio
print "dist_ratio is %f" %dist_ratio
# if sw is given, use the given value
# else set it to 10% of the num_epochs
sw = args.sw
if sw == -1:
sw = num_epochs / 10
print "The sw_lenght is %s" %(sw,)
br_mask = args.coarse_mask_branch
br_mask = br_mask.split(',')
br_mask = map(str.strip, br_mask)
br_mask = map(int, br_mask)
assert all(x==0 or x==1 for x in br_mask)
coarse_mask_branch = np.array(br_mask)
print "The coarse mask_branch in SumNet_MTFL model is %s" %(coarse_mask_branch,)
bilinear = args.bilinear
print "bilinear is %s" %(bilinear, )
target_dim = args.target_dim
target_dim = get_target_dim(target_dim, bilinear)
print "target_dim is %r" %(target_dim,)
nkerns = get_nkerns(paral_conv, denoise_conv)
print "nkerns are %s" %(nkerns,)
coarse_conv_size = args.coarse_conv_size
print "coarse_conv_size is %s" %(coarse_conv_size,)
block_img = args.block_img
print "block_img is %s" %(block_img,)
fixed_block = args.fixed_block
print "fixed_block is %s" %(fixed_block,)
bch_norm = args.bch_norm
print "bch_norm is %s" %(bch_norm,)
dropout = args.dropout
print "dropout is %s" %(dropout)
rotation = args.rotation
print "rotation is %s" %(rotation)
nMaps_shuffled = args.nMaps_shuffled
print "nMaps_shuffled is %s" %(nMaps_shuffled,)
use_res_2 = args.use_res_2
print "use_res_2 is %s" %(use_res_2)
use_res_1 = args.use_res_1
print "use_res_1 is %s" %(use_res_1)
extra_fine = args.extra_fine
print "extra_fine layer for 300W model is %s" %(extra_fine)
large_F_filter = args.large_F_filter
print "large_F_filter for 300W model is %s" %(large_F_filter)
load_no_output_params = args.load_no_output_params
print "load_no_output_params for 300W model is %s" %(load_no_output_params)
save_no_output_params = args.save_no_output_params
print "save_no_output_params is %s" %(save_no_output_params)
train_all_kpts = args.train_all_kpts
print "train_all_kpts is %s" %(train_all_kpts)
dropout_kpts = args.dropout_kpts
print "dropout_kpts is %s " %(dropout_kpts)
num_model_kpts = args.num_model_kpts
print "num_model_kpts is %s" %(num_model_kpts,)
conv_size = args.conv_size
print "conv_size is %s" %(conv_size,)
weight_per_pixel = args.weight_per_pixel
print "weight_per_pixel is %s" %(weight_per_pixel)
conv_per_kpt = args.conv_per_kpt
print "conv_per_kpt is %s" %(conv_per_kpt)
linear_conv_per_kpt = args.linear_conv_per_kpt
print "linear_conv_per_kpt is %s" %(linear_conv_per_kpt)
no_fine_tune_model = args.no_fine_tune_model
print "no_fine_tune_model is %s" %(no_fine_tune_model)
only_fine_tune_struc = args.only_fine_tune_struc
print "only_fine_tune_struc is %s" %(only_fine_tune_struc)
only_49_kpts = args.only_49_kpts
print "only_49_kpts is %s" %(only_49_kpts)
concat_pool_locations = args.concat_pool_locations
print "concat_pool_locations is %s" %(concat_pool_locations,)
zero_non_pooled = args.zero_non_pooled
print "zero_non_pooled is %s" %(zero_non_pooled,)
num_procs = args.num_procs
print "num_procs is %s" %(num_procs,)
learn_upsampling = args.learn_upsampling
print "learn_upsampling is %s" %(learn_upsampling)
####################################
# saving the settings for this run #
####################################
params = vars(args)
if file_suffix != '_':
params_name = 'shared_conv_setting%s.pickle' %file_suffix
else:
params_name = 'shared_conv_setting.pickle'
params_path = dest_dir + '/' + params_name
with open(params_path, 'wb') as fp:
pickle.dump(params, fp)
message = train_convNet(nkerns=nkerns, num_epochs=num_epochs, learning_rate=learning_rate, batch_size=batch_size,
sliding_window_lenght=sliding_window_lenght, task_stop_threshold=task_stop_threshold,
L2_coef=L2_coef, L2_coef_out=L2_coef_out, L2_coef_ful=L2_coef_ful, use_ada_delta=use_ada_delta,
decay=decay, param_path=param_path, train_cost=train_cost, gray_scale=gray_scale,
scale_mul=scale_mul, translate_mul=translate_mul, param_seed=param_seed, Lambda_coefs=Lambda_coefs,
file_suffix=file_suffix, mask_MTFL=mask_MTFL, mask_300W=mask_300W, use_lcn=use_lcn,
dist_ratio=dist_ratio, sw_lenght=sw, all_train=all_train, paral_conv=paral_conv,
target_dim=target_dim, bilinear=bilinear, coarse_conv_size=coarse_conv_size, block_img=block_img,
coarse_mask_branch=coarse_mask_branch, fixed_block=fixed_block, bch_norm=bch_norm,
dropout=dropout, rotation=rotation, denoise_conv=denoise_conv, learn_upsampling=learn_upsampling,
param_path_cfNet=param_path_cfNet, nMaps_shuffled=nMaps_shuffled, only_49_kpts=only_49_kpts,
use_res_2=use_res_2, use_res_1=use_res_1, extra_fine=extra_fine, load_no_output_params=load_no_output_params,
large_F_filter=large_F_filter, save_no_output_params=save_no_output_params, train_all_kpts=train_all_kpts,
dropout_kpts=dropout_kpts, num_model_kpts=num_model_kpts, conv_size=conv_size, num_procs=num_procs,
param_path_strucNet=param_path_strucNet, weight_per_pixel=weight_per_pixel, conv_per_kpt=conv_per_kpt,
no_fine_tune_model=no_fine_tune_model, linear_conv_per_kpt=linear_conv_per_kpt,
concat_pool_locations=concat_pool_locations, zero_non_pooled=zero_non_pooled,
only_fine_tune_struc=only_fine_tune_struc)
sys.stderr.write("file_suffix is %s\n" %(file_suffix ))
sys.stderr.write("%s\n" %(message,))
|
95985
|
import unittest
import numpy as np
import torch
from pyscf import gto
from torch.autograd import Variable, grad, gradcheck
from qmctorch.scf import Molecule
from qmctorch.wavefunction import SlaterJastrow
torch.set_default_tensor_type(torch.DoubleTensor)
def hess(out, pos):
# compute the jacobian
z = Variable(torch.ones(out.shape))
jacob = grad(out, pos,
grad_outputs=z,
only_inputs=True,
create_graph=True)[0]
# compute the diagonal element of the Hessian
z = Variable(torch.ones(jacob.shape[0]))
hess = torch.zeros(jacob.shape)
for idim in range(jacob.shape[1]):
tmp = grad(jacob[:, idim], pos,
grad_outputs=z,
only_inputs=True,
create_graph=True)[0]
hess[:, idim] = tmp[:, idim]
return hess
def hess_mixed_terms(out, pos):
# compute the jacobian
z = Variable(torch.ones(out.shape))
jacob = grad(out, pos,
grad_outputs=z,
only_inputs=True,
create_graph=True)[0]
    # compute the mixed (off-diagonal) second-derivative terms of the Hessian
z = Variable(torch.ones(jacob.shape[0]))
hess = torch.zeros(jacob.shape)
nelec = pos.shape[1]//3
k = 0
for ielec in range(nelec):
ix = ielec*3
tmp = grad(jacob[:, ix], pos,
grad_outputs=z,
only_inputs=True,
create_graph=True)[0]
hess[:, k] = tmp[:, ix+1]
k = k + 1
hess[:, k] = tmp[:, ix+2]
k = k + 1
iy = ielec*3 + 1
tmp = grad(jacob[:, iy], pos,
grad_outputs=z,
only_inputs=True,
create_graph=True)[0]
hess[:, k] = tmp[:, iy+1]
k = k + 1
return hess
class TestAOderivativesPyscf(unittest.TestCase):
def setUp(self):
torch.manual_seed(101)
np.random.seed(101)
# define the molecule
at = 'Li 0 0 0; H 0 0 1'
basis = 'dzp'
self.mol = Molecule(atom=at,
calculator='pyscf',
basis=basis,
unit='bohr')
self.m = gto.M(atom=at, basis=basis, unit='bohr')
# define the wave function
self.wf = SlaterJastrow(self.mol, include_all_mo=True)
# define the grid points
npts = 11
self.pos = torch.rand(npts, self.mol.nelec * 3)
self.pos = Variable(self.pos)
self.pos.requires_grad = True
def test_ao_deriv(self):
ao = self.wf.ao(self.pos)
dao = self.wf.ao(self.pos, derivative=1)
dao_grad = grad(
ao, self.pos, grad_outputs=torch.ones_like(ao))[0]
gradcheck(self.wf.ao, self.pos)
assert(torch.allclose(dao.sum(), dao_grad.sum()))
def test_ao_grad_sum(self):
ao = self.wf.ao(self.pos)
dao_sum = self.wf.ao(self.pos, derivative=1, sum_grad=True)
dao = self.wf.ao(self.pos, derivative=1, sum_grad=False)
assert(torch.allclose(dao_sum, dao.sum(-1)))
def test_ao_hess(self):
ao = self.wf.ao(self.pos)
d2ao = self.wf.ao(self.pos, derivative=2)
d2ao_grad = hess(ao, self.pos)
assert(torch.allclose(d2ao.sum(), d2ao_grad.sum()))
def test_ao_hess_sum(self):
ao = self.wf.ao(self.pos)
d2ao_sum = self.wf.ao(self.pos, derivative=2, sum_hess=True)
d2ao = self.wf.ao(self.pos, derivative=2, sum_hess=False)
assert(torch.allclose(d2ao_sum, d2ao.sum(-1)))
def test_ao_mixed_der(self):
ao = self.wf.ao(self.pos)
d2ao = self.wf.ao(self.pos, derivative=3)
d2ao_auto = hess_mixed_terms(ao, self.pos)
assert(torch.allclose(d2ao.sum(), d2ao_auto.sum()))
def test_ao_all(self):
ao = self.wf.ao(self.pos)
dao = self.wf.ao(self.pos, derivative=1, sum_grad=False)
d2ao = self.wf.ao(self.pos, derivative=2)
ao_all, dao_all, d2ao_all = self.wf.ao(
self.pos, derivative=[0, 1, 2])
assert(torch.allclose(ao, ao_all))
assert(torch.allclose(dao, dao_all))
assert(torch.allclose(d2ao, d2ao_all))
if __name__ == "__main__":
# unittest.main()
t = TestAOderivativesPyscf()
t.setUp()
t.test_ao_mixed_der()
# t.test_ao_all()
# t.test_ao_deriv()
# t.test_ao_hess()
|
95989
|
class InwardMeta(type):
@classmethod
def __prepare__(meta, name, bases, **kwargs):
cls = super().__new__(meta, name, bases, {})
return {"__newclass__": cls}
def __new__(meta, name, bases, namespace):
cls = namespace["__newclass__"]
del namespace["__newclass__"]
        for attr in namespace:
            setattr(cls, attr, namespace[attr])
return cls
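# A minimal usage sketch: __newclass__ is bound while the class body executes,
# so the body can refer to the class being defined before it is finished, e.g.
# as a default argument value (method bodies cannot see it at call time):
#
#     class Node(metaclass=InwardMeta):
#         def make_sibling(self, cls=__newclass__):
#             return cls()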
|
95999
|
from icolos.core.workflow_steps.step import StepBase
from pydantic import BaseModel
from openmm.app import PDBFile
import parmed
from openff.toolkit.topology import Molecule, Topology
from openff.toolkit.typing.engines.smirnoff import ForceField
from openff.toolkit.utils import get_data_file_path
from icolos.utils.enums.step_enums import StepOpenFFEnum
import os
# Note that this implementation leverages parmed for now, although will likely move over to the OpenFF Interchange tooling once stable
_SOFE = StepOpenFFEnum()
# TODO: Eventually this step should be able to do a pdb2gmx job
class StepOFF2gmx(StepBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
def parametrise_mols(self, tmp_dir: str):
"""
Generate parameters for each mol
"""
# TODO: do we want to throw everything together or split the params into separate files?
mols = [
Molecule.from_smiles(smi)
for smi in self.get_additional_setting(_SOFE.UNIQUE_MOLS)
]
pdb_file = PDBFile(
os.path.join(tmp_dir, self.data.generic.get_argument_by_extension("pdb"))
)
omm_topology = pdb_file.topology
off_topology = Topology.from_openmm(omm_topology, unique_molecules=mols)
forcefield = ForceField(self.get_additional_setting(_SOFE.FORCEFIELD))
omm_system = forcefield.create_openmm_system(off_topology)
if self.get_additional_setting(_SOFE.METHOD, _SOFE.PARMED) == _SOFE.PARMED:
parmed_struct = parmed.openmm.load_topology(
omm_topology, omm_system, pdb_file.positions
)
parmed_struct.save(os.path.join(tmp_dir, "MOL.top"), overwrite=True)
parmed_struct.save(os.path.join(tmp_dir, "MOL.gro"), overwrite=True)
# TODO: validate energy differences
elif self.get_additional_setting(_SOFE.METHOD) == _SOFE.INTERCHANGE:
raise NotImplementedError
else:
raise NotImplementedError
def execute(self):
"""
        Builds a system, parametrises it using OpenFF SAGE params, then converts it to GROMACS top/gro format for downstream simulation
"""
tmp_dir = self._make_tmpdir()
self.data.generic.write_out_all_files(tmp_dir)
self.parametrise_mols(tmp_dir)
self._parse_output(tmp_dir)
self._remove_temporary(tmp_dir)
# If we want to build OpenFF params instead of gaff, we would need to make a call to a different parametrisation pipeline, then load the protein into a ParmEd System, combine with the OpenFF-built system and combine into a gro/top file.
|
96001
|
import codecs
from contextlib import suppress
import logging
import os
from pathlib import Path
from typing import Union
import tempfile
_LOGGER = logging.getLogger(__name__)
def ensure_unique_string(preferred_string, current_strings):
test_string = preferred_string
current_strings_set = set(current_strings)
tries = 1
while test_string in current_strings_set:
tries += 1
test_string = f"{preferred_string}_{tries}"
return test_string
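# e.g. ensure_unique_string("light", ["light", "light_2"]) returns "light_3".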
def indent_all_but_first_and_last(text, padding=" "):
lines = text.splitlines(True)
if len(lines) <= 2:
return text
return lines[0] + "".join(padding + line for line in lines[1:-1]) + lines[-1]
def indent_list(text, padding=" "):
return [padding + line for line in text.splitlines()]
def indent(text, padding=" "):
return "\n".join(indent_list(text, padding))
# From https://stackoverflow.com/a/14945195/8924614
def cpp_string_escape(string, encoding="utf-8"):
def _should_escape(byte): # type: (int) -> bool
if not 32 <= byte < 127:
return True
if byte in (ord("\\"), ord('"')):
return True
return False
if isinstance(string, str):
string = string.encode(encoding)
result = ""
for character in string:
if _should_escape(character):
result += f"\\{character:03o}"
else:
result += chr(character)
return f'"{result}"'
def run_system_command(*args):
import subprocess
with subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
stdout, stderr = p.communicate()
rc = p.returncode
return rc, stdout, stderr
def mkdir_p(path):
if not path:
# Empty path - means create current dir
return
try:
os.makedirs(path)
except OSError as err:
import errno
if err.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
from esphome.core import EsphomeError
raise EsphomeError(f"Error creating directories {path}: {err}") from err
def is_ip_address(host):
parts = host.split(".")
if len(parts) != 4:
return False
try:
for p in parts:
int(p)
return True
except ValueError:
return False
def _resolve_with_zeroconf(host):
from esphome.core import EsphomeError
from esphome.zeroconf import EsphomeZeroconf
try:
zc = EsphomeZeroconf()
except Exception as err:
raise EsphomeError(
"Cannot start mDNS sockets, is this a docker container without "
"host network mode?"
) from err
try:
info = zc.resolve_host(f"{host}.")
except Exception as err:
raise EsphomeError(f"Error resolving mDNS hostname: {err}") from err
finally:
zc.close()
if info is None:
raise EsphomeError(
"Error resolving address with mDNS: Did not respond. "
"Maybe the device is offline."
)
return info
def resolve_ip_address(host):
from esphome.core import EsphomeError
import socket
errs = []
if host.endswith(".local"):
try:
return _resolve_with_zeroconf(host)
except EsphomeError as err:
errs.append(str(err))
try:
return socket.gethostbyname(host)
    except OSError as err:
        errs.append(str(err))
        raise EsphomeError(f"Error resolving IP address: {', '.join(errs)}") from err
def get_bool_env(var, default=False):
return bool(os.getenv(var, default))
def is_ha_addon():
return get_bool_env("ESPHOME_IS_HA_ADDON")
def walk_files(path):
for root, _, files in os.walk(path):
for name in files:
yield os.path.join(root, name)
def read_file(path):
try:
with codecs.open(path, "r", encoding="utf-8") as f_handle:
return f_handle.read()
except OSError as err:
from esphome.core import EsphomeError
raise EsphomeError(f"Error reading file {path}: {err}") from err
except UnicodeDecodeError as err:
from esphome.core import EsphomeError
raise EsphomeError(f"Error reading file {path}: {err}") from err
def _write_file(path: Union[Path, str], text: Union[str, bytes]):
"""Atomically writes `text` to the given path.
Automatically creates all parent directories.
"""
if not isinstance(path, Path):
path = Path(path)
data = text
if isinstance(text, str):
data = text.encode()
directory = path.parent
directory.mkdir(exist_ok=True, parents=True)
tmp_path = None
try:
with tempfile.NamedTemporaryFile(
mode="wb", dir=directory, delete=False
) as f_handle:
tmp_path = f_handle.name
f_handle.write(data)
# Newer tempfile implementations create the file with mode 0o600
os.chmod(tmp_path, 0o644)
# If destination exists, will be overwritten
os.replace(tmp_path, path)
finally:
if tmp_path is not None and os.path.exists(tmp_path):
try:
os.remove(tmp_path)
except OSError as err:
_LOGGER.error("Write file cleanup failed: %s", err)
def write_file(path: Union[Path, str], text: str):
try:
_write_file(path, text)
except OSError as err:
from esphome.core import EsphomeError
raise EsphomeError(f"Could not write file at {path}") from err
def write_file_if_changed(path: Union[Path, str], text: str) -> bool:
"""Write text to the given path, but not if the contents match already.
Returns true if the file was changed.
"""
if not isinstance(path, Path):
path = Path(path)
src_content = None
if path.is_file():
src_content = read_file(path)
if src_content == text:
return False
write_file(path, text)
return True
def copy_file_if_changed(src: os.PathLike, dst: os.PathLike) -> None:
import shutil
if file_compare(src, dst):
return
mkdir_p(os.path.dirname(dst))
try:
shutil.copyfile(src, dst)
except OSError as err:
if isinstance(err, PermissionError):
# Older esphome versions copied over the src file permissions too.
# So when the dst file had 444 permissions, the dst file would have those
# too and subsequent writes would fail
# -> delete file (it would be overwritten anyway), and try again
# if that fails, use normal error handler
with suppress(OSError):
os.unlink(dst)
shutil.copyfile(src, dst)
return
from esphome.core import EsphomeError
raise EsphomeError(f"Error copying file {src} to {dst}: {err}") from err
def list_starts_with(list_, sub):
return len(sub) <= len(list_) and all(list_[i] == x for i, x in enumerate(sub))
def file_compare(path1: os.PathLike, path2: os.PathLike) -> bool:
"""Return True if the files path1 and path2 have the same contents."""
import stat
try:
stat1, stat2 = os.stat(path1), os.stat(path2)
except OSError:
# File doesn't exist or another error -> not equal
return False
if (
stat.S_IFMT(stat1.st_mode) != stat.S_IFREG
or stat.S_IFMT(stat2.st_mode) != stat.S_IFREG
):
# At least one of them is not a regular file (or does not exist)
return False
if stat1.st_size != stat2.st_size:
# Different sizes
return False
bufsize = 8 * 1024
# Read files in blocks until a mismatch is found
with open(path1, "rb") as fh1, open(path2, "rb") as fh2:
while True:
blob1, blob2 = fh1.read(bufsize), fh2.read(bufsize)
if blob1 != blob2:
# Different content
return False
if not blob1:
# Reached end
return True
# A dict of types that need to be converted to heaptypes before a class can be added
# to the object
_TYPE_OVERLOADS = {
int: type("EInt", (int,), {}),
float: type("EFloat", (float,), {}),
str: type("EStr", (str,), {}),
dict: type("EDict", (str,), {}),
list: type("EList", (list,), {}),
}
# cache created classes here
_CLASS_LOOKUP = {}
def add_class_to_obj(value, cls):
"""Add a class to a python type.
This function modifies value so that it has cls as a basetype.
The value itself may be modified by this action! You must use the return
value of this function however, since some types need to be copied first (heaptypes).
"""
if isinstance(value, cls):
# If already is instance, do not add
return value
try:
orig_cls = value.__class__
key = (orig_cls, cls)
new_cls = _CLASS_LOOKUP.get(key)
if new_cls is None:
new_cls = orig_cls.__class__(orig_cls.__name__, (orig_cls, cls), {})
_CLASS_LOOKUP[key] = new_cls
value.__class__ = new_cls
return value
except TypeError:
# Non heap type, look in overloads dict
for type_, func in _TYPE_OVERLOADS.items():
# Use type() here, we only need to trigger if it's the exact type,
# as otherwise we don't need to overload the class
if type(value) is type_: # pylint: disable=unidiomatic-typecheck
return add_class_to_obj(func(value), cls)
raise
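# Usage sketch (FromString is a hypothetical marker class):
#   value = add_class_to_obj(42, FromString)
#   isinstance(value, int) and isinstance(value, FromString)  # both True
# int is not a heap type, so the value is first copied into the EInt subclass.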
|
96021
|
import asyncio
import gc
import time
import weakref
from koala.typing import *
from koala.logger import logger
__TIMER_ID = 0
def _gen_timer_id() -> int:
global __TIMER_ID
__TIMER_ID += 1
return __TIMER_ID
def _milli_seconds() -> int:
return int(time.time() * 1000)
class ActorTimer:
def __init__(self, weak_actor: weakref.ReferenceType,
actor_id: str,
manager: "ActorTimerManager",
fn: Callable[["ActorTimer"], None],
interval: int):
self._timer_id = _gen_timer_id()
self._weak_actor = weak_actor
self._actor_id = actor_id
self._manager = manager
self._fn = fn
self._interval = interval
self._begin_time = _milli_seconds()
self._tick_count = 0
self._is_cancel = False
def __del__(self):
logger.debug("ActorTimer:%s GC, ActorID:%s" % (self.timer_id, self._actor_id))
pass
@property
def timer_id(self) -> int:
return self._timer_id
@property
def tick_count(self) -> int:
return self._tick_count
@property
def interval(self) -> int:
return self._interval
@property
def is_cancel(self) -> bool:
return self._is_cancel or not self._weak_actor()
def cancel(self):
self._is_cancel = True
def tick(self):
if self.is_cancel:
return
try:
self._tick_count += 1
self._fn(self)
except Exception as e:
logger.error("ActorTimer, Actor:%s, ActorID:%d, Exception:%s" %
(self._actor_id, self.timer_id, e))
return
if not self.is_cancel:
next_wait = self.next_tick_time()
self._manager.internal_register_timer(next_wait, self)
pass
def run(self):
actor = self._weak_actor()
if actor:
asyncio.create_task(actor.context.push_message((None, self)))
else:
self.cancel()
def next_tick_time(self) -> int:
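        # Fixed-rate scheduling: the next deadline is derived from the timer's
        # original start time, so per-tick processing delays do not accumulate
        # as drift.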
current = _milli_seconds()
next_time = self._begin_time + (self._tick_count + 1) * self._interval
wait_time = next_time - current
return wait_time if wait_time > 0 else 0
class ActorTimerManager:
def __init__(self, weak_actor: weakref.ReferenceType):
self._weak_actor = weak_actor
self._actor_id = ""
self._dict: Dict[int, ActorTimer] = dict()
@property
def actor_id(self) -> str:
from koala.server.actor_base import ActorBase
if self._actor_id == "":
actor: ActorBase = cast(ActorBase, self._weak_actor())
self._actor_id = "%s/%s" % (actor.type_name, actor.uid)
return self._actor_id
@classmethod
async def _run_timer(cls, sleep: int, timer: ActorTimer):
if sleep > 0:
await asyncio.sleep(sleep/1000)
timer.run()
def internal_register_timer(self, next_time: int, timer: ActorTimer):
asyncio.create_task(self._run_timer(next_time, timer))
def register_timer(self, interval: int, fn: Callable[[ActorTimer], None]) -> ActorTimer:
timer = ActorTimer(self._weak_actor, self.actor_id, self, fn, interval)
self._dict[timer.timer_id] = timer
next_wait = timer.next_tick_time()
self.internal_register_timer(next_wait, timer)
return timer
def unregister_timer(self, timer_id: int):
if timer_id in self._dict:
timer = self._dict[timer_id]
self._dict.pop(timer_id)
timer.cancel()
def unregister_all(self):
remove_list = []
for timer_id in self._dict:
remove_list.append(timer_id)
for timer_id in remove_list:
self.unregister_timer(timer_id)
del self._dict
self._dict = {}
|
96060
|
import json
from os import getenv
from pathlib import Path
from typing import TypedDict, List, Any, Union
from .errors_modals import *
class User(TypedDict, total=False):
username: str
password: str
is_default: bool
last_login_time: str
consumed_bytes: int
last_login_result: str
class Config(TypedDict):
users: List[User]
    last_login_username: str  # username from the most recent login
last_ip_addr: str
base_dir = Path.home() if not getenv('IPGW_CONFIG_FILE') else Path(getenv('IPGW_CONFIG_FILE'))
config_file_location = base_dir.joinpath('ipgw.json')
with open(config_file_location, 'r', encoding='utf8') as config_file:
config: Config = json.load(config_file)
def update_config_file():
global config
with open(config_file_location, 'w', encoding='utf8') as writable_config_file:
json.dump(config, writable_config_file, ensure_ascii=False, indent=4)
def add_user(user_dict: User) -> None:
if "is_default" not in user_dict.keys():
user_dict['is_default'] = False
config["users"].append(user_dict)
update_config_file()
def query_user_by_username(username: str) -> Union[User, None]:
users_list = config["users"]
return next((x for x in users_list if x['username'] == username), None)
def query_default_user() -> User:
users_list = config["users"]
result = next((x for x in users_list if x['is_default']), None)
if not result:
raise NoDefaultUserError
return result
def set_default_username(username: str) -> None:
    found = False
    for user in config["users"]:
        if user["username"] == username:
            user["is_default"] = True
            found = True
        else:
            user["is_default"] = False
    if not found:
raise UsernameNotInConfigFileError
update_config_file()
def update_last_login_info(username="", ip_addr=""):
if username:
config['last_login_username'] = username
if ip_addr:
config['last_ip_addr'] = ip_addr
update_config_file()
def query_last_user() -> User:
return query_user_by_username(config['last_login_username'])
|
96081
|
from typing import Any, Dict, Optional, cast
from django import forms
from django.core.exceptions import ValidationError
from django.forms.models import ModelChoiceIterator
class Autocomplete(forms.Select):
template_name = "reactivated/autocomplete"
def get_context(
self, name: str, value: Any, attrs: Optional[Dict[str, Any]]
) -> Dict[str, Any]:
choices = cast(ModelChoiceIterator, self.choices)
assert choices.queryset is not None
assert hasattr(
choices.queryset, "autocomplete"
), "Models marked for autocompletion must implement autocomplete(query: str) at the manager level"
to_field_name = choices.field.to_field_name or "pk"
# context = forms.Widget.get_context(self, name, value, attrs)
# self.choices.queryset = self.choices.queryset._clone()[:10]
# context = super().get_context(name, value, attrs)
context = forms.Widget.get_context(self, name, value, attrs)
try:
selected = choices.field.to_python(value)
except ValidationError:
selected = None
if selected is not None:
context["widget"]["selected"] = {
"value": getattr(selected, to_field_name),
"label": str(selected),
}
else:
context["widget"]["selected"] = None
return context
|
96117
|
import os.path as osp
from PIL import Image
import torchvision.transforms as transforms
from torch.utils.data import Dataset
class GBU(Dataset):
def __init__(self, path, indices_list, labels, stage="train"):
"""This is a dataloader for the processed good bad ugly dataset.
Assuming that the images have been cropped and save as 0-indexed
files in the path. Assuming the indices list is also 0-indexed.
Args:
path (str): path of the processed dataset
indices_list (list): list of image indices to be list
labels (list): contains integer labels for the images
stage (str, optional): loads a different transforms if test. Defaults to 'train'.
"""
self.data = []
for i in range(len(indices_list)):
image_file = str(indices_list[i]) + ".jpg"
self.data.append((osp.join(path, image_file), labels[i]))
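        # Standard ImageNet channel statistics for normalization.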
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
if stage == "train":
self.transforms = transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
)
if stage == "test":
self.transforms = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
)
def __len__(self):
return len(self.data)
def __getitem__(self, i):
path, label = self.data[i]
image = Image.open(path).convert("RGB")
image = self.transforms(image)
if (
image.shape[0] != 3
or image.shape[1] != 224
or image.shape[2] != 224
):
print("you should delete this guy:", path)
return image, label
|
96180
|
from __future__ import absolute_import, print_function, unicode_literals
from .interface import Action
from .named_shell_task import render_task
_DEPROVISION_TITLE = "DEPROVISION CLOUD RESOURCES"
_DEPROVISION_ACTION = "oct deprovision"
class DeprovisionAction(Action):
"""
A DeprovisionAction generates a post-build
step that deprovisions the remote host.
"""
def generate_post_build_steps(self):
return [render_task(
title=_DEPROVISION_TITLE,
command=_DEPROVISION_ACTION,
output_format=self.output_format
)]
|
96195
|
from setuptools import setup
setup(name='seleniumapis',
version='0.1',
description='Query Vanguard and other sites using the Selenium API',
author='<NAME>',
author_email='<EMAIL>',
packages=['vanguard'],
install_requires=[
'selenium',
'nose'
],
zip_safe=False)
|
96202
|
import dcase_util
data = dcase_util.utils.Example.feature_container()
data_aggregator = dcase_util.data.Aggregator(
recipe=['flatten'],
win_length_frames=10,
hop_length_frames=1,
)
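# With the 'flatten' recipe, the 10 frames in each window are concatenated
# into one feature vector per hop, so the feature dimensionality grows 10x.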
data_aggregator.aggregate(data)
data.plot()
|
96208
|
from importlib import import_module
from shutil import copytree
from datetime import date
from logging import Logger
from typing import Union
from foliant.utils import spinner
class BaseBackend():
'''Base backend. All backends must inherit from this one.'''
targets = ()
required_preprocessors_before = ()
required_preprocessors_after = ()
def __init__(self, context: dict, logger: Logger, quiet=False, debug=False):
self.project_path = context['project_path']
self.config = context['config']
self.context = context
self.logger = logger
self.quiet = quiet
self.debug = debug
self.working_dir = self.project_path / self.config['tmp_dir']
def get_slug(self) -> str:
'''Generate a slug from the project title and version and the current date.
Spaces in title are replaced with underscores, then the version and the current date
are appended.
'''
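        # e.g. title 'My Project' with version 1.0 built on 2021-06-01 yields
        # the slug 'My_Project-1.0-2021-06-01'.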
if 'slug' in self.config:
return self.config['slug']
components = []
components.append(self.config['title'].replace(' ', '_'))
version = self.config.get('version')
if version:
components.append(str(version))
components.append(str(date.today()))
return '-'.join(components)
    def apply_preprocessor(self, preprocessor: Union[str, dict]):
'''Apply preprocessor.
:param preprocessor: Preprocessor name or a dict of the preprocessor name and its options
'''
if isinstance(preprocessor, str):
preprocessor_name, preprocessor_options = preprocessor, {}
elif isinstance(preprocessor, dict):
(preprocessor_name, preprocessor_options), = (*preprocessor.items(),)
with spinner(
f'Applying preprocessor {preprocessor_name}',
self.logger,
self.quiet,
self.debug
):
try:
preprocessor_module = import_module(f'foliant.preprocessors.{preprocessor_name}')
preprocessor_module.Preprocessor(
self.context,
self.logger,
self.quiet,
self.debug,
preprocessor_options
).apply()
except ModuleNotFoundError:
raise ModuleNotFoundError(f'Preprocessor {preprocessor_name} is not installed')
except Exception as exception:
raise RuntimeError(
f'Failed to apply preprocessor {preprocessor_name}: {exception}'
)
def preprocess_and_make(self, target: str) -> str:
'''Apply preprocessors required by the selected backend and defined in the config file,
then run the ``make`` method.
:param target: Output format: pdf, docx, html, etc.
:returns: Result as returned by the ``make`` method
'''
src_path = self.project_path / self.config['src_dir']
copytree(src_path, self.working_dir)
common_preprocessors = (
*self.required_preprocessors_before,
*self.config.get('preprocessors', ()),
*self.required_preprocessors_after
)
if self.config.get('escape_code', False):
if isinstance(self.config['escape_code'], dict):
escapecode_preprocessor = {
'escapecode': self.config['escape_code'].get('options', {})
}
else:
escapecode_preprocessor = 'escapecode'
preprocessors = (
escapecode_preprocessor,
*common_preprocessors,
'unescapecode'
)
elif self.config.get('disable_implicit_unescape', False):
preprocessors = common_preprocessors
else:
preprocessors = (
*common_preprocessors,
'_unescape'
)
for preprocessor in preprocessors:
self.apply_preprocessor(preprocessor)
return self.make(target)
def make(self, target: str) -> str:
'''Make the output from the source. Must be implemented by every backend.
:param target: Output format: pdf, docx, html, etc.
:returns: Typically, the path to the output file, but in general any string
'''
raise NotImplementedError
|
96227
|
import numpy as np
from ..utils.dictionary import get_lambda_max
def simulate_data(n_times, n_times_atom, n_atoms, n_channels, noise_level,
random_state=None):
rng = np.random.RandomState(random_state)
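    # Bernoulli density of the sparse activations Z: on average this fraction
    # of entries is non-zero.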
rho = n_atoms / (n_channels * n_times_atom)
D = rng.normal(scale=10.0, size=(n_atoms, n_channels, n_times_atom))
D = np.array(D)
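    # Normalize each atom to unit l2 norm; the (nD == 0) term guards against
    # dividing an all-zero atom by zero.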
nD = np.sqrt((D * D).sum(axis=-1, keepdims=True))
D /= nD + (nD == 0)
Z = (rng.rand(n_atoms, (n_times - 1) * n_times_atom + 1) < rho
).astype(np.float64)
Z *= rng.normal(scale=10, size=(n_atoms, (n_times - 1) * n_times_atom + 1))
X = np.array([[np.convolve(zk, dk, 'full') for dk in Dk]
for Dk, zk in zip(D, Z)]).sum(axis=0)
X += noise_level * rng.normal(size=X.shape)
lmbd_max = get_lambda_max(X, D)
return X, D, lmbd_max
|
96228
|
import rsa
import json
class LazyUser(object):
def __init__(self):
self.pub, self.priv = rsa.newkeys(512)
def sign(self, transaction):
message = json.dumps(transaction.to_dict(), sort_keys=True).encode('utf-8')
signature = rsa.sign(message, self.priv, 'SHA-256')
return (message, signature)
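# A signature produced here can be checked with
# rsa.verify(message, signature, user.pub), which raises
# rsa.VerificationError if it does not match.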
|
96278
|
class TaskException(Exception):
"""
    Like the classic Exception, but we keep the task result, which otherwise gets spoiled by the celery result cache backend.
The kwargs parameter is used for passing custom metadata.
"""
obj = None
def __init__(self, result, msg=None, **kwargs):
if msg:
result['detail'] = msg
self.result = result
self.msg = msg
self.__dict__.update(kwargs)
super(TaskException, self).__init__(result)
class MgmtTaskException(Exception):
"""Custom exception for nice dictionary output"""
def __init__(self, msg, **kwargs):
self.result = {'detail': msg}
self.msg = msg
self.__dict__.update(kwargs)
super(MgmtTaskException, self).__init__(self.result)
class TaskRetry(Exception):
"""Our task retry exception"""
pass
class PingFailed(Exception):
"""Task queue ping failed due to timeout or some IO error"""
pass
class UserTaskError(Exception):
"""Error in que.user_tasks.UserTasks"""
pass
class CallbackError(Exception):
"""Callback task problem (used by que.callbacks)"""
pass
class TaskLockError(Exception):
"""Lock error"""
pass
class NodeError(Exception):
"""General error on compute node"""
pass
|
96294
|
from copy import copy
import numpy as np
from gym_chess import ChessEnvV1
from gym_chess.envs.chess_v1 import (
KING_ID,
QUEEN_ID,
ROOK_ID,
BISHOP_ID,
KNIGHT_ID,
PAWN_ID,
)
from gym_chess.test.utils import run_test_funcs
# Blank board
BASIC_BOARD = np.array([[0] * 8] * 8, dtype=np.int8)
# Pawn basic movements
def test_pawn_basic_moves():
BOARD = copy(BASIC_BOARD)
BOARD[6, 0] = PAWN_ID
BOARD[1, 0] = -PAWN_ID
env = ChessEnvV1(opponent="none", initial_state=BOARD)
    # white's first move
    actions = env.get_possible_actions()
    env.step(actions[0])
    # black's first move
    actions = env.get_possible_actions()
    env.step(actions[0])
    # white's second move
    actions = env.get_possible_actions()
    env.step(actions[0])
    # black's second move
    actions = env.get_possible_actions()
    env.step(actions[0])
EXPECTED_BOARD = copy(BASIC_BOARD)
EXPECTED_BOARD[4, 0] = PAWN_ID
EXPECTED_BOARD[3, 0] = -PAWN_ID
assert (env.state == EXPECTED_BOARD).all()
if __name__ == "__main__":
run_test_funcs(__name__)
|
96306
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
from Models.AutoEncoder import create_layer
def create_encoder_block(in_channels, out_channels, kernel_size, wn=True, bn=True,
activation=nn.ReLU, layers=2):
encoder = []
for i in range(layers):
_in = out_channels
_out = out_channels
if i == 0:
_in = in_channels
encoder.append(create_layer(_in, _out, kernel_size, wn, bn, activation, nn.Conv2d))
return nn.Sequential(*encoder)
def create_decoder_block(in_channels, out_channels, kernel_size, wn=True, bn=True,
activation=nn.ReLU, layers=2, final_layer=False):
decoder = []
for i in range(layers):
_in = in_channels
_out = in_channels
_bn = bn
_activation = activation
if i == 0:
_in = in_channels * 2
if i == layers - 1:
_out = out_channels
if final_layer:
_bn = False
_activation = None
decoder.append(create_layer(_in, _out, kernel_size, wn, _bn, _activation, nn.ConvTranspose2d))
return nn.Sequential(*decoder)
def create_encoder(in_channels, filters, kernel_size, wn=True, bn=True, activation=nn.ReLU, layers=2):
encoder = []
for i in range(len(filters)):
if i == 0:
encoder_layer = create_encoder_block(in_channels, filters[i], kernel_size, wn, bn, activation, layers)
else:
encoder_layer = create_encoder_block(filters[i-1], filters[i], kernel_size, wn, bn, activation, layers)
encoder = encoder + [encoder_layer]
return nn.Sequential(*encoder)
def create_decoder(out_channels, filters, kernel_size, wn=True, bn=True, activation=nn.ReLU, layers=2):
decoder = []
for i in range(len(filters)):
if i == 0:
decoder_layer = create_decoder_block(filters[i], out_channels, kernel_size, wn, bn, activation, layers, final_layer=True)
else:
decoder_layer = create_decoder_block(filters[i], filters[i-1], kernel_size, wn, bn, activation, layers, final_layer=False)
decoder = [decoder_layer] + decoder
return nn.Sequential(*decoder)
class UNetEx(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, filters=[16, 32, 64], layers=2,
weight_norm=True, batch_norm=True, activation=nn.ReLU, final_activation=None):
super().__init__()
assert len(filters) > 0
self.final_activation = final_activation
self.encoder = create_encoder(in_channels, filters, kernel_size, weight_norm, batch_norm, activation, layers)
decoders = []
for i in range(out_channels):
decoders.append(create_decoder(1, filters, kernel_size, weight_norm, batch_norm, activation, layers))
self.decoders = nn.Sequential(*decoders)
def encode(self, x):
tensors = []
sizes = []
for encoder in self.encoder:
x = encoder(x)
sizes.append(x.size())
tensors.append(x)
x = F.avg_pool2d(x, 2, 2)
return x, tensors, sizes
def decode(self, _x, _tensors, _sizes):
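        # One independent decoder per output channel: at each scale, upsample
        # to the stored encoder size, concatenate the matching skip tensor
        # (U-Net style), then convolve.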
y = []
for _decoder in self.decoders:
x = _x
tensors = _tensors[:]
sizes = _sizes[:]
for decoder in _decoder:
tensor = tensors.pop()
size = sizes.pop()
x = F.interpolate(x, size=size[-2:])
x = torch.cat([tensor, x], dim=1)
x = decoder(x)
y.append(x)
return torch.cat(y, dim=1)
def forward(self, x):
x, tensors, sizes = self.encode(x)
x = self.decode(x, tensors, sizes)
if self.final_activation is not None:
x = self.final_activation(x)
return x
|
96346
|
from bench_db import get_timings_by_id
from bench_graphs import do_warmup_plot
num_iter = 7
color_copy_by_ref = 'green'
name = 'native-'
def warmup_all_plots():
warmup_plot_fannkuch()
warmup_plot_spectralnorm()
warmup_plot_bintree()
def warmup_plot_fannkuch():
ids = [
100 # fannkuchredux-1, 2020-08-30 21:14, graalphp-native
, 102 # fannkuchredux-1, 2020-08-30 21:39, graalphp-native
, 104 # fannkuchredux-1, 2020-08-30 22:03, graalphp-native
, 106 # fannkuchredux-1, 2020-08-30 22:28, graalphp-native
, 108 # fannkuchredux-1, 2020-08-30 22:52, graalphp-native
, 110 # fannkuchredux-1, 2020-08-30 23:16, graalphp-native
, 112 # fannkuchredux-1, 2020-08-30 23:40, graalphp-native
, 114 # fannkuchredux-1, 2020-08-31 00:04, graalphp-native
, 116 # fannkuchredux-1, 2020-08-31 00:28, graalphp-native
]
runs = [get_timings_by_id(i, warmup=0) for i in ids]
do_warmup_plot('fannkuchredux \ncopy-by-val', runs, num_iter=num_iter, subtitle='',
file_prefix=name)
pass
def warmup_plot_spectralnorm():
ids_by_val = [
118 # spectralnorm-by-val, 2020-08-31 00:31:52, graalphp-native
, 122 # spectralnorm-by-val, 2020-08-31 00:37:51, graalphp-native
, 126 # spectralnorm-by-val, 2020-08-31 00:43:51, graalphp-native
, 130 # spectralnorm-by-val, 2020-08-31 00:49:50, graalphp-native
, 134 # spectralnorm-by-val, 2020-08-31 00:55:49, graalphp-native
, 138 # spectralnorm-by-val, 2020-08-31 01:01:48, graalphp-native
, 142 # spectralnorm-by-val, 2020-08-31 01:07:47, graalphp-native
, 146 # spectralnorm-by-val, 2020-08-31 01:13:47, graalphp-native
, 150 # spectralnorm-by-val, 2020-08-31 01:19:46, graalphp-native
]
runs = [get_timings_by_id(i, warmup=0) for i in ids_by_val]
do_warmup_plot('spectralnorm \ncopy-by-val', runs, num_iter=num_iter,
file_prefix=name)
ids_by_ref = [
120 # spectralnorm-by-ref 2020-08-31 00:34:57, graalphp-native
, 124 # spectralnorm-by-ref 2020-08-31 00:40:56, graalphp-native
, 128 # spectralnorm-by-ref 2020-08-31 00:46:55, graalphp-native
, 132 # spectralnorm-by-ref 2020-08-31 00:52:54, graalphp-native
, 136 # spectralnorm-by-ref 2020-08-31 00:58:54, graalphp-native
, 140 # spectralnorm-by-ref 2020-08-31 01:04:53, graalphp-native
, 144 # spectralnorm-by-ref 2020-08-31 01:10:52, graalphp-native
, 148 # spectralnorm-by-ref 2020-08-31 01:16:51, graalphp-native
, 152 # spectralnorm-by-ref 2020-08-31 01:22:50, graalphp-native
]
runs = [get_timings_by_id(i, warmup=0) for i in ids_by_ref]
do_warmup_plot('spectralnorm \ncopy-by-ref', runs, num_iter=num_iter,
color=color_copy_by_ref,
file_prefix=name)
pass
def warmup_plot_bintree():
ids_by_val = [
156 # binary-trees-by-val, 2020-09-01 01:57:02, graalphp-native
, 160 # binary-trees-by-val, 2020-09-01 07:20:34, graalphp-native
, 164 # binary-trees-by-val, 2020-09-01 12:48:38, graalphp-native
]
runs = [get_timings_by_id(i, warmup=0) for i in ids_by_val]
do_warmup_plot('binary-trees \ncopy-by-val', runs, num_iter=num_iter,
file_prefix=name)
ids_by_ref = [
169 # binary-trees-by-ref, 2020-09-01 14:40:30, graalphp-native
, 171 # binary-trees-by-ref, 2020-09-01 14:48:10, graalphp-native
, 173 # binary-trees-by-ref, 2020-09-01 14:55:38, graalphp-native
, 175 # binary-trees-by-ref, 2020-09-01 15:03:06, graalphp-native
, 177 # binary-trees-by-ref, 2020-09-01 15:10:40, graalphp-native
, 179 # binary-trees-by-ref, 2020-09-01 15:18:15, graalphp-native
, 181 # binary-trees-by-ref, 2020-09-01 15:25:48, graalphp-native
, 183 # binary-trees-by-ref, 2020-09-01 15:33:23, graalphp-native
, 185 # binary-trees-by-ref, 2020-09-01 15:40:56, graalphp-native
]
runs = [get_timings_by_id(i, warmup=0) for i in ids_by_ref]
do_warmup_plot('binary-trees \ncopy-by-ref', runs, num_iter=num_iter,
color=color_copy_by_ref,
file_prefix=name)
pass
if __name__ == '__main__':
warmup_plot_fannkuch()
warmup_plot_spectralnorm()
warmup_plot_bintree()
|
96348
|
import os
from django.db import transaction
import csv
from django.core.management.base import BaseCommand, CommandError
from api.models import Country
def get_bool(value):
if value == 't':
return True
elif value == 'f':
return False
else:
return None
def get_int(value):
# print('GET INT', value)
if value == '':
return None
else:
return int(float(value))
class Command(BaseCommand):
help = "Import countries data from CSV (only to be used on staging)"
missing_args_message = "Filename is missing. Filename / path to CSV file is a required argument."
def add_arguments(self, parser):
parser.add_argument('filename', nargs='+', type=str)
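    # transaction.atomic wraps the whole import in a single DB transaction,
    # so an exception on any row rolls back every change.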
@transaction.atomic
def handle(self, *args, **options):
filename = options['filename'][0]
boolean_fields = ['is_deprecated', 'independent']
int_fields = ['record_type', 'wp_population', 'wb_year', 'region_id', 'inform_score']
fields_to_save = [
'name',
'name_en',
'name_es',
'name_fr',
'name_ar',
'iso',
'society_name',
'society_name_en',
'society_name_es',
'society_name_fr',
'society_name_ar',
'society_url',
'key_priorities',
'logo',
'iso3',
'url_ifrc',
'centroid',
'bbox'] + boolean_fields + int_fields
if not os.path.exists(filename):
print('File does not exist. Check path?')
return
with open(filename) as csvfile:
all_ids = []
reader = csv.DictReader(csvfile)
for row in reader:
id = int(row.pop('id'))
all_ids.append(id)
                try:
                    country = Country.objects.get(pk=id)
                except Country.DoesNotExist:
                    country = Country()
for key in row.keys():
# print(key)
if key in boolean_fields:
val = get_bool(row[key])
elif key in int_fields:
val = get_int(row[key])
else:
val = row[key]
if key in fields_to_save:
                        setattr(country, key, val)
country.save()
print('SUCCESS', country.name_en)
print('done importing countries')
existing_country_ids = [c.id for c in Country.objects.all()]
countries_not_in_csv = list(set(existing_country_ids) - set(all_ids))
for country_id in countries_not_in_csv:
c = Country.objects.get(pk=country_id)
c.delete()
print('deleted ids', countries_not_in_csv)
|
96369
|
import os
import sys
import cv2
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import scipy.misc as sic
class Cube2Equirec(nn.Module):
def __init__(self, cube_length, equ_h):
super().__init__()
self.cube_length = cube_length
self.equ_h = equ_h
equ_w = equ_h * 2
self.equ_w = equ_w
theta = (np.arange(equ_w) / (equ_w-1) - 0.5) * 2 *np.pi
phi = (np.arange(equ_h) / (equ_h-1) - 0.5) * np.pi
theta, phi = np.meshgrid(theta, phi)
x = np.sin(theta) * np.cos(phi)
y = np.sin(phi)
z = np.cos(theta) * np.cos(phi)
xyz = np.concatenate([x[..., None], y[..., None], z[..., None]], axis=-1)
planes = np.asarray([
[0, 0, 1, 1], # z = -1
[0, 1, 0, -1], # y = 1
[0, 0, 1, -1], # z = 1
[1, 0, 0, 1], # x = -1
[1, 0, 0, -1], # x = 1
[0, 1, 0, 1] # y = -1
])
r_lst = np.array([
[0, 1, 0],
[0.5, 0, 0],
[0, 0, 0],
[0, 0.5, 0],
[0, -0.5, 0],
[-0.5, 0, 0]
]) * np.pi
f = cube_length / 2.0
self.K = np.array([
[f, 0, (cube_length-1)/2.0],
[0, f, (cube_length-1)/2.0],
[0, 0, 1]
])
self.R_lst = [cv2.Rodrigues(x)[0] for x in r_lst]
self.mask, self.XY = self._intersection(xyz, planes)
def forward(self, x, mode='bilinear'):
assert mode in ['nearest', 'bilinear']
assert x.shape[0] % 6 == 0
equ_count = x.shape[0] // 6
equi = torch.zeros(equ_count, x.shape[1], self.equ_h, self.equ_w).to(x.device)
for i in range(6):
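            # Faces are interleaved along the batch dimension: every sixth
            # sample (offset i) belongs to cube face i.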
now = x[i::6, ...]
mask = self.mask[i].to(x.device)
mask = mask[None, ...].repeat(equ_count, x.shape[1], 1, 1)
XY = (self.XY[i].to(x.device)[None, None, :, :].repeat(equ_count, 1, 1, 1) / (self.cube_length-1) - 0.5) * 2
sample = F.grid_sample(now, XY, mode=mode, align_corners=True)[..., 0, :]
equi[mask] = sample.view(-1)
return equi
def _intersection(self, xyz, planes):
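        # For each equirectangular ray direction, pick the cube face plane
        # with the smallest positive intersection depth, then project the hit
        # point through that face's rotation and the intrinsics K to get
        # per-face pixel coordinates.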
abc = planes[:, :-1]
depth = -planes[:, 3][None, None, ...] / np.dot(xyz, abc.T)
depth[depth < 0] = np.inf
arg = np.argmin(depth, axis=-1)
depth = np.min(depth, axis=-1)
pts = depth[..., None] * xyz
mask_lst = []
mapping_XY = []
for i in range(6):
mask = arg == i
mask = np.tile(mask[..., None], [1, 1, 3])
XY = np.dot(np.dot(pts[mask].reshape([-1, 3]), self.R_lst[i].T), self.K.T)
XY = np.clip(XY[..., :2].copy() / XY[..., 2:], 0, self.cube_length-1)
mask_lst.append(mask[..., 0])
mapping_XY.append(XY)
mask_lst = [torch.BoolTensor(x) for x in mask_lst]
mapping_XY = [torch.FloatTensor(x) for x in mapping_XY]
return mask_lst, mapping_XY
if __name__ == '__main__':
import matplotlib.pyplot as plt
batch = torch.zeros(12, 3, 256, 256) + 20
c2e = Cube2Equirec(256, 512)
equi = c2e(batch)
plt.imshow(equi[0, ...].permute(1, 2, 0).cpu().numpy())
plt.show()
|
96447
|
import time
from abc import abstractmethod
from datetime import datetime
from typing import Optional, List, Tuple, Union
from bxcommon.messages.bloxroute.tx_message import TxMessage
from bxcommon.models.transaction_flag import TransactionFlag
from bxcommon.utils import crypto, convert
from bxcommon.utils.blockchain_utils.bdn_tx_to_bx_tx import bdn_tx_to_bx_tx
from bxcommon.utils.object_hash import Sha256Hash
from bxcommon import constants as common_constants
from bxgateway import ont_constants
from bxgateway.abstract_message_converter import AbstractMessageConverter, BlockDecompressionResult
from bxgateway.messages.ont.block_ont_message import BlockOntMessage
from bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage
from bxgateway.messages.ont.ont_message import OntMessage
from bxgateway.messages.ont.tx_ont_message import TxOntMessage
from bxgateway.utils.block_info import BlockInfo
def get_block_info(
bx_block: memoryview,
block_hash: Sha256Hash,
short_ids: List[int],
decompress_start_datetime: datetime,
decompress_start_timestamp: float,
total_tx_count: Optional[int] = None,
ont_block_msg: Optional[Union[BlockOntMessage, OntConsensusMessage]] = None
) -> BlockInfo:
if ont_block_msg is not None:
bx_block_hash = convert.bytes_to_hex(crypto.double_sha256(bx_block))
compressed_size = len(bx_block)
prev_block_hash = convert.bytes_to_hex(ont_block_msg.prev_block_hash().binary)
ont_block_len = len(ont_block_msg.rawbytes())
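        # Size reduction of the compressed block, as a percentage of the raw
        # ONT block length.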
compression_rate = 100 - float(compressed_size) / ont_block_len * 100
else:
bx_block_hash = None
compressed_size = None
prev_block_hash = None
ont_block_len = None
compression_rate = None
return BlockInfo(
block_hash,
short_ids,
decompress_start_datetime,
datetime.utcnow(),
(time.time() - decompress_start_timestamp) * 1000,
total_tx_count,
bx_block_hash,
prev_block_hash,
ont_block_len,
compressed_size,
compression_rate,
[]
)
class AbstractOntMessageConverter(AbstractMessageConverter):
def __init__(self, ont_magic: int):
self._ont_magic = ont_magic
@abstractmethod
def block_to_bx_block(
self, block_msg, tx_service, enable_block_compression: bool, min_tx_age_seconds: float
) -> Tuple[memoryview, BlockInfo]:
"""
Pack a blockchain block's transactions into a bloXroute block.
"""
pass
@abstractmethod
def bx_block_to_block(self, bx_block_msg, tx_service) -> BlockDecompressionResult:
"""
Uncompresses a bx_block from a broadcast bx_block message and converts to a raw ONT bx_block.
bx_block must be a memoryview, since memoryview[offset] returns a bytearray, while bytearray[offset] returns
a byte.
"""
pass
# pyre-fixme[14]: `bx_tx_to_tx` overrides method defined in
# `AbstractMessageConverter` inconsistently.
def bx_tx_to_tx(self, tx_msg: TxMessage):
# pyre-fixme[6]: Expected `bytes` for 1st param but got `memoryview`.
buf = bytearray(ont_constants.ONT_HDR_COMMON_OFF) + tx_msg.tx_val()
raw_ont_tx_msg = OntMessage(self._ont_magic, TxOntMessage.MESSAGE_TYPE, len(tx_msg.tx_val()), buf)
ont_tx_msg = TxOntMessage(buf=raw_ont_tx_msg.buf)
return ont_tx_msg
def tx_to_bx_txs(
self,
tx_msg,
network_num: int,
transaction_flag: Optional[TransactionFlag] = None,
min_tx_network_fee: int = 0,
account_id: str = common_constants.DECODED_EMPTY_ACCOUNT_ID
) -> List[Tuple[TxMessage, Sha256Hash, Union[bytearray, memoryview]]]:
bx_tx_msg = TxMessage(
tx_msg.tx_hash(),
network_num,
tx_val=tx_msg.tx(),
transaction_flag=transaction_flag,
account_id=account_id
)
return [(bx_tx_msg, tx_msg.tx_hash(), tx_msg.tx())]
def bdn_tx_to_bx_tx(
self,
raw_tx: Union[bytes, bytearray, memoryview],
network_num: int,
transaction_flag: Optional[TransactionFlag] = None,
account_id: str = common_constants.DECODED_EMPTY_ACCOUNT_ID
) -> TxMessage:
return bdn_tx_to_bx_tx(raw_tx, network_num, transaction_flag, account_id)
|
96459
|
model_space_filename = 'path/to/metrics.json'
model_sampling_rules = dict(
type='sequential',
rules=[
# 1. select model with best performance, could replace with your own metrics
dict(
type='sample',
operation='top',
# replace with customized metric in your own tasks, e.g. `metric.finetune.bdd100k_bbox_mAP`
key='metric.finetune.coco_bbox_mAP',
value=1,
mode='number',
),
])
|
96480
|
from notifications_utils.clients.antivirus.antivirus_client import (
AntivirusClient,
)
from notifications_utils.clients.redis.redis_client import RedisClient
from notifications_utils.clients.zendesk.zendesk_client import ZendeskClient
antivirus_client = AntivirusClient()
zendesk_client = ZendeskClient()
redis_client = RedisClient()
|
96484
|
import contextlib
import errno
import os
import sys
import tempfile
MS_WINDOWS = (sys.platform == 'win32')
@contextlib.contextmanager
def temporary_file():
tmp_filename = tempfile.mktemp()
try:
yield tmp_filename
finally:
try:
os.unlink(tmp_filename)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
|
96501
|
import sys
from unittest import TestCase
from dropSQL.ast import ColumnDef, IntegerTy, VarCharTy
from dropSQL.fs.db_file import DBFile
from dropSQL.parser.tokens.identifier import Identifier
class LayoutCase(TestCase):
def test(self):
connection = DBFile(":memory:")
db_name = "Database name!"
metadata = connection.metadata
metadata.name = db_name
self.assertEqual(metadata.name, db_name,
msg="Failed to read database name: {}".format(metadata.name))
self.assertEqual(metadata.data_blocks_count, 0,
msg="Initial data blocks count is not zero")
tables = connection.get_tables()
for index, table in enumerate(tables):
table_name = Identifier("Table {}!".format(index))
table.set_table_name(table_name)
self.assertEqual(table.get_table_name(), table_name, msg="Failed to read table name")
table.add_column(ColumnDef(Identifier("ind"), IntegerTy()))
table.add_column(ColumnDef(Identifier("text"), VarCharTy(15)))
for i in range(0, 10 ** 4):
# sys.stderr.write("Inserting record {} into {}\n".format(i, index))
table.insert([i, "qwerty123456"]).ok()
if i % 3 == 0: table.delete(i).ok()
if i % 3 == 1: table.update([-i, "123456qwerty"], i).ok()
res = table.select(i)
if not res: continue
values = res.ok()
self.assertEqual(abs(values[0]), i,
msg="received({}): {}".format(i, values[0]))
self.assertIn(values[1], ("qwerty123456", "123456qwerty"),
msg="received({}): {}".format(i, values[1]))
sys.stderr.write('Record count: {}\n'.format(table.count_records()))
sys.stderr.write('{}\n'.format(str(table.get_columns())))
table.drop()
break
sys.stderr.write('Tables: {}\n'.format([t.get_table_name() for t in connection.get_tables()]))
connection.close()
|
96531
|
from .ModelBuilder import ModelBuilder # noqa: F401
from .AbstractTapeModel import AbstractTapeModel # noqa: F401
|
96543
|
import random
import time
from collections import deque
from threading import Lock
from typing import Optional
import HABApp
from HABApp.core.events import ValueUpdateEvent
from .bench_base import BenchBaseRule
from .bench_times import BenchContainer, BenchTime
LOCK = Lock()
class OpenhabBenchRule(BenchBaseRule):
BENCH_TYPE = 'openHAB'
RTT_BENCH_MAX = 15
def __init__(self):
super().__init__()
self.name_list = [f'BenchItem{k}' for k in range(300)]
self.time_sent = 0.0
self.bench_started = 0.0
self.bench_times_container = BenchContainer()
        self.bench_times: Optional[BenchTime] = None
self.item_values = deque()
self.item_name = ''
self.load_listener = []
def cleanup(self):
self.stop_load()
all_items = set(HABApp.core.Items.get_all_item_names())
to_rem = set(self.name_list) & all_items
if not to_rem:
return None
print('Cleanup ... ', end='')
for name in to_rem:
self.oh.remove_item(name)
print('complete')
def set_up(self):
self.cleanup()
def tear_down(self):
self.cleanup()
def run_bench(self):
# These are the benchmarks
self.bench_item_create()
self.bench_rtt_time()
def bench_item_create(self):
print('Bench item operations ', end='')
max_duration = 10 # how long should each bench take
times = BenchContainer()
start_bench = time.time()
b = times.create('create item')
for k in self.name_list:
start = time.time()
self.openhab.create_item('Number', k, label='MyLabel')
b.times.append(time.time() - start)
# limit bench time on weak devices
if time.time() - start_bench > max_duration:
break
time.sleep(0.2)
print('.', end='')
start_bench = time.time()
b = times.create('update item')
for k in self.name_list:
start = time.time()
self.openhab.create_item('Number', k, label='New Label')
b.times.append(time.time() - start)
# limit bench time on weak devices
if time.time() - start_bench > max_duration:
break
time.sleep(0.2)
print('.', end='')
start_bench = time.time()
b = times.create('delete item')
for k in self.name_list:
start = time.time()
self.openhab.remove_item(k)
b.times.append(time.time() - start)
# limit bench time on weak devices
if time.time() - start_bench > max_duration:
break
print('. done!\n')
times.show()
def bench_rtt_time(self):
print('Bench item state update ', end='')
self.bench_times_container = BenchContainer()
self.run_rtt('rtt idle')
self.run_rtt('async rtt idle', do_async=True)
self.start_load()
self.run_rtt('rtt load (+10x)')
self.run_rtt('async rtt load (+10x)', do_async=True)
self.stop_load()
print(' done!\n')
self.bench_times_container.show()
def start_load(self):
for i in range(10, 20):
def load_cb(event, item=self.name_list[i]):
self.openhab.post_update(item, str(random.randint(0, 99999999)))
self.openhab.create_item('String', self.name_list[i], label='MyLabel')
listener = self.listen_event(self.name_list[i], load_cb, ValueUpdateEvent)
self.load_listener.append(listener)
self.openhab.post_update(self.name_list[i], str(random.randint(0, 99999999)))
def stop_load(self):
        for listener in self.load_listener:
            listener.cancel()
self.load_listener.clear()
def run_rtt(self, test_name, do_async=False):
self.item_name = self.name_list[0]
self.openhab.create_item('String', self.item_name, label='MyLabel')
for i in range(3000):
self.item_values.append(str(random.randint(0, 99999999)))
listener = self.listen_event(
self.item_name, self.proceed_item_val if not do_async else self.a_proceed_item_val, ValueUpdateEvent
)
self.bench_times = self.bench_times_container.create(test_name)
self.bench_started = time.time()
self.time_sent = time.time()
self.openhab.post_update(self.item_name, self.item_values[0])
self.run.soon(LOCK.acquire)
time.sleep(1)
LOCK.acquire(True, OpenhabBenchRule.RTT_BENCH_MAX)
listener.cancel()
if LOCK.locked():
LOCK.release()
print('.', end='')
def proceed_item_val(self, event: ValueUpdateEvent):
if event.value != self.item_values[0]:
return None
self.bench_times.times.append(time.time() - self.time_sent)
# Time up -> stop benchmark
if time.time() - self.bench_started > OpenhabBenchRule.RTT_BENCH_MAX:
LOCK.release()
return None
# No items left -> stop benchmark
try:
self.item_values.popleft()
except IndexError:
LOCK.release()
return None
self.time_sent = time.time()
self.openhab.post_update(self.item_name, self.item_values[0])
async def a_proceed_item_val(self, event: ValueUpdateEvent):
self.proceed_item_val(event)
|
96593
|
from django.contrib.staticfiles import storage
# Configure the permissions used by ./manage.py collectstatic
# See https://docs.djangoproject.com/en/1.10/ref/contrib/staticfiles/
class TTStaticFilesStorage(storage.StaticFilesStorage):
def __init__(self, *args, **kwargs):
kwargs['file_permissions_mode'] = 0o644
kwargs['directory_permissions_mode'] = 0o755
super(TTStaticFilesStorage, self).__init__(*args, **kwargs)
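# Illustrative wiring (the module path below is hypothetical):
# in settings.py:
#   STATICFILES_STORAGE = 'myproject.staticfiles_storage.TTStaticFilesStorage'
# ./manage.py collectstatic then writes files as 0o644 and directories as 0o755.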
|
96619
|
Clock.bpm=144; Scale.default="lydianMinor"
d1 >> play("x-o{-[-(-o)]}", sample=0).every([28,4], "trim", 3)
d2 >> play("(X )( X)N{ xv[nX]}", drive=0.2, lpf=var([0,40],[28,4]), rate=PStep(P[5:8],[-1,-2],1)).sometimes("sample.offadd", 1, 0.75)
d3 >> play("e", amp=var([0,1],[PRand(8,16)/2,1.5]), dur=PRand([1/2,1/4]), pan=var([-1,1],2))
c1 >> play("#", dur=32, room=1, amp=2).spread()
var.switch = var([0,1],[32])
p1 >> karp(dur=1/4, rate=PWhite(40), pan=PWhite(-1,1), amplify=var.switch, amp=1, room=0.5)
p2 >> sawbass(var([0,1,5,var([4,6],[14,2])],1), dur=PDur(3,8), cutoff=4000, sus=1/2, amplify=var.switch)
p3 >> glass(oct=6, rate=linvar([-2,2],16), shape=0.5, amp=1.5, amplify=var([0,var.switch],64), room=0.5)
|
96628
|
import inspect
from doubles.class_double import ClassDouble
from doubles.exceptions import ConstructorDoubleError
from doubles.lifecycle import current_space
def expect(target):
"""
    Prepares a target object for a method call expectation (mock). The name of the method to expect
    should be accessed as an attribute on the return value of this function::
expect(foo).bar
Accessing the ``bar`` attribute will return an ``Expectation`` which provides additional methods
to configure the mock.
:param object target: The object that will be mocked.
:return: An ``ExpectationTarget`` for the target object.
"""
return ExpectationTarget(target)
def expect_constructor(target):
"""
Set an expectation on a ``ClassDouble`` constructor
:param ClassDouble target: The ClassDouble to set the expectation on.
:return: an ``Expectation`` for the __new__ method.
:raise: ``ConstructorDoubleError`` if target is not a ClassDouble.
"""
if not isinstance(target, ClassDouble):
raise ConstructorDoubleError(
'Cannot allow_constructor of {} since it is not a ClassDouble.'.format(target),
)
return expect(target)._doubles__new__
class ExpectationTarget(object):
"""A wrapper around a target object that creates new expectations on attribute access."""
def __init__(self, target):
"""
:param object target: The object to wrap.
"""
self._proxy = current_space().proxy_for(target)
def __getattribute__(self, attr_name):
"""
Returns the value of existing attributes, and returns a new expectation for any attribute
that doesn't yet exist.
:param str attr_name: The name of the attribute to look up.
:return: The existing value or a new ``Expectation``.
:rtype: object, Expectation
"""
__dict__ = object.__getattribute__(self, '__dict__')
if __dict__ and attr_name in __dict__:
return __dict__[attr_name]
caller = inspect.getframeinfo(inspect.currentframe().f_back)
return self._proxy.add_expectation(attr_name, caller)
|
96661
|
from xml.etree.cElementTree import fromstring
from xmljson import yahoo
import core
from core.helpers import Url
from core.providers.base import NewzNabProvider
from core.providers import torrent_modules # noqa
import logging
logging = logging.getLogger(__name__)
trackers = '&tr='.join(('udp://tracker.leechers-paradise.org:6969',
'udp://zer0day.ch:1337',
'udp://tracker.coppersurfer.tk:6969',
'udp://public.popcorn-tracker.org:6969',
'udp://open.demonii.com:1337/announce',
'udp://tracker.openbittorrent.com:80',
'udp://tracker.coppersurfer.tk:6969',
'udp://glotorrents.pw:6969/announce',
'udp://tracker.opentrackr.org:1337/announce',
'udp://torrent.gresille.org:80/announce',
'udp://p4p.arenabg.com:1337',
'udp://tracker.leechers-paradise.org:6969'
))
def magnet(hash_, title):
''' Creates magnet link
hash_ (str): base64 formatted torrent hash
title (str): name of the torrent
Formats as magnet uri and adds trackers
    Returns str magnet uri
'''
return 'magnet:?xt=urn:btih:{}&dn={}&tr={}'.format(hash_, title, trackers)
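# Illustrative example (hash and title are made up):
#   magnet('c12fe1c06bba254a9dc9f519b335aa7c1367a88a', 'Some.Movie.2017')
# returns 'magnet:?xt=urn:btih:c12fe1c06bba254a9dc9f519b335aa7c1367a88a&dn=Some.Movie.2017&tr=udp://...'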
class Torrent(NewzNabProvider):
    def __init__(self):
        self.feed_type = 'torrent'
    def search_all(self, imdbid, title, year, ignore_if_imdbid_cap=False):
''' Performs backlog search for all indexers.
imdbid (str): imdb id #
title (str): movie title
year (str/int): year of movie release
Returns list of dicts with sorted release information.
'''
torz_indexers = core.CONFIG['Indexers']['TorzNab'].values()
results = []
term = Url.normalize('{} {}'.format(title, year))
for indexer in torz_indexers:
if indexer[2] is False:
continue
url_base = indexer[0]
logging.info('Searching TorzNab indexer {}'.format(url_base))
if url_base[-1] != '/':
url_base = url_base + '/'
apikey = indexer[1]
no_year = indexer[3]
caps = core.sql.torznab_caps(url_base)
if not caps:
caps = self._get_caps(url_base, apikey)
if caps is None:
logging.error('Unable to get caps for {}'.format(url_base))
continue
if 'imdbid' in caps:
if ignore_if_imdbid_cap:
return results
logging.info('{} supports imdbid search.'.format(url_base))
r = self.search_newznab(url_base, apikey, 'movie', imdbid=imdbid)
else:
logging.info('{} does not support imdbid search, using q={}'.format(url_base, term))
r = self.search_newznab(url_base, apikey, 'search', q=term, imdbid=imdbid)
if not r and no_year:
logging.info('{} does not find anything, trying without year, using q={}'.format(url_base, title))
r = self.search_newznab(url_base, apikey, 'search', q=title, imdbid=imdbid)
for i in r:
results.append(i)
for indexer, settings in core.CONFIG['Indexers']['Torrent'].items():
if settings['enabled']:
if not hasattr(torrent_modules, indexer):
logging.warning('Torrent indexer {} enabled but not found in torrent_modules.'.format(indexer))
continue
else:
for i in getattr(torrent_modules, indexer).search(imdbid, term, ignore_if_imdbid_cap):
if i not in results:
results.append(i)
for indexer, indexerobject in core.CONFIG['Indexers']['PrivateTorrent'].items():
if indexerobject['enabled']:
if not hasattr(torrent_modules, indexer):
logging.warning('Torrent indexer {} enabled but not found in torrent_modules.'.format(indexer))
continue
else:
for i in getattr(torrent_modules, indexer).search(imdbid, term, ignore_if_imdbid_cap):
if i not in results:
results.append(i)
return results
def get_rss(self):
''' Gets rss from all torznab providers and individual providers
Returns list of dicts of latest movies
'''
logging.info('Syncing Torrent indexer RSS feeds.')
results = []
results = self._get_rss()
for indexer, settings in core.CONFIG['Indexers']['Torrent'].items():
if settings['enabled']:
if not hasattr(torrent_modules, indexer):
logging.warning('Torrent indexer {} enabled but not found in torrent_modules.'.format(indexer))
continue
else:
for i in getattr(torrent_modules, indexer).get_rss():
if i not in results:
results.append(i)
return results
def _get_caps(self, url_base, apikey):
''' Gets caps for indexer url
url_base (str): url of torznab indexer
apikey (str): api key for indexer
Gets indexer caps from CAPS table
Returns list of caps
'''
logging.info('Getting caps for {}'.format(url_base))
url = '{}api?apikey={}&t=caps'.format(url_base, apikey)
try:
xml = Url.open(url).text
caps = yahoo.data(fromstring(xml))['caps']['searching']['movie-search']['supportedParams']
core.sql.write('CAPS', {'url': url_base, 'caps': caps})
        except Exception:
            logging.warning('Error getting caps for {}'.format(url_base), exc_info=True)
return None
return caps.split(',')
|
96687
|
import gym
import numpy as np
class SpaceWrapper:
def __init__(self, space):
if isinstance(space, gym.spaces.Discrete):
self.shape = ()
self.dtype = np.dtype(np.int64)
elif isinstance(space, gym.spaces.Box):
self.shape = space.shape
self.dtype = np.dtype(space.dtype)
else:
assert False, "ProcVectorEnv only support Box and Discrete types"
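# Illustrative usage (not part of the original module):
if __name__ == '__main__':
    assert SpaceWrapper(gym.spaces.Discrete(4)).shape == ()
    assert SpaceWrapper(gym.spaces.Box(low=0.0, high=1.0, shape=(3,))).shape == (3,)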
|
96692
|
import numpy as np
import torch.nn.functional as F
import math
from torchvision import transforms
import torch
import cv2
import matplotlib
matplotlib.use('agg')  # select the non-interactive backend before importing pyplot
import matplotlib.pyplot as plt
import matplotlib.patches as patches
MAPS = ['map3','map4']
Scales = [0.9, 1.1]
MIN_HW = 384
MAX_HW = 1584
IM_NORM_MEAN = [0.485, 0.456, 0.406]
IM_NORM_STD = [0.229, 0.224, 0.225]
def select_exemplar_rois(image):
all_rois = []
print("Press 'q' or Esc to quit. Press 'n' and then use mouse drag to draw a new examplar, 'space' to save.")
while True:
key = cv2.waitKey(1) & 0xFF
if key == 27 or key == ord('q'):
break
        elif key == ord('n') or key == ord('\r'):
rect = cv2.selectROI("image", image, False, False)
x1 = rect[0]
y1 = rect[1]
x2 = x1 + rect[2] - 1
y2 = y1 + rect[3] - 1
all_rois.append([y1, x1, y2, x2])
for rect in all_rois:
y1, x1, y2, x2 = rect
cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
print("Press q or Esc to quit. Press 'n' and then use mouse drag to draw a new examplar")
return all_rois
def matlab_style_gauss2D(shape=(3,3),sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's
fspecial('gaussian',[shape],[sigma])
"""
m,n = [(ss-1.)/2. for ss in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
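# Illustrative property (assumed check, not in the original file): the kernel is
# normalised, so matlab_style_gauss2D(shape=(3, 3), sigma=0.5).sum() is 1.0 up to
# floating point precision.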
def PerturbationLoss(output,boxes,sigma=8, use_gpu=True):
Loss = 0.
if boxes.shape[1] > 1:
boxes = boxes.squeeze()
for tempBoxes in boxes.squeeze():
y1 = int(tempBoxes[1])
y2 = int(tempBoxes[3])
x1 = int(tempBoxes[2])
x2 = int(tempBoxes[4])
out = output[:,:,y1:y2,x1:x2]
GaussKernel = matlab_style_gauss2D(shape=(out.shape[2],out.shape[3]),sigma=sigma)
GaussKernel = torch.from_numpy(GaussKernel).float()
if use_gpu: GaussKernel = GaussKernel.cuda()
Loss += F.mse_loss(out.squeeze(),GaussKernel)
else:
boxes = boxes.squeeze()
y1 = int(boxes[1])
y2 = int(boxes[3])
x1 = int(boxes[2])
x2 = int(boxes[4])
out = output[:,:,y1:y2,x1:x2]
Gauss = matlab_style_gauss2D(shape=(out.shape[2],out.shape[3]),sigma=sigma)
GaussKernel = torch.from_numpy(Gauss).float()
if use_gpu: GaussKernel = GaussKernel.cuda()
Loss += F.mse_loss(out.squeeze(),GaussKernel)
return Loss
def MincountLoss(output,boxes, use_gpu=True):
ones = torch.ones(1)
if use_gpu: ones = ones.cuda()
Loss = 0.
if boxes.shape[1] > 1:
boxes = boxes.squeeze()
for tempBoxes in boxes.squeeze():
y1 = int(tempBoxes[1])
y2 = int(tempBoxes[3])
x1 = int(tempBoxes[2])
x2 = int(tempBoxes[4])
X = output[:,:,y1:y2,x1:x2].sum()
if X.item() <= 1:
Loss += F.mse_loss(X,ones)
else:
boxes = boxes.squeeze()
y1 = int(boxes[1])
y2 = int(boxes[3])
x1 = int(boxes[2])
x2 = int(boxes[4])
X = output[:,:,y1:y2,x1:x2].sum()
if X.item() <= 1:
Loss += F.mse_loss(X,ones)
return Loss
def pad_to_size(feat, desire_h, desire_w):
""" zero-padding a four dim feature matrix: N*C*H*W so that the new Height and Width are the desired ones
    desire_h and desire_w should be larger than the current height and width
"""
cur_h = feat.shape[-2]
cur_w = feat.shape[-1]
left_pad = (desire_w - cur_w + 1) // 2
right_pad = (desire_w - cur_w) - left_pad
top_pad = (desire_h - cur_h + 1) // 2
bottom_pad =(desire_h - cur_h) - top_pad
return F.pad(feat, (left_pad, right_pad, top_pad, bottom_pad))
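# Illustrative example (not in the original file): padding a (1, C, 10, 12) feature
# map with pad_to_size(feat, 16, 16) yields (1, C, 16, 16), splitting the extra rows
# and columns as evenly as possible between the two sides.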
def extract_features(feature_model, image, boxes,feat_map_keys=['map3','map4'], exemplar_scales=[0.9, 1.1]):
N, M = image.shape[0], boxes.shape[2]
"""
Getting features for the image N * C * H * W
"""
Image_features = feature_model(image)
"""
Getting features for the examples (N*M) * C * h * w
"""
for ix in range(0,N):
# boxes = boxes.squeeze(0)
boxes = boxes[ix][0]
cnter = 0
Cnter1 = 0
for keys in feat_map_keys:
image_features = Image_features[keys][ix].unsqueeze(0)
if keys == 'map1' or keys == 'map2':
Scaling = 4.0
elif keys == 'map3':
Scaling = 8.0
elif keys == 'map4':
Scaling = 16.0
else:
Scaling = 32.0
boxes_scaled = boxes / Scaling
boxes_scaled[:, 1:3] = torch.floor(boxes_scaled[:, 1:3])
boxes_scaled[:, 3:5] = torch.ceil(boxes_scaled[:, 3:5])
boxes_scaled[:, 3:5] = boxes_scaled[:, 3:5] + 1 # make the end indices exclusive
feat_h, feat_w = image_features.shape[-2], image_features.shape[-1]
# make sure exemplars don't go out of bound
boxes_scaled[:, 1:3] = torch.clamp_min(boxes_scaled[:, 1:3], 0)
boxes_scaled[:, 3] = torch.clamp_max(boxes_scaled[:, 3], feat_h)
boxes_scaled[:, 4] = torch.clamp_max(boxes_scaled[:, 4], feat_w)
box_hs = boxes_scaled[:, 3] - boxes_scaled[:, 1]
box_ws = boxes_scaled[:, 4] - boxes_scaled[:, 2]
max_h = math.ceil(max(box_hs))
max_w = math.ceil(max(box_ws))
for j in range(0,M):
y1, x1 = int(boxes_scaled[j,1]), int(boxes_scaled[j,2])
y2, x2 = int(boxes_scaled[j,3]), int(boxes_scaled[j,4])
#print(y1,y2,x1,x2,max_h,max_w)
if j == 0:
examples_features = image_features[:,:,y1:y2, x1:x2]
if examples_features.shape[2] != max_h or examples_features.shape[3] != max_w:
#examples_features = pad_to_size(examples_features, max_h, max_w)
examples_features = F.interpolate(examples_features, size=(max_h,max_w),mode='bilinear')
else:
feat = image_features[:,:,y1:y2, x1:x2]
if feat.shape[2] != max_h or feat.shape[3] != max_w:
feat = F.interpolate(feat, size=(max_h,max_w),mode='bilinear')
#feat = pad_to_size(feat, max_h, max_w)
examples_features = torch.cat((examples_features,feat),dim=0)
"""
Convolving example features over image features
"""
h, w = examples_features.shape[2], examples_features.shape[3]
features = F.conv2d(
F.pad(image_features, ((int(w/2)), int((w-1)/2), int(h/2), int((h-1)/2))),
examples_features
)
combined = features.permute([1,0,2,3])
# computing features for scales 0.9 and 1.1
for scale in exemplar_scales:
h1 = math.ceil(h * scale)
w1 = math.ceil(w * scale)
if h1 < 1: # use original size if scaled size is too small
h1 = h
if w1 < 1:
w1 = w
examples_features_scaled = F.interpolate(examples_features, size=(h1,w1),mode='bilinear')
features_scaled = F.conv2d(F.pad(image_features, ((int(w1/2)), int((w1-1)/2), int(h1/2), int((h1-1)/2))),
examples_features_scaled)
features_scaled = features_scaled.permute([1,0,2,3])
combined = torch.cat((combined,features_scaled),dim=1)
if cnter == 0:
Combined = 1.0 * combined
else:
if Combined.shape[2] != combined.shape[2] or Combined.shape[3] != combined.shape[3]:
combined = F.interpolate(combined, size=(Combined.shape[2],Combined.shape[3]),mode='bilinear')
Combined = torch.cat((Combined,combined),dim=1)
cnter += 1
if ix == 0:
All_feat = 1.0 * Combined.unsqueeze(0)
else:
All_feat = torch.cat((All_feat,Combined.unsqueeze(0)),dim=0)
return All_feat
class resizeImage(object):
"""
    If either the width or height of an image exceeds a specified value, resize the image so that:
1. The maximum of the new height and new width does not exceed a specified value
2. The new height and new width are divisible by 8
3. The aspect ratio is preserved
No resizing is done if both height and width are smaller than the specified value
By: <NAME> (<EMAIL>)
"""
def __init__(self, MAX_HW=1504):
self.max_hw = MAX_HW
def __call__(self, sample):
image,lines_boxes = sample['image'], sample['lines_boxes']
W, H = image.size
if W > self.max_hw or H > self.max_hw:
scale_factor = float(self.max_hw)/ max(H, W)
new_H = 8*int(H*scale_factor/8)
new_W = 8*int(W*scale_factor/8)
resized_image = transforms.Resize((new_H, new_W))(image)
else:
scale_factor = 1
resized_image = image
boxes = list()
for box in lines_boxes:
box2 = [int(k*scale_factor) for k in box]
y1, x1, y2, x2 = box2[0], box2[1], box2[2], box2[3]
boxes.append([0, y1,x1,y2,x2])
boxes = torch.Tensor(boxes).unsqueeze(0)
resized_image = Normalize(resized_image)
sample = {'image':resized_image,'boxes':boxes}
return sample
class resizeImageWithGT(object):
"""
    If either the width or height of an image exceeds a specified value, resize the image so that:
1. The maximum of the new height and new width does not exceed a specified value
2. The new height and new width are divisible by 8
3. The aspect ratio is preserved
No resizing is done if both height and width are smaller than the specified value
By: <NAME> (<EMAIL>)
Modified by: Viresh
"""
def __init__(self, MAX_HW=1504):
self.max_hw = MAX_HW
def __call__(self, sample):
image,lines_boxes,density = sample['image'], sample['lines_boxes'],sample['gt_density']
W, H = image.size
if W > self.max_hw or H > self.max_hw:
scale_factor = float(self.max_hw)/ max(H, W)
new_H = 8*int(H*scale_factor/8)
new_W = 8*int(W*scale_factor/8)
resized_image = transforms.Resize((new_H, new_W))(image)
resized_density = cv2.resize(density, (new_W, new_H))
orig_count = np.sum(density)
new_count = np.sum(resized_density)
if new_count > 0: resized_density = resized_density * (orig_count / new_count)
else:
scale_factor = 1
resized_image = image
resized_density = density
boxes = list()
for box in lines_boxes:
box2 = [int(k*scale_factor) for k in box]
y1, x1, y2, x2 = box2[0], box2[1], box2[2], box2[3]
boxes.append([0, y1,x1,y2,x2])
boxes = torch.Tensor(boxes).unsqueeze(0)
resized_image = Normalize(resized_image)
resized_density = torch.from_numpy(resized_density).unsqueeze(0).unsqueeze(0)
sample = {'image':resized_image,'boxes':boxes,'gt_density':resized_density}
return sample
Normalize = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=IM_NORM_MEAN, std=IM_NORM_STD)])
Transform = transforms.Compose([resizeImage(MAX_HW)])
TransformTrain = transforms.Compose([resizeImageWithGT(MAX_HW)])
def denormalize(tensor, means=IM_NORM_MEAN, stds=IM_NORM_STD):
"""Reverses the normalisation on a tensor.
Performs a reverse operation on a tensor, so the pixel value range is
between 0 and 1. Useful for when plotting a tensor into an image.
Normalisation: (image - mean) / std
Denormalisation: image * std + mean
Args:
tensor (torch.Tensor, dtype=torch.float32): Normalized image tensor
Shape:
Input: :math:`(N, C, H, W)`
Output: :math:`(N, C, H, W)` (same shape as input)
Return:
        torch.Tensor (torch.float32): Denormalised image tensor with pixel
values between [0, 1]
Note:
Symbols used to describe dimensions:
- N: number of images in a batch
- C: number of channels
- H: height of the image
- W: width of the image
"""
    denormalized = tensor.clone()
    # undo the per-channel normalisation for every image in the batch
    for image in denormalized:
        for channel, mean, std in zip(image, means, stds):
            channel.mul_(std).add_(mean)
    return denormalized
def scale_and_clip(val, scale_factor, min_val, max_val):
"Helper function to scale a value and clip it within range"
new_val = int(round(val*scale_factor))
new_val = max(new_val, min_val)
new_val = min(new_val, max_val)
return new_val
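# Illustrative example (not in the original file):
#   scale_and_clip(5, 1.6, 0, 7) -> round(8.0) = 8, clipped to the max of 7.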
def visualize_output_and_save(input_, output, boxes, save_path, figsize=(20, 12), dots=None):
"""
dots: Nx2 numpy array for the ground truth locations of the dot annotation
if dots is None, this information is not available
"""
# get the total count
pred_cnt = output.sum().item()
boxes = boxes.squeeze(0)
boxes2 = []
for i in range(0, boxes.shape[0]):
y1, x1, y2, x2 = int(boxes[i, 1].item()), int(boxes[i, 2].item()), int(boxes[i, 3].item()), int(
boxes[i, 4].item())
roi_cnt = output[0,0,y1:y2, x1:x2].sum().item()
boxes2.append([y1, x1, y2, x2, roi_cnt])
img1 = format_for_plotting(denormalize(input_))
output = format_for_plotting(output)
fig = plt.figure(figsize=figsize)
# display the input image
ax = fig.add_subplot(2, 2, 1)
ax.set_axis_off()
ax.imshow(img1)
for bbox in boxes2:
y1, x1, y2, x2 = bbox[0], bbox[1], bbox[2], bbox[3]
rect = patches.Rectangle((x1, y1), x2-x1, y2-y1, linewidth=3, edgecolor='y', facecolor='none')
rect2 = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=1, edgecolor='k', linestyle='--', facecolor='none')
ax.add_patch(rect)
ax.add_patch(rect2)
if dots is not None:
ax.scatter(dots[:, 0], dots[:, 1], c='red', edgecolors='blue')
# ax.scatter(dots[:,0], dots[:,1], c='black', marker='+')
ax.set_title("Input image, gt count: {}".format(dots.shape[0]))
else:
ax.set_title("Input image")
ax = fig.add_subplot(2, 2, 2)
ax.set_axis_off()
ax.set_title("Overlaid result, predicted count: {:.2f}".format(pred_cnt))
img2 = 0.2989*img1[:,:,0] + 0.5870*img1[:,:,1] + 0.1140*img1[:,:,2]
ax.imshow(img2, cmap='gray')
ax.imshow(output, cmap=plt.cm.viridis, alpha=0.5)
# display the density map
ax = fig.add_subplot(2, 2, 3)
ax.set_axis_off()
ax.set_title("Density map, predicted count: {:.2f}".format(pred_cnt))
ax.imshow(output)
# plt.colorbar()
ax = fig.add_subplot(2, 2, 4)
ax.set_axis_off()
ax.set_title("Density map, predicted count: {:.2f}".format(pred_cnt))
ret_fig = ax.imshow(output)
for bbox in boxes2:
y1, x1, y2, x2, roi_cnt = bbox[0], bbox[1], bbox[2], bbox[3], bbox[4]
rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=3, edgecolor='y', facecolor='none')
rect2 = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=1, edgecolor='k', linestyle='--',
facecolor='none')
ax.add_patch(rect)
ax.add_patch(rect2)
ax.text(x1, y1, '{:.2f}'.format(roi_cnt), backgroundcolor='y')
fig.colorbar(ret_fig, ax=ax)
fig.savefig(save_path, bbox_inches="tight")
plt.close()
def format_for_plotting(tensor):
"""Formats the shape of tensor for plotting.
Tensors typically have a shape of :math:`(N, C, H, W)` or :math:`(C, H, W)`
which is not suitable for plotting as images. This function formats an
input tensor :math:`(H, W, C)` for RGB and :math:`(H, W)` for mono-channel
data.
Args:
tensor (torch.Tensor, torch.float32): Image tensor
Shape:
Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
Output: :math:`(H, W, C)` or :math:`(H, W)`, respectively
Return:
torch.Tensor (torch.float32): Formatted image tensor (detached)
Note:
Symbols used to describe dimensions:
- N: number of images in a batch
- C: number of channels
- H: height of the image
- W: width of the image
"""
has_batch_dimension = len(tensor.shape) == 4
formatted = tensor.clone()
if has_batch_dimension:
formatted = tensor.squeeze(0)
if formatted.shape[0] == 1:
return formatted.squeeze(0).detach()
else:
return formatted.permute(1, 2, 0).detach()
|
96759
|
import torch
import numpy as np
from eval import metrics
import gc
def evaluate_user(model, eval_loader, device, mode='pretrain'):
""" evaluate model on recommending items to users (primarily during pre-training step) """
model.eval()
eval_loss = 0.0
n100_list, r20_list, r50_list = [], [], []
eval_preds = []
with torch.no_grad():
for batch_index, eval_data in enumerate(eval_loader):
eval_data = [x.to(device, non_blocking=True) for x in eval_data]
(users, fold_in_items, held_out_items) = eval_data
fold_in_items = fold_in_items.to(device)
if mode == 'pretrain':
recon_batch, emb = model.user_preference_encoder.pre_train_forward(fold_in_items)
else:
recon_batch = model.group_predictor(model.user_preference_encoder(fold_in_items))
loss = model.multinomial_loss(recon_batch, held_out_items)
eval_loss += loss.item()
fold_in_items = fold_in_items.cpu().numpy()
recon_batch = torch.softmax(recon_batch, 1) # softmax over the item set to get normalized scores.
recon_batch[fold_in_items.nonzero()] = -np.inf
n100 = metrics.ndcg_binary_at_k_batch_torch(recon_batch, held_out_items, 100, device=device)
r20 = metrics.recall_at_k_batch_torch(recon_batch, held_out_items, 20)
r50 = metrics.recall_at_k_batch_torch(recon_batch, held_out_items, 50)
n100_list.append(n100)
r20_list.append(r20)
r50_list.append(r50)
eval_preds.append(recon_batch.cpu().numpy())
del users, fold_in_items, held_out_items, recon_batch
gc.collect()
num_batches = max(1, len(eval_loader.dataset) / eval_loader.batch_size)
eval_loss /= num_batches
n100_list = torch.cat(n100_list)
r20_list = torch.cat(r20_list)
r50_list = torch.cat(r50_list)
return eval_loss, torch.mean(n100_list), torch.mean(r20_list), torch.mean(r50_list), np.array(eval_preds)
def evaluate_group(model, eval_group_loader, device):
""" evaluate model on recommending items to groups """
model.eval()
eval_loss = 0.0
n100_list, r20_list, r50_list = [], [], []
eval_preds = []
with torch.no_grad():
for batch_idx, data in enumerate(eval_group_loader):
data = [x.to(device, non_blocking=True) for x in data]
group, group_users, group_mask, group_items, user_items = data
recon_batch, _, _ = model(group, group_users, group_mask, user_items)
loss = model.multinomial_loss(recon_batch, group_items)
eval_loss += loss.item()
result = recon_batch.softmax(1) # softmax over the item set to get normalized scores.
heldout_data = group_items
r20 = metrics.recall_at_k_batch_torch(result, heldout_data, 20)
r50 = metrics.recall_at_k_batch_torch(result, heldout_data, 50)
n100 = metrics.ndcg_binary_at_k_batch_torch(result, heldout_data, 100, device=device)
n100_list.append(n100)
r20_list.append(r20)
r50_list.append(r50)
eval_preds.append(recon_batch.cpu().numpy())
del group, group_users, group_mask, group_items, user_items
gc.collect()
n100_list = torch.cat(n100_list)
r20_list = torch.cat(r20_list)
r50_list = torch.cat(r50_list)
return eval_loss, torch.mean(n100_list), torch.mean(r20_list), torch.mean(r50_list), np.array(eval_preds)
|
96876
|
import numpy as np
from gym_env.feature_processors.enums import ACTION_NAME_TO_INDEX, DOUBLE_ACTION_PARA_TYPE
class Instance:
# reward is the td n reward plus the target state value
def __init__(self,
dota_time=None,
state_gf=None,
state_ucf=None,
state_ucategory=None,
mask=None,
reward=0,
action=None,
action_params=None,
state_value=0,
dump_path=None,
instant_reward=0.,
gae_advantage=0,
action_prob=None,
sub_action_prob=1,
final_action_prob=None,
model_time=None,
units_mask=None,
lstm_state=None,
lstm_gradient_mask=None,
embedding_dict=None,
dota_map=None,
update_times=0):
self.dota_time = dota_time
self.state_gf = state_gf
self.state_ucf = state_ucf
self.state_ucategory = state_ucategory
self.mask = mask
self.state_value = state_value
self.action = action
self.action_params = action_params
self.q_reward = reward
self.instant_reward = instant_reward
self.model_time = model_time
self.action_prob = action_prob
self.sub_action_prob = sub_action_prob
self.gae_advantage = gae_advantage
self.units_mask = units_mask
self.lstm_state = lstm_state
self.lstm_gradient_mask = 1
self.embedding_dict = embedding_dict
self.dota_map = dota_map
self.update_times = update_times
def zeros_like(self, target_instance):
self.dota_time = 0
self.state_gf = np.zeros_like(target_instance.state_gf)
self.state_ucf = np.zeros_like(target_instance.state_ucf)
# for ensure there is one enemy hero/tower
self.state_ucategory = target_instance.state_ucategory
self.mask = np.zeros_like(target_instance.mask)
self.state_value = 0
self.action = ACTION_NAME_TO_INDEX["STOP"]
self.action_params = {}
for atype in DOUBLE_ACTION_PARA_TYPE:
self.action_params[atype] = 0
self.q_reward = 0
self.instant_reward = 0
self.model_time = target_instance.model_time
self.action_prob = 1
self.sub_action_prob = 1
self.gae_advantage = 0
self.units_mask = np.zeros_like(target_instance.units_mask)
self.lstm_state = np.zeros_like(target_instance.lstm_state)
self.lstm_gradient_mask = 1
self.embedding_dict = target_instance.embedding_dict
self.dota_map = np.zeros_like(target_instance.dota_map)
self.update_times = 0
def padding_instance(reward_instance, latest_instance, total_length, exclude_last_instance):
padding_length = total_length - len(reward_instance)
if exclude_last_instance:
start_position = -len(reward_instance) - 1
else:
start_position = -len(reward_instance)
padding_instances = latest_instance[start_position - padding_length:start_position]
if len(padding_instances) < padding_length:
zero_instance = Instance()
zero_instance.zeros_like(reward_instance[0])
for i in range(padding_length - len(padding_instances)):
padding_instances.insert(0, zero_instance)
#padding instance do not compute gradient
for index, item in enumerate(padding_instances):
padding_instances[index].lstm_gradient_mask = 0
for index, item in enumerate(reward_instance):
reward_instance[index].lstm_gradient_mask = 1
padding_instances.extend(reward_instance)
return padding_instances
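# Illustrative behaviour (comment added for clarity, not in the original file):
# with total_length=5 and len(reward_instance)=3, two instances are prepended,
# taken from latest_instance when available and otherwise zeroed copies; the
# prepended instances get lstm_gradient_mask=0 so they contribute no gradient.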
|
96881
|
from abc import abstractmethod, ABC
from typing import Any
class Node(ABC):
def __init__(self, node_name: str, node_id: str, parent_id: str) -> None:
self._node_name = node_name
self._node_id = node_id
self._parent_id = parent_id
def __str__(self) -> str:
return self._node_name
    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Node):
            return NotImplemented
        return self.__dict__ == other.__dict__
@property
def node_name(self) -> str:
return self._node_name
@property
def node_id(self) -> str:
return self._node_id
@property
def parent_id(self) -> str:
return self._parent_id
def set_node_name(self, node_name: str) -> None:
self._node_name = node_name
def set_parent_id(self, parent_id: str) -> None:
self._parent_id = parent_id
@abstractmethod
def reset(self) -> None:
pass
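# Illustrative concrete subclass (not part of the original module):
class _ExampleNode(Node):
    def reset(self) -> None:
        # restore a neutral name for this example node
        self._node_name = ''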
|
96930
|
from io import BytesIO
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.template.loader import get_template
from django.http import HttpResponse
from xhtml2pdf import pisa
from .models import Order
def render_to_pdf(template_src, context_dict={}):
template = get_template(template_src)
html = template.render(context_dict)
result = BytesIO()
pdf = pisa.pisaDocument(BytesIO(html.encode("ISO-8859-1")), result)
if not pdf.err:
return result.getvalue()
return None
@login_required
def admin_order_pdf(request, order_id):
if request.user.is_superuser:
order = get_object_or_404(Order, pk=order_id)
pdf = render_to_pdf('order_pdf.html', {'order': order})
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
content = "attachment; filename=%s.pdf" % order_id
response['Content-Disposition'] = content
return response
return HttpResponse("Not found")
|
96971
|
from elasticsearch import Elasticsearch, ElasticsearchException
from oslo.config import cfg
from meniscus.data.handlers import base
from meniscus import config
from meniscus import env
_LOG = env.get_logger(__name__)
#Register options for Elasticsearch
elasticsearch_group = cfg.OptGroup(
name="elasticsearch",
title='Elasticsearch Configuration Options')
config.get_config().register_group(elasticsearch_group)
elasticsearch_options = [
cfg.ListOpt('servers',
default=['localhost:9200'],
help="""hostname:port for db servers
"""
),
cfg.IntOpt('bulk_size',
default=100,
help="""Amount of records to transmit in bulk
"""
),
cfg.StrOpt('ttl',
default="30d",
help="""default time to live for documents
inserted into the default store
"""
)
]
config.get_config().register_opts(
elasticsearch_options, group=elasticsearch_group)
try:
config.init_config()
except config.cfg.ConfigFilesNotFoundError as ex:
_LOG.exception(ex.message)
class ElasticsearchHandlerError(base.DataHandlerError):
pass
class ElasticsearchHandler(base.DataHandlerBase):
def __init__(self, conf):
"""
Initialize a data handler for elasticsearch
from settings in the meniscus config.
es_servers: a list[] of {"host": "hostname", "port": "port"} for
elasticsearch servers
        bulk_size: how many records are held before performing a bulk flush
ttl: the default length of time a document should live when indexed
status: the status of the current es connection
"""
self.es_servers = [{
"host": server.split(":")[0],
"port": server.split(":")[1]
} for server in conf.servers
]
if conf.bulk_size < 1:
raise ElasticsearchHandlerError(
"bulk size must be at least 1, bulk size given is {0}".format(
conf.bulk_size)
)
self.bulk_size = conf.bulk_size
self.ttl = conf.ttl
self.status = ElasticsearchHandler.STATUS_NEW
def _check_connection(self):
"""
Check that a pyES connection has been created,
if not, raise an exception
"""
if self.status != ElasticsearchHandler.STATUS_CONNECTED:
raise ElasticsearchHandlerError('Database not connected.')
def connect(self):
"""
Create a connection to elasticsearch.
"""
self.connection = Elasticsearch(hosts=self.es_servers)
self.status = ElasticsearchHandler.STATUS_CONNECTED
def close(self):
"""
Close the connection to elasticsearch
"""
self.connection = None
self.status = ElasticsearchHandler.STATUS_CLOSED
def create_index(self, index, mapping=None):
"""
Creates a new index on the elasticsearch cluster.
:param index: the name of the index to create
:param mapping: a mapping to apply to the index
"""
self._check_connection()
self.connection.indices.create(index=index, body=mapping)
def put_mapping(self, index, doc_type, mapping):
"""
Create a mapping for a doc_type on a specified index
"""
self._check_connection()
self.connection.indices.put_mapping(
index=index, doc_type=doc_type, body=mapping)
def get_handler():
"""
factory method that returns an instance of ElasticsearchHandler
"""
conf = config.get_config()
es_handler = ElasticsearchHandler(conf.elasticsearch)
es_handler.connect()
return es_handler
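# Illustrative usage (index name and mapping are hypothetical):
# es_handler = get_handler()
# es_handler.create_index('logs-index')
# es_handler.put_mapping('logs-index', 'default', {'default': {'properties': {}}})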
|
97029
|
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth.views import (LogoutView, PasswordResetView, PasswordResetDoneView,
PasswordResetConfirmView, PasswordResetCompleteView)
from waffle.decorators import waffle_switch
from apps.accounts.views.api_profile import my_profile
from apps.accounts.views.core import (create_account, account_settings, activation_verify)
from apps.accounts.views.login import LoginView, PasswordChangeView
urlpatterns = [
url(r'^api/profile$', my_profile, name='my_profile_v2'),
url(r'^logout$', waffle_switch('login')(LogoutView.as_view()), name='logout_v2'),
url(r'^create$', waffle_switch('signup')(create_account),
name='accounts_create_account_v2'),
url(r'^settings$', account_settings, name='account_settings_v2'),
url(r'^login$|^mfa/login$', waffle_switch('login')(LoginView.as_view()), name='login_v2'),
url(r'^password-change$',
waffle_switch('login')(PasswordChangeView.as_view(
template_name='registration/passwd_change_form.html',
success_url='settings')),
name='password_change_v2'),
url(r'^expired-password-change$',
waffle_switch('login')(PasswordChangeView.as_view(
template_name='registration/passwd_change_form.html',
success_url='settings')),
name='expired_password_change_v2'),
url(r'^forgot-password$',
waffle_switch('login')(PasswordResetView.as_view(
template_name='registration/password_forgot_form.html',
email_template_name='email/email-password-forgot-link.txt',
html_email_template_name='email/email-password-forgot-link.html',
from_email=settings.DEFAULT_FROM_EMAIL)),
name='forgot_password_v2'),
url(r'^password-reset-done$',
waffle_switch('login')(PasswordResetDoneView.as_view(
template_name='registration/password_forgot_reset_done.html')),
name='password_reset_done_v2'),
url(r'^password-reset-confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
waffle_switch('login')(PasswordResetConfirmView.as_view(
template_name='registration/password_forgot_reset_confirm_form.html')),
name='password_reset_confirm_v2'),
url(r'^password-reset-complete$',
waffle_switch('login')(PasswordResetCompleteView.as_view(
template_name='registration/password_forgot_reset_complete.html')),
name='password_reset_complete_v2'),
url(r'^activation-verify/(?P<activation_key>[^/]+)/$',
waffle_switch('login')(activation_verify),
name='activation_verify_v2'),
]
|
97051
|
import math
__author__ = 'chenzhao'
from gmission.models import *
# 1km is about 0.01, 1m is 0.00001
def location_nearby_user_count(location_id, r=0.01):
location = Location.query.get(location_id)
P = UserLastPosition
in_rect = (P.longitude >= location.coordinate.longitude - r) & (P.longitude <= location.coordinate.longitude + r) \
& (P.latitude >= location.coordinate.latitude - r) & (P.latitude <= location.coordinate.latitude + r)
c = P.query.filter(in_rect).count()
return c
def get_nearest_n_users(longitude, latitude, n, r=0.00001):
P = UserLastPosition
in_rect = (P.longitude >= longitude - r) & (P.longitude <= longitude + r) \
& (P.latitude >= latitude - r) & (P.latitude <= latitude + r)
c = P.query.filter(in_rect).count()
    print('KNN', n, r, c)
if c < n and r < 0.1:
return get_nearest_n_users(longitude, latitude, n, r * 2)
ps = sorted(P.query.filter(in_rect).all(), key=lambda p: geo_distance(p.longitude, p.latitude, longitude, latitude))
return [p.user for p in ps[:n]]
def get_nearby_users(longitude, latitude, r=0.05):
P = UserLastPosition
in_rect = (P.longitude >= longitude - r) & (P.longitude <= longitude + r) \
& (P.latitude >= latitude - r) & (P.latitude <= latitude + r)
c = P.query.filter(in_rect).count()
    print('user in %f bound: %d' % (r, c))
# ps = sorted(P.query.filter(in_rect).all(), key=lambda p: geo_distance(p.longitude, p.latitude, longitude, latitude))
return [p.user for p in P.query.filter(in_rect).all()]
def geo_angle(startPointLong, startPointLati, endPointLong, endPointLati):
angle = math.atan2(endPointLati - startPointLati, endPointLong - startPointLong)
return angle
def geo_distance(long1, lati1, long2, lati2):
return math.sqrt((long1 - long2) ** 2 + (lati1 - lati2) ** 2)
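# Illustrative example (not in the original file): geo_distance(0, 0, 3, 4) == 5.0;
# this is a planar approximation in degrees, not a haversine distance.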
def filter_location(data):
if data.get('location_id', None):
# print 'location_id provided, pop location'
data.pop('location', None)
return
# if 'location' in data:
# # print 'location provided'
# uc_keys = ['name', 'longitude','latitude']
# existing_location = Location.query.filter_by(**dict(zip(uc_keys, map(data['location'].get, uc_keys)))).first()
# # print 'existing location', existing_location
# if existing_location:
# data.pop('location', None)
# data['location_id'] = existing_location.id
if __name__ == '__main__':
pass
|
97053
|
SCRIPT="""
#!/bin/bash
# Generate train/test script for scenario "{scenario}" using the faster-rcnn "alternating optimization" method
set -x
set -e
rm -f $CAFFE_ROOT/data/cache/*.pkl
rm -f {scenarios_dir}/{scenario}/output/*.pkl
DIR=`pwd`
function quit {{
cd $DIR
exit 0
}}
export PYTHONUNBUFFERED="True"
TRAIN_IMDB={train_imdb}
TEST_IMDB={test_imdb}
cd {py_faster_rcnn}
mkdir -p {scenarios_dir}/{scenario}/logs >/dev/null
LOG="{scenarios_dir}/{scenario}/logs/log.txt.`date +'%Y-%m-%d_%H-%M-%S'`"
exec &> >(tee -a "$LOG")
echo Logging output to "$LOG"
time {train_script} {scenario_file} || quit
time ./tools/test_net.py --gpu {gpu_id} \\
--def {testproto} \\
--net {net_final_path} \\
--imdb {test_imdb} \\
--cfg {config_path} || quit
chmod u+x {plot_script}
{plot_script} $LOG {scenarios_dir}/{scenario}/output/results.png || true
MEAN_AP=`grep "Mean AP = " ${{LOG}} | awk '{{print $3}}'`
echo "{scenario} finished with mAP=$MEAN_AP" >> {scenarios_dir}/status.txt
quit
"""
|
97054
|
import time
import datetime
now = datetime.datetime.now()
mid = datetime.datetime(now.year, now.month, now.day) + datetime.timedelta(1)
while True:
now = datetime.datetime.now()
if mid < now < mid + datetime.timedelta(seconds=10) :
print("정각입니다")
mid = datetime.datetime(now.year, now.month, now.day) + datetime.timedelta(1)
time.sleep(1)
|
97074
|
import json
import pytest
import uuid
from pytest_httpx import HTTPXMock
from tinkoff import tasks
pytestmark = [pytest.mark.django_db]
@pytest.fixture
def idempotency_key() -> str:
return str(uuid.uuid4())
def test(order, idempotency_key, httpx_mock: HTTPXMock):
httpx_mock.add_response(
url=f'https://partner.dolyame.ru/v1/orders/tds-{order.id}/commit',
match_headers={
'X-Correlation-ID': idempotency_key,
},
json={},
)
tasks.commit_dolyame_order(order_id=order.id, idempotency_key=idempotency_key)
result = json.loads(httpx_mock.get_requests()[0].content)
assert result['amount'] == '100500'
    assert result['items'][0]['name'] == 'Предоставление доступа к записи курса «Пентакли и Тентакли»'  # "Access to the recording of the course 'Pentacles and Tentacles'"
assert result['items'][0]['price'] == '100500'
assert result['items'][0]['quantity'] == 1
@pytest.mark.xfail(strict=True, reason='Just to make sure above code works')
def test_header(order, idempotency_key, httpx_mock: HTTPXMock):
httpx_mock.add_response(
url=f'https://partner.dolyame.ru/v1/orders/tds-{order.id}/commit',
match_headers={
'X-Correlation-ID': 'SOME-OTHER-VALUE',
},
json={},
)
tasks.commit_dolyame_order(order_id=order.id, idempotency_key=idempotency_key)
|
97082
|
from flask import Flask, render_template, request, send_file
import io
import base64 as b64
import generator
app = Flask(__name__)
def page(**kwargs):
return render_template('main.html', **kwargs)
@app.route("/")
def home():
return page()
@app.route("/<text>", methods=['GET'])
def load_origamicon(text):
buffer = io.BytesIO()
origamicon = generator.create_origamicon(text)
origamicon.save(buffer, format='PNG')
buffer.seek(0)
return send_file(buffer, mimetype='image/png')
@app.route("/", methods=['POST'])
def update_origamicon():
text = request.form['origamicon-text']
if not text:
return page()
buffer = io.BytesIO()
origamicon = generator.create_origamicon(text)
origamicon.save(buffer, format='PNG')
image_data = buffer.getvalue()
image_data = b64.b64encode(image_data).decode()
image = 'data:;base64, {}'.format(image_data)
return page(name=text, image=image)
if __name__ == '__main__':
app.run()
|