| id (string, 3–8 chars) | content (string, 100–981k chars) |
|---|---|
11548607
|
def is_palindrome(s):
    return s[::-1] == s

def get_nearest_palindrome(s):
    if is_palindrome(s):
        return s
    if s[0] == s[-1]:
        return s[0] + get_nearest_palindrome(s[1:-1]) + s[-1]
    else:
        pal_1 = s[0] + get_nearest_palindrome(s[1:]) + s[0]
        pal_2 = s[-1] + get_nearest_palindrome(s[:-1]) + s[-1]
        if len(pal_1) > len(pal_2):
            return pal_2
        elif len(pal_1) < len(pal_2):
            return pal_1
        return pal_1 if pal_1 < pal_2 else pal_2

assert get_nearest_palindrome("racecar") == "racecar"
assert get_nearest_palindrome("google") == "elgoogle"
assert get_nearest_palindrome("egoogle") == "elgoogle"
assert get_nearest_palindrome("elgoog") == "elgoogle"
assert get_nearest_palindrome("race") == "ecarace"
|
11548610
|
class EnvironmentSettings:
    def __init__(self):
        self.workspace_dir = '/home/lichao/projects/pytracking_lichao/train_ws/'  # Base directory for saving network checkpoints.
        self.tensorboard_dir = self.workspace_dir + '/tensorboard/'  # Directory for tensorboard files.
        self.lasot_dir = '/media/martin/Samsung_T5/tracking_datasets/LaSOTBenchmark/'
        self.got10k_dir = '/home/lichao/tracking/datasets/GOT-10k/train/'
        self.got10k_i_dir = '/home/lichao/tracking/datasets/GOT-10k_i_/train/'
        self.trackingnet_dir = ''
        self.coco_dir = ''
        self.imagenet_dir = ''
        self.imagenetdet_dir = ''
|
11548614
|
import matplotlib.pyplot as plt
import shogun as sg
import util
plt.title('LDA')
util.DISTANCE = 0.5
gamma = 0.1
# positive examples
pos = util.get_realdata(True)
plt.plot(pos[0, :], pos[1, :], "r.")
# negative examples
neg = util.get_realdata(False)
plt.plot(neg[0, :], neg[1, :], "b.")
# train lda
labels = util.get_labels()
features = util.get_realfeatures(pos, neg)
lda = sg.create_machine('LDA', gamma=gamma, labels=labels)
lda.train(features)
# compute output plot iso-lines
x, y, z = util.compute_output_plot_isolines(lda)
c = plt.pcolor(x, y, z)
plt.contour(x, y, z, linewidths=1, colors='black')  # the removed 'hold' kwarg is unnecessary; artists share the axes by default
plt.colorbar(c)
plt.show()
|
11548617
|
from decimal import Decimal

import pytest

from stockholm import InvalidOperandError, Money

def test_simple_addition() -> None:
    m1 = Money(0)
    assert isinstance(m1, Money)
    assert m1.amount == 0
    assert m1.currency is None
    assert str(m1) == "0.00"
    m2 = m1 + 1
    assert isinstance(m2, Money)
    assert m2.amount == 1
    assert m2.currency is None
    assert str(m2) == "1.00"
    m3 = m2 + "1.50 SEK"
    assert isinstance(m3, Money)
    assert m3.amount == 2.5
    assert m3.currency == "SEK"
    assert str(m3) == "2.50 SEK"

def test_one_line_addition() -> None:
    m = Money(0) + "1 EUR" + "2.5" + 1.51
    assert isinstance(m, Money)
    assert m.amount == Decimal("5.01")
    assert m.currency == "EUR"
    assert str(m) == "5.01 EUR"

def test_complex_addition() -> None:
    m = 1000 + 500 + Money(150, currency="NOK") + "4711.15000" + Money(-10000) + Money("55.70 NOK") + Decimal("25.01")
    assert isinstance(m, Money)
    assert m.amount == Decimal("-3558.14")
    assert m.currency == "NOK"
    assert str(m) == "-3558.14 NOK"

def test_simple_subtraction() -> None:
    m1 = Money("1000")
    assert isinstance(m1, Money)
    assert m1.amount == 1000
    assert m1.currency is None
    assert str(m1) == "1000.00"
    m2 = m1 - 1
    assert isinstance(m2, Money)
    assert m2.amount == 999
    assert m2.currency is None
    assert str(m2) == "999.00"
    m3 = m2 - "1500 SEK"
    assert isinstance(m3, Money)
    assert m3.amount == -501
    assert m3.currency == "SEK"
    assert str(m3) == "-501.00 SEK"
    m4 = "500" - m3
    assert isinstance(m4, Money)
    assert m4.amount == 1001
    assert m4.currency == "SEK"
    assert str(m4) == "1001.00 SEK"

def test_simple_multiplication() -> None:
    m1 = Money("333.3333", currency="SEK")
    assert isinstance(m1, Money)
    assert m1.amount == Decimal("333.3333")
    assert m1.currency == "SEK"
    assert str(m1) == "333.3333 SEK"
    m2 = m1 * 3
    assert isinstance(m2, Money)
    assert m2.amount == Decimal("999.9999")
    assert m2.currency == "SEK"
    assert str(m2) == "999.9999 SEK"
    m2 = 3 * m1
    assert isinstance(m2, Money)
    assert m2.amount == Decimal("999.9999")
    assert m2.currency == "SEK"
    assert str(m2) == "999.9999 SEK"
    m2 = m1 * Money(3)
    assert isinstance(m2, Money)
    assert m2.amount == Decimal("999.9999")
    assert m2.currency == "SEK"
    assert str(m2) == "999.9999 SEK"
    with pytest.raises(InvalidOperandError):
        m1 * m1

def test_simple_division() -> None:
    m1 = Money("21", currency="EUR")
    assert isinstance(m1, Money)
    assert m1.amount == 21
    assert m1.currency == "EUR"
    assert str(m1) == "21.00 EUR"
    m2 = m1 / 7
    assert isinstance(m2, Money)
    assert m2.amount == 3
    assert m2.currency == "EUR"
    assert str(m2) == "3.00 EUR"
    m3 = m1 / "7 EUR"
    assert isinstance(m3, Money)
    assert m3.amount == 3
    assert m3.currency is None
    assert str(m3) == "3.00"

def test_true_division() -> None:
    m1 = Money("100", currency="SEK")
    assert isinstance(m1, Money)
    assert m1.amount == 100
    assert m1.currency == "SEK"
    assert str(m1) == "100.00 SEK"
    m2 = m1 / 3
    assert isinstance(m2, Money)
    assert round(m2.amount, 9) == Decimal("33.333333333")
    assert m2.currency == "SEK"
    assert str(m2) == "33.333333333 SEK"
    with pytest.raises(ZeroDivisionError):
        m1 / 0
    m3 = Money("10.39", currency="USD")
    exchange_rate = m1 / m3
    assert isinstance(exchange_rate, Money)
    assert round(exchange_rate, 2) == Decimal("9.62")
    assert exchange_rate.currency is None
    assert str(exchange_rate) == "9.624639076"

def test_floor_division() -> None:
    m1 = Money("100", currency="SEK")
    assert isinstance(m1, Money)
    assert m1.amount == 100
    assert m1.currency == "SEK"
    assert str(m1) == "100.00 SEK"
    m2 = m1 // 3
    assert isinstance(m2, Money)
    assert m2.amount == 33
    assert m2.currency == "SEK"
    assert str(m2) == "33.00 SEK"
    m2 = m1 // "3 SEK"
    assert isinstance(m2, Money)
    assert m2.amount == 33
    assert m2.currency is None
    assert str(m2) == "33.00"
    with pytest.raises(ZeroDivisionError):
        m1 // 0
    m3 = Money("10.39", currency="USD")
    full_usd_amounts = m1 // m3
    assert isinstance(full_usd_amounts, Money)
    assert full_usd_amounts == 9
    assert full_usd_amounts.currency is None
    assert str(full_usd_amounts) == "9.00"

def test_modulus() -> None:
    m1 = Money("49", currency="SEK")
    assert isinstance(m1, Money)
    assert m1.amount == 49
    assert m1.currency == "SEK"
    assert str(m1) == "49.00 SEK"
    m2 = m1 % 14
    assert isinstance(m2, Money)
    assert m2.amount == 7
    assert m2.currency == "SEK"
    assert str(m2) == "7.00 SEK"
    m3 = m1 % Money(14, currency="USD")
    assert isinstance(m3, Money)
    assert m3.amount == 7
    assert m3.currency == "SEK"
    assert str(m3) == "7.00 SEK"
    m4 = m1 % Money(14, currency="SEK")
    assert isinstance(m4, Money)
    assert m4.amount == 7
    assert m4.currency == "SEK"
    assert str(m4) == "7.00 SEK"

def test_divmod() -> None:
    m1 = Money("49", currency="SEK")
    assert isinstance(m1, Money)
    assert m1.amount == 49
    assert m1.currency == "SEK"
    assert str(m1) == "49.00 SEK"
    m2, m3 = divmod(m1, 14)
    assert isinstance(m2, Money)
    assert m2.amount == 3
    assert m2.currency == "SEK"
    assert str(m2) == "3.00 SEK"
    assert isinstance(m3, Money)
    assert m3.amount == 7
    assert m3.currency == "SEK"
    assert str(m3) == "7.00 SEK"
    m3, m4 = divmod(m1, Money(14, currency="USD"))
    assert isinstance(m3, Money)
    assert m3.amount == 3
    assert m3.currency is None
    assert str(m3) == "3.00"
    assert isinstance(m4, Money)
    assert m4.amount == 7
    assert m4.currency == "SEK"
    assert str(m4) == "7.00 SEK"
    m5, m6 = divmod(m1, Money(14, currency="SEK"))
    assert isinstance(m5, Money)
    assert m5.amount == 3
    assert m5.currency is None
    assert str(m5) == "3.00"
    assert isinstance(m6, Money)
    assert m6.amount == 7
    assert m6.currency == "SEK"
    assert str(m6) == "7.00 SEK"

def test_pow() -> None:
    m1 = Money("2", currency="BIT")
    assert isinstance(m1, Money)
    assert m1.amount == 2
    assert m1.currency == "BIT"
    assert str(m1) == "2.00 BIT"
    m2 = m1 ** 4
    assert isinstance(m2, Money)
    assert m2.amount == 16
    assert m2.currency == "BIT"
    assert str(m2) == "16.00 BIT"
    m2 = m1 ** Money(4)
    assert isinstance(m2, Money)
    assert m2.amount == 16
    assert m2.currency == "BIT"
    assert str(m2) == "16.00 BIT"
    with pytest.raises(InvalidOperandError):
        m1 ** m1
    assert Money(2) ** Money(4) == 16

def test_bad_values() -> None:
    m = Money(1, currency="SEK")
    with pytest.raises(InvalidOperandError):
        m + "5,0"
    with pytest.raises(InvalidOperandError):
        m + "USD USD"
    with pytest.raises(InvalidOperandError):
        m - "50 000"

def test_object_arithmetics() -> None:
    m = Money(0, currency="SEK")
    assert m.add(1).add(2).add(3) == Money(6, currency="SEK")
    assert m.add(10).sub(5) == Money(5, currency="SEK")
    assert m.add(10).subtract(5) == Money(5, currency="SEK")
    with pytest.raises(Exception):
        assert m.add(1).add(2).add(3) == Money(6, currency="EUR")
    m2 = Money(471100, from_sub_units=True)
    assert m2.add(133800, from_sub_units=True) == Money(604900, from_sub_units=True)
    assert m2.add(133800, from_sub_units=True) == Money("6049.00")
|
11548633
|
import typing as ty

from .oncall_default import F, OnCallDefault, T
from .stack_context import StackContext

class ContextualDefault(ty.Generic[T]):
    """A shortcut for implementing simple StackContexts that are used only
    for OnCallDefaults on a particular function.

    Though this _can_ be shared across functions, the parameter name
    will have to be identical, and you should consider having separate
    ContextVars per function for sanity. If your functions are truly
    logically grouped, it might make more sense to write a class.
    """

    def __init__(self, param_name: str, default: T, context_prefix: str = ""):
        self.param_name = param_name
        self.stack_context = StackContext(context_prefix + param_name, default)
        self.oncall_default = OnCallDefault(self.stack_context)

    def __call__(self) -> T:
        return self.stack_context()

    def apply(self, f: F) -> F:
        return self.oncall_default.apply_to(self.param_name)(f)

    def set_default(self, default_value: T):
        return self.stack_context.set(default_value)
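
# Added sketch: a self-contained analogue of this pattern built on stdlib
# contextvars, to show what "inject a context-local default into one
# parameter" means. The names below (fetch, timeout) are illustrative
# assumptions, not this package's API.
import contextvars
import functools

_timeout = contextvars.ContextVar("timeout", default=30.0)

def with_timeout_default(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # Inject the context-local default when the caller omits the parameter.
        kwargs.setdefault("timeout", _timeout.get())
        return f(*args, **kwargs)
    return wrapper

@with_timeout_default
def fetch(url, timeout=None):
    return (url, timeout)

assert fetch("https://example.org") == ("https://example.org", 30.0)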
|
11548664
|
def f():
    try:
        a
    finally:
        return 42
# EXPECTED:
[
    ...,
    CODE_START("f"),
    LOAD_CONST(None),
    SETUP_FINALLY(Block(1)),
    ...,
    POP_BLOCK(0),
    BEGIN_FINALLY(0),
    POP_FINALLY(0),
    POP_TOP(0),
    LOAD_CONST(42),
    RETURN_VALUE(0),
    END_FINALLY(0),
    POP_TOP(0),
]
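# Added note: the EXPECTED list above appears to be a test harness's
# expected-opcode DSL, not stdlib output. For comparison, CPython's actual
# bytecode for f() can be inspected with the stdlib disassembler:
#
#     import dis
#     dis.dis(f)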
|
11548701
|
import json
import os
from copy import copy
from datetime import datetime
from pathlib import Path
from time import time
from typing import Union

import lightgbm as lgb
import numpy as np
import pandas as pd
from lightgbm import Dataset as lgbDataset
from pytorch_widedeep.utils import LabelEncoder
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split

from lightgbm_optimizer import (  # isort:skip  # noqa: E402
    LGBOptimizerHyperopt,
    LGBOptimizerOptuna,
)

SEED = 42
pd.options.display.max_columns = 100

if __name__ == '__main__':
    ROOTDIR = Path('/home/robin/jianzh/autotabular/examples/automlbechmark')
    PROCESSED_DATA_DIR = ROOTDIR / 'data/processed_data/fb_comments/'
    RESULTS_DIR = ROOTDIR / 'results/fb_comments/lightgbm'
    if not RESULTS_DIR.is_dir():
        os.makedirs(RESULTS_DIR)
    fb_comments = pd.read_csv(PROCESSED_DATA_DIR / 'fb_comments.csv')
    target_name = 'target'
    OPTIMIZE_WITH = 'optuna'
    cat_cols = []
    for col in fb_comments.columns:
        # `or` binds looser than `and`: keep any object-dtype column, and any
        # non-target column with fewer than 200 unique values
        if fb_comments[col].dtype == 'O' or (fb_comments[col].nunique() < 200 and col != 'target'):
            cat_cols.append(col)
    num_cols = [c for c in fb_comments.columns if c not in cat_cols + ['target']]
    # TRAIN/VALID for hyperparam optimization
    label_encoder = LabelEncoder(cat_cols)
    fb_comments = label_encoder.fit_transform(fb_comments)
    X = fb_comments.drop(target_name, axis=1)
    y = fb_comments[target_name]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
    lgbtrain = lgbDataset(
        X_train,
        y_train,
        categorical_feature=cat_cols,
        free_raw_data=False,
    )
    lgbvalid = lgbDataset(
        X_test,
        y_test,
        reference=lgbtrain,
        free_raw_data=False,
    )
    if OPTIMIZE_WITH == 'optuna':
        optimizer: Union[LGBOptimizerHyperopt, LGBOptimizerOptuna] = LGBOptimizerOptuna(objective='regression')
    elif OPTIMIZE_WITH == 'hyperopt':
        optimizer = LGBOptimizerHyperopt(objective='regression', verbose=True)
    optimizer.optimize(lgbtrain, lgbvalid)
    # Final TRAIN/TEST
    params = copy(optimizer.best)
    params['n_estimators'] = 1000
    flgbtrain = lgbDataset(
        X_train,
        y_train,
        categorical_feature=cat_cols,
        free_raw_data=False,
    )
    lgbtest = lgbDataset(
        X_test,
        y_test,
        reference=flgbtrain,
        free_raw_data=False,
    )
    start = time()
    model = lgb.train(
        params,
        flgbtrain,
        valid_sets=[lgbtest],
        early_stopping_rounds=50,
        verbose_eval=True,
    )
    runtime = time() - start
    preds = model.predict(lgbtest.data)
    rmse = np.sqrt(mean_squared_error(lgbtest.label, preds))
    r2 = r2_score(lgbtest.label, preds)
    print(f'RMSE: {rmse}')
    print(f'R2: {r2}')
    # SAVE
    suffix = str(datetime.now()).replace(' ', '_').split('.')[:-1][0]
    results_filename = '_'.join(['fb_comments_lightgbm', suffix]) + '.json'
    results_d = {}
    results_d['best_params'] = optimizer.best
    results_d['runtime'] = runtime
    results_d['rmse'] = rmse
    results_d['r2'] = r2
    with open(RESULTS_DIR / results_filename, 'w') as f:
        json.dump(results_d, f, indent=4)
|
11548748
|
import time
import sys
from collections import OrderedDict

from metadata_parser.utils import time_memory_track

"""Step 03 (D): Function for Table To VCF"""

@time_memory_track
def fnc_table_to_vcf(infile, meta_header, outfile, samples, formats, infos, genotype_is):
    print("converting Table file to VCF")
    begin_time = time.time()
    gt_tag_as_iupac = []
    for gts_tag in genotype_is:
        tag_format = gts_tag.split(":")
        if tag_format[1] == "iupac":
            gt_tag_as_iupac.append(tag_format[0])
    with open(infile) as tablefile, open(meta_header) as meta_header, open(
        outfile, "w+"
    ) as vcf_out:
        """Start reading the haplotype file as a generator. This saves memory."""
        for line in tablefile:
            ## find and set the indexes ...
            # ... of pre-fields, INFO, FORMAT and SAMPLE level information
            """ Step 01: The very first line of the file is read;
            - to find the variable names and their index positions in the input file.
            - almost all the variables created downstream are "global variables".
            - SAMPLE level information is automatically identified unless explicitly given.
              The sample names are identified using ":" in the column names, so other names
              should not contain ":" at all.
            - FORMAT level tags can also be provided as a list, or can be mined automatically
              along with SAMPLE by using ":" matching.
            - All the preHeader tags, i.e. CHROM POS ID REF ALT QUAL FILTER, are reserved and
              updated by matching the names in the text header line.
            """
            # track the "header" names that have already been taken;
            # this helps in finding appropriate "INFO" level tags from the header file
            used_header = []
            if line.startswith("CHROM") or line.startswith("#CHROM"):
                header_line = line.rstrip("\n").split("\t")
                contig_idx, contig_header = check_chrom_in_headerline(header_line)
                used_header.append(contig_header)
                pos_idx = check_pos_headerline(header_line)
                id_idx = header_line.index("ID") if "ID" in header_line else None
                ref_idx = header_line.index("REF") if "REF" in header_line else None
                alt_idx = header_line.index("ALT") if "ALT" in header_line else None
                qual_idx = header_line.index("QUAL") if "QUAL" in header_line else None
                filter_idx = (
                    header_line.index("FILTER") if "FILTER" in header_line else None
                )
                used_header.extend(
                    ["POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT"]
                )
                """INFO tags are identified by matching "INFO:" in the column names."""
                infos_in_header = [x for x in header_line if x.startswith("INFO:")]
                all_infos = [x.replace("INFO:", "") for x in infos_in_header]
                info_tags = process_fields(
                    given_field=infos, all_fields=all_infos, tag="info"
                )
                # also find the position of the info tags on the header line
                infos_idx = []
                if len(info_tags) != 0:
                    for inftag in info_tags:
                        infos_idx.append(header_line.index("INFO:" + inftag))
                else:
                    infos_idx = None
                """SAMPLE names and FORMAT tags are identified using the ":" delimiter in
                the column names, after excluding the INFO fields."""
                possible_samples = [x for x in header_line if ":" in x]
                # get sample names and unique format tags by removing info tags
                samples_and_formats = [
                    x for x in possible_samples if x not in infos_in_header
                ]
                # separate samples and formats
                all_samples = [x.split(":")[0] for x in samples_and_formats]
                all_formats = [x.split(":")[1] for x in samples_and_formats]
                # unique sample names and format tags, in order
                all_samples = set_of_list_order(all_samples)
                all_formats = set_of_list_order(all_formats)
                # find the available format tags
                format_tags = process_fields(
                    given_field=formats, all_fields=all_formats, tag="formats"
                )
                # In the available FORMAT tags, move the "GT" field to the beginning.
                if "GT" in format_tags:
                    format_tags.remove("GT")
                    format_tags.insert(0, "GT")
                ### prepare sample names
                sample_names = process_fields(
                    given_field=samples, all_fields=all_samples, tag="samples"
                )
                used_header.extend(sample_names)
                print(used_header)
                """ Now, read the meta header and add it to the output VCF file. """
                print('\nReading meta header from file "%s" ' % (meta_header.name))
                meta_info = get_meta_info(meta_header)
                # add meta header to the output VCF file
                meta_info += "\n"
                meta_info += (
                    "\t".join(
                        [
                            "#CHROM",
                            "POS",
                            "ID",
                            "REF",
                            "ALT",
                            "QUAL",
                            "FILTER",
                            "INFO",
                            "FORMAT",
                        ]
                    )
                    + "\t"
                )
                # add SAMPLE fields to the output VCF file
                meta_info += "\t".join(sample_names)
                # print(meta_info)
                # Finally, write the header part of the output VCF
                vcf_out.write(meta_info + "\n")
                continue
            """ Now, extract the required data from each of the remaining lines and add it to the output VCF. """
            updated_line = table_to_vcf(
                line,
                contig_idx,
                pos_idx,
                id_idx,
                ref_idx,
                alt_idx,
                qual_idx,
                filter_idx,
                infos_idx,
                info_tags,
                format_tags,
                sample_names,
                gt_tag_as_iupac,
                header_line,
            )
            vcf_out.write(updated_line)
            vcf_out.write("\n")
    print('Elapsed time : "%s".' % (time.time() - begin_time))

"""Function part of Table to VCF """
def table_to_vcf(
    line_in,
    contig_idx,
    pos_idx,
    id_idx,
    ref_idx,
    alt_idx,
    qual_idx,
    filter_idx,
    infos_idx,
    info_tags,
    format_tags,
    sample_names,
    gt_tag_as_iupac,
    header_line,
):
    line = line_in.rstrip("\n").split("\t")
    chrom = line[contig_idx] if contig_idx is not None else "."
    pos = line[pos_idx] if pos_idx is not None else "."
    ids = line[id_idx] if id_idx is not None else "."
    ref = line[ref_idx] if ref_idx is not None else "."
    alt = line[alt_idx] if alt_idx is not None else "."
    qual = line[qual_idx] if qual_idx is not None else "."
    filter_ = line[filter_idx] if filter_idx is not None else "."
    format_ = ":".join(format_tags) if format_tags is not None else "."
    # Update "info tags and values". This is a little complex.
    if info_tags != []:
        info_ = []
        for ith, itemi in enumerate(info_tags):
            tag_val = "=".join([itemi, line[infos_idx[ith]]])
            info_.append(tag_val)
        info_ = ";".join(info_)
    elif info_tags == []:
        info_ = "."
    # update the output line
    line_out = (
        "\t".join([chrom, pos, ids, ref, alt, qual, filter_, info_, format_]) + "\t"
    )
    # Further update the SAMPLE-to-FORMAT values;
    # pass the line to another function
    format_to_sample_vals = update_sample_format(
        line, ref, alt, sample_names, format_tags, header_line, gt_tag_as_iupac
    )
    line_out = line_out + format_to_sample_vals
    return line_out

""" Function part of Table to VCF """
def update_sample_format(
    line, ref, alt, sample_names, format_tags, header_line, gt_tag_as_iupac
):
    # The "line" variable is passed into this function.
    # The global variables are "genotype_is", "sample_names" and "format_tags".
    # to store the updated line
    format_sample_line = []
    all_alleles = [ref] + alt.split(",")
    for namex in sample_names:
        namex_vals = []
        for tagx in format_tags:
            sample_format_tag = namex + ":" + tagx
            sample_format_idx = header_line.index(sample_format_tag)
            sample_format_val = line[sample_format_idx]
            """ further update the sample:format value if GT in the table is an IUPAC base """
            # only convert non-missing genotypes; missing values (".", "./.", ".|.")
            # are kept as-is so the column count stays aligned with FORMAT (the
            # original used a bare `continue` here, which dropped the field)
            if tagx in gt_tag_as_iupac and sample_format_val not in (".", "./.", ".|."):
                sep = "/" if "/" in sample_format_val else "|"
                sample_format_val = sample_format_val.split(sep)
                sample_format_val = [
                    all_alleles.index(sample_format_val[0]),
                    all_alleles.index(sample_format_val[1]),
                ]
                sample_format_val = sep.join(str(xth) for xth in sample_format_val)
            namex_vals.append(sample_format_val)
        format_sample_line.append(":".join(namex_vals))
    sample_format_final = "\t".join(format_sample_line)
    return sample_format_final

def check_chrom_in_headerline(header_line):
    if "#CHROM" in header_line:
        return header_line.index("#CHROM"), "#CHROM"
    elif "CHROM" in header_line:
        return header_line.index("CHROM"), "CHROM"
    else:
        print("CHROM field does not exist in the input table file. Update your file")
        print("Exiting the program")
        sys.exit(0)

def check_pos_headerline(header_line):
    if "POS" in header_line:
        return header_line.index("POS")
    else:
        print("POS field does not exist. Update your file")
        print("Exiting the program")
        sys.exit()

def process_fields(given_field, all_fields, tag):
    # check for the empty list first; the original tested given_field[0]
    # before the length check, which raises IndexError on an empty list
    if len(given_field) == 0:
        print(f"No {tag} available.")
        if tag == "info":
            print("INFO field will be populated with the empty '.' value")
        return []
    elif given_field[0] == "all":
        return all_fields
    else:
        if tag == "formats":
            return given_field
        else:
            nonsense_fields = [x for x in given_field if x not in all_fields]
            not_used_fields = [x for x in all_fields if x not in given_field]
            if tag == "info":
                if len(not_used_fields):
                    print(
                        "the following INFO tags won't be put in INFO fields of the output VCF"
                    )
            if len(nonsense_fields):
                print(
                    f"The following {nonsense_fields} {tag} are not available in the table file and not valid."
                )
                sys.exit(0)
            else:
                return given_field

def get_meta_info(meta_header):
    if meta_header:
        meta_info = meta_header.readlines()
        # if the meta header has a "#CHROM POS REF ..." line then delete it
        if meta_info[-1].startswith("#CHROM\tPOS"):
            return "".join(meta_info[:-1]).rstrip("\n")
        else:
            return "".join(meta_info).rstrip("\n")
    else:
        print("Header with meta information is not provided")
        print("Exiting the program")
        sys.exit(0)

def set_of_list_order(input_list):
    outtemp = OrderedDict()
    for item in input_list:
        outtemp[item] = None
    return list(outtemp)
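
# Added sketch: a minimal, self-contained illustration of the ":"-based column
# classification described in the docstring above (the header below is a
# hypothetical example, not from the source data).
header = ["CHROM", "POS", "REF", "ALT", "INFO:AF", "sampleA:GT", "sampleA:PI"]
infos_demo = [c for c in header if c.startswith("INFO:")]
sample_format_demo = [c for c in header if ":" in c and c not in infos_demo]
assert [c.split(":")[0] for c in sample_format_demo] == ["sampleA", "sampleA"]
assert [c.split(":")[1] for c in sample_format_demo] == ["GT", "PI"]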
|
11548769
|
import lasagne
import theano.tensor as T
import numpy as np

# The input must be flattened
class SmthActLayer(lasagne.layers.Layer):
    def __init__(self, incoming, x_start, x_end, num_segs, W=lasagne.init.Normal(0.01), **kwargs):
        # the original called super(DotLayer, ...), a copy-paste error
        super(SmthActLayer, self).__init__(incoming, **kwargs)
        num_inputs = self.input_shape[1]
        self.x_start = x_start
        self.x_end = x_end
        self.x_step = (x_end - x_start) / num_segs
        self.num_segs = num_segs
        self.W = self.add_param(W, (num_segs, num_inputs), name='W', small_weights=True)

    @staticmethod
    def basisf(x, start, end):
        # Piecewise-linear basis: 0 below `start`, a linear ramp on
        # [start, end), and the constant (end - start) above `end`.
        # The original body referenced x_start/x_end instead of the
        # `start`/`end` parameters.
        ab_start = T.le(start, x)
        lt_end = T.gt(end, x)
        return 0 * (1 - ab_start) + (x - start) * ab_start * lt_end + (end - start) * (1 - lt_end)

    def get_output_for(self, input, **kwargs):
        output = T.zeros_like(input)
        for s in range(self.num_segs):
            # evaluate the basis on the layer input (the original passed
            # `output`, which is all zeros on the first iteration)
            output += self.basisf(input, self.x_start + self.x_step * s,
                                  self.x_start + self.x_step * (s + 1)) * self.W[s, :]
        return output
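
# Added sketch: a numpy analogue of basisf to make the piecewise-linear ramp
# concrete (illustrative only; it mirrors the Theano expression above).
import numpy as np

def basisf_np(x, start, end):
    return np.clip(x, start, end) - start

# with start=0, end=1: values below 0 -> 0, above 1 -> 1, linear in between
assert basisf_np(np.array([-1.0, 0.5, 2.0]), 0.0, 1.0).tolist() == [0.0, 0.5, 1.0]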
|
11548802
|
class PairingPawns:
    def savedPawnCount(self, start):
        s = list(start)
        for i in xrange(len(start) - 1, 0, -1):
            s[i - 1] += s[i] / 2
        return s[0]
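
# Added note: a worked example (illustrative): start = (2, 4, 8).
# Pawns cascade from the last column toward index 0, half advancing each step:
# s[1] += 8/2 -> 8, then s[0] += 8/2 -> 6, so savedPawnCount returns 6.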
|
11548803
|
from lenstronomy.PointSource.Types.unlensed import Unlensed
import pytest
import numpy.testing as npt

class TestUnlensed(object):
    def setup(self):
        self.ps = Unlensed()
        self.kwargs = {'point_amp': [2, 1], 'ra_image': [0, 1], 'dec_image': [1, 0]}

    def test_image_position(self):
        x_img, y_img = self.ps.image_position(self.kwargs)
        npt.assert_almost_equal(x_img, self.kwargs['ra_image'])
        npt.assert_almost_equal(y_img, self.kwargs['dec_image'])

    def test_source_position(self):
        x_src, y_src = self.ps.source_position(self.kwargs, kwargs_lens=None)
        npt.assert_almost_equal(x_src, self.kwargs['ra_image'])
        npt.assert_almost_equal(y_src, self.kwargs['dec_image'])

    def test_image_amplitude(self):
        amp = self.ps.image_amplitude(self.kwargs, kwargs_lens=None, x_pos=None,
                                      y_pos=None, magnification_limit=None, kwargs_lens_eqn_solver=None)
        npt.assert_almost_equal(amp, self.kwargs['point_amp'])

    def test_source_amplitude(self):
        amp = self.ps.source_amplitude(self.kwargs, kwargs_lens=None)
        npt.assert_almost_equal(amp, self.kwargs['point_amp'])

if __name__ == '__main__':
    pytest.main()
|
11548900
|
from typing import List, Optional, Tuple

from spacy.language import Language

from edsnlp.utils.deprecation import deprecated_factory

from .accents import Accents

DEFAULT_CONFIG = dict(
    accents=None,
)

@deprecated_factory("accents", "eds.accents", default_config=DEFAULT_CONFIG)
@Language.factory("eds.accents", default_config=DEFAULT_CONFIG)
def create_component(
    nlp: Language,
    name: str,
    accents: Optional[List[Tuple[str, str]]],
):
    return Accents(
        accents=accents,
    )
|
11548910
|
from azureml.core.run import Run
import argparse
import time

parser = argparse.ArgumentParser()
parser.add_argument("--a", type=int, dest="a", help="The alpha parameter")
parser.add_argument("--b", type=int, dest="b", help="The beta parameter")
args = parser.parse_args()

# Produce bad values if 'a' is greater than 2
if args.a > 2:
    args.a = 0

run = Run.get_context()

def fake_train(run, a, b):
    time.sleep(5)
    metric = a + b
    run.log("fake_metric", metric)

for epoch in range(20):
    fake_train(run, args.a * epoch, args.b)
|
11548924
|
import numpy as np
from pymc3.core import *
from pymc3.step_methods.arraystep import ArrayStepShared
from pymc3.theanof import make_shared_replacements
from pymc3.distributions.transforms import logodds
from ContinuousTimeMarkovModel.transforms import *
from theano import function
#import ContinuousTimeMarkovModel.cython.forwardX_cython as cy
import ContinuousTimeMarkovModel.profilingUtil

class ForwardX(ArrayStepShared):
    """
    Use forward sampling (equation 10) to sample a realization of S_t, t=1,...,T_n
    given Q, B, and X constant.
    """
    def __init__(self, vars, N, T, K, D, Dd, O, nObs, model=None):
        #DES Temp:
        self.logp = []
        self.N = N
        self.T = T
        self.K = K
        self.D = D
        self.Dd = Dd
        self.O = O
        self.nObs = nObs
        #self.max_obs = max_obs
        self.zeroIndices = np.roll(self.T.cumsum(), 1)
        self.zeroIndices[0] = 0
        #self.pos_O_idx = np.zeros((D,max_obs,N), dtype=np.bool_)
        #for n in xrange(N):
        #    for t in xrange(self.T[n]):
        #        self.pos_O_idx[:,t,n] = np.in1d(np.arange(self.D), self.O[:,t,n])
        #self.OO = np.zeros((self.nObs,self.Dd),dtype=np.int)
        #self.OO = np.zeros((self.Dd,self.N,self.max_obs),dtype=np.int)
        self.negMask = np.zeros((self.nObs, D), dtype=np.int)
        #self.negMask = np.zeros((self.N,self.max_obs,D),dtype=np.int)
        for n in range(self.N):
            n0 = self.zeroIndices[n]
            for t in range(self.T[n]):
                #for t in range(self.max_obs):
                #self.OO[n0+t,:] = self.O[n0+t,:]
                self.negMask[n0+t,:] = 1 - np.in1d(np.arange(self.D), self.O[n0+t,:]).astype(np.int)
        self.posMask = (self.O != -1).astype(np.int)
        #self.betaMask = np.zeros((max_obs,N,2))
        #for n in range(self.N):
        #    self.betaMask[:(T[n]-1),n,:] = 1
        model = modelcontext(model)
        vars = inputvars(vars)
        shared = make_shared_replacements(vars, model)
        super(ForwardX, self).__init__(vars, shared)
        S = self.shared['S']
        B0 = logodds.backward(self.shared['B0_logodds'])
        B = logodds.backward(self.shared['B_logodds'])
        Z = model.vars[6].distribution.transform_used.backward(self.shared['Z_anchoredbeta'])
        #Z = anchoredbeta.backward(self.shared['Z_anchoredbeta'])
        #Z = logodds.backward(self.shared['Z_logodds'])
        L = logodds.backward(self.shared['L_logodds'])
        #at this point parameters are still symbolic, so we must create a
        #get_params function to actually evaluate them
        self.get_params = evaluate_symbolic_shared(S, B0, B, Z, L)

    def sampleState(self, pX):
        #pX_norm = pX/np.sum(pX, axis=0)
        #r = np.random.uniform(size=self.K)
        #drawn_state = np.greater_equal(r, pX_norm[0,:])
        pX_norm = (pX.T / np.sum(pX.T, axis=0))[0].T
        #pX_norm = (pX.T/np.sum(pX,axis=1))
        r = np.random.uniform(size=pX_norm.shape)
        #r = np.random.uniform(size=(self.nObs,self.K))
        drawn_state = np.greater_equal(r, pX_norm)
        #drawn_state = np.greater_equal(r, pX_norm[0,:])
        return drawn_state.astype(np.int8)

    def computePsi(self, S, B):
        #Psi[nt,k,j,i] is the likelihood of x=i at time t given x=j at time t-1.
        #The prob. of getting the comorbidity once you already have it is one;
        #the prob. of not having the comorbidity once you've already had it is zero.
        #Since the state stays the same more often than it changes, by default we
        #set the prob. of staying in 0 given you're in 0 to 1.0. We then
        #change this to the appropriate B prob. for all instances where there
        #was a state change.
        #import pdb; pdb.set_trace()
        Psi = np.zeros((self.nObs, self.K, 2, 2), dtype=float)
        #Psi = np.zeros((self.K,self.N,self.max_obs,2,2))
        Psi[:,:,0,0] = 1.0
        Psi[:,:,1,1] = 1.0
        #use diff to see if the state increased; if so, probs are based on B. Note
        #we have to insert at the beginning of S to get a diff that is the same
        #size as Psi in the time dimension
        state_change_idx = np.insert(S[1:] - S[:-1], 0, 0)
        state_change_idx[self.zeroIndices] = 0
        #import pdb; pdb.set_trace()
        Psi[state_change_idx.nonzero(),:,0,0] = (1 - B[:, S[state_change_idx.nonzero()]]).T
        Psi[state_change_idx.nonzero(),:,0,1] = B[:, S[state_change_idx.nonzero()]].T
        #state_change_idx = np.diff(np.insert(S[:,:],0,1000,axis=1),axis=1) > 0
        #Psi[:,state_change_idx,0,0] = 1-B[:,S[state_change_idx]]
        #Psi[:,state_change_idx,0,1] = B[:,S[state_change_idx]]
        return Psi

    #@do_profile()
    def computeLikelihoodOfX(self, X, Z, L):
        #LikelihoodOfX[nt,k,i] is the probability of O_nt given X_nt,k = i
        #import pdb; pdb.set_trace()
        LikelihoodOfX = np.zeros((self.nObs, self.K, 2))
        #Add an extra column to get trashed by all the -1's, removed in the following line
        O_on = np.zeros((self.nObs, self.D + 1), dtype='int8')
        O_on[np.arange(self.nObs), self.O.T] = 1
        O_on = O_on[:,:-1]
        Z_on = Z.T[self.O.T]
        #TODO: Double check that we should be using the current value of X here...
        XZprod_on = (1. - X.reshape(1, self.nObs, self.K) * (Z_on))
        otherKProduct_on = np.zeros((self.Dd, self.nObs, self.K))
        for k in xrange(self.K):
            otherKProduct_on[:,:,k] = XZprod_on[:,:,:k].prod(axis=2) * XZprod_on[:,:,k+1:].prod(axis=2)
        probGivenOnX0 = (1 - L[self.O.T])[:,:,np.newaxis] * otherKProduct_on
        probGivenOnX1 = probGivenOnX0.copy()
        probGivenOnX0 = (1. - probGivenOnX0).T * self.posMask + (1 - self.posMask)
        LikelihoodOfX[:,:,0] = (probGivenOnX0).prod(axis=2).T
        probGivenOnX1 = probGivenOnX1 * (1. - Z_on)
        probGivenOnX1 = (1. - probGivenOnX1).T * self.posMask + (1 - self.posMask)
        probGivenOffX1 = np.tile((1. - Z.T).reshape(self.D, 1, self.K), (1, self.nObs, 1))
        # Divide by Z_on values, so this assumes none are 0 at the moment!
        #TODO: Get this working for Z having 0 values
        totalZ = (1. - Z).prod(axis=1)
        Z_on_mask = (1. - Z_on).T * self.posMask + (1 - self.posMask)
        probGivenOffX1 = totalZ / (Z_on_mask).prod(axis=2).T
        #probGivenOffX1 = probGivenOffX1.T*self.negMask + (1-self.negMask)
        LikelihoodOfX[:,:,1] = (probGivenOnX1).prod(axis=2).T * probGivenOffX1
        #XZprod = (1. - X.reshape(self.nObs,1,self.K)*(Z.T).reshape(1,self.D,self.K))
        #otherKProduct = np.zeros((self.nObs,self.D,self.K))
        #for k in xrange(self.K):
        #    otherKProduct[:,:,k] = XZprod[:,:,:k].prod(axis=2)*XZprod[:,:,k+1:].prod(axis=2)
        #O_on = O_on.astype(np.bool)
        #probGivenOnX0 = (1-L.reshape(1,self.D,1))*otherKProduct
        #Can drop probGivenOffX0 since it can be divided out of probGivenOffX1
        #probGivenOnX1 = probGivenOnX0.copy()
        #probGivenOnX0[~O_on] = 0.
        #LikelihoodOfX[:,:,0] = (1.-probGivenOnX0).prod(axis=1)
        #probGivenOnX1 = probGivenOnX1*(1.-Z.T).reshape(1,self.D,self.K)
        #probGivenOffX1 = np.tile((1.-Z.T).reshape(1,self.D,self.K),(self.nObs,1,1))
        #probGivenOnX1[~O_on] = 0.
        #probGivenOffX1[O_on] = 1.
        #LikelihoodOfX[:,:,1] = (1.-probGivenOnX1).prod(axis=1)*probGivenOffX1.prod(axis=1)
        ##LikelihoodOfX[:,:,1] = (1.-probGivenOnX1).prod(axis=1)*probGivenOffX1.prod(axis=1)
        return LikelihoodOfX

    ## def computeLikelihoodOfXk(self, k, X, Z, L):
    ##     LikelihoodOfXk = np.zeros((self.nObs,2))
    ##
    ##     Z_pos = Z.T[self.OO.T]
    ##     Z_neg = np.tile(Z[k,:],(self.nObs,1))
    ##     XZ = X*Z_pos
    ##     prod_other_k = np.prod((1-XZ[:,:,np.arange(self.K) != k]),axis=2)
    ##
    ##     posTerms = (1-(1-L[self.OO.T])*prod_other_k)
    ##     posTermsMasked = posTerms*self.posMask.T + (1-self.posMask.T)
    ##     LikelihoodOfXk[:,0] = np.prod(posTermsMasked,axis=0)
    ##
    ##     posTerms = 1-(1-L[self.OO.T])*(1-Z_pos[:,:,k])*prod_other_k
    ##     posTermsMasked = posTerms*self.posMask.T + (1-self.posMask.T)
    ##     negTerms = 1-Z_neg
    ##     negTermsMasked = negTerms*self.negMask + (1-self.negMask)
    ##     LikelihoodOfXk[:,1] = np.prod(negTermsMasked,axis=1)*np.prod(posTermsMasked,axis=0)
    ##
    ##     return LikelihoodOfXk

    def computeBeta(self, Psi, LikelihoodOfX):
        beta = np.ones((self.nObs, self.K, 2))
        #import pdb; pdb.set_trace()
        for n in range(self.N):
            n0 = self.zeroIndices[n]
            for t in range(self.T[n] - 1, 0, -1):
                beta[n0+t-1,:,:] = np.sum(beta[n0+t,:,np.newaxis,:]*Psi[n0+t,:,:,:]*LikelihoodOfX[n0+t,:,np.newaxis,:], axis=2)
                beta[n0+t-1,:,:] = (beta[n0+t-1,:,:].T / np.sum(beta[n0+t-1,:,:], axis=1)).T
        return beta

    def computePX(self, beta, B0, S, LikelihoodOfX, Psi):
        pX0 = beta[self.zeroIndices]*np.array([1-B0[:,S[self.zeroIndices]],B0[:,S[self.zeroIndices]]]).T*LikelihoodOfX[self.zeroIndices,:,:]
        pXt = np.insert(beta[1:,:,:,np.newaxis]*Psi[np.tile(np.arange(1,self.nObs)[:,np.newaxis],(1,self.K)),np.tile(np.arange(self.K),(self.nObs-1,1)),:,:]*LikelihoodOfX[1:,:,:,np.newaxis],0,-1,axis=0)
        #pXt = np.insert(beta[1:,:,:]*Psi[np.tile(np.arange(1,self.nObs)[:,np.newaxis],(1,self.K)),np.tile(np.arange(self.K),(self.nObs-1,1)),X[:-1,:],:]*LikelihoodOfX[1:,:,:],0,-1,axis=0)
        #pXt = beta[:,:,:]*Psi[np.tile(np.arange(self.nObs)[:,np.newaxis],(1,self.K)),np.tile(np.arange(self.K),(self.nObs,1)),X[:,:],:]*LikelihoodOfX[:,:,:]
        #pXt = beta[:,:,:]*Psi[np.arange(self.nObs),:,X[:,:],:]*LikelihoodOfX[:,:,:]
        pXt[self.zeroIndices] = pX0[:,:,np.newaxis,:]
        #pXt[self.zeroIndices] = pX0
        return pXt

    #@profilingUtil.timefunc
    def astep(self, X):
        #import pdb; pdb.set_trace()
        #timer = profilingUtil.timewith('forwardX step')
        S, B0, B, Z, L = self.get_params()
        X = np.reshape(X, (self.nObs, self.K)).astype(np.int8)
        #X = np.reshape(X, (self.K,self.max_obs,self.N)).astype(np.int8)
        Psi = self.computePsi(S, B)
        #timer.checkpoint('Computed Psi')
        #beta = np.ones((self.nObs,self.K,2))
        LikelihoodOfX = self.computeLikelihoodOfX(X, Z, L)
        #import pdb; pdb.set_trace()
        #timer.checkpoint('Computed LikelihoodX')
        beta = self.computeBeta(Psi, LikelihoodOfX)
        #for n in range(self.N):
        #    n0 = self.zeroIndices[n]
        #    for t in range(self.T[n]-1,0,-1):
        #        beta[n0+t-1,:,:] = np.sum(beta[n0+t,:,:,np.newaxis]*Psi[n0+t,:,:,:]*LikelihoodOfX[n0+t,:,:,np.newaxis],axis=1)
        #        beta[n0+t-1,:,:] = (beta[n0+t-1,:,:].T/np.sum(beta[n0+t-1,:,:],axis=1)).T
        #pX0 = beta[self.zeroIndices]*np.array([1-B0[:,S[self.zeroIndices]],B0[:,S[self.zeroIndices]]]).T*LikelihoodOfX[self.zeroIndices+1,:,:]
        #pXt = beta[:,:,:]*Psi[np.tile(np.arange(self.nObs)[:,np.newaxis],(1,self.K)),np.tile(np.arange(self.K),(self.nObs,1)),X[:,:],:]*LikelihoodOfX[:,:,:]
        ##pXt = beta[:,:,:]*Psi[np.arange(self.nObs),:,X[:,:],:]*LikelihoodOfX[:,:,:]
        #pXt[self.zeroIndices] = pX0
        pXt = self.computePX(beta, B0, S, LikelihoodOfX, Psi)
        #import pdb; pdb.set_trace()
        X[self.zeroIndices] = self.sampleState(pXt[self.zeroIndices][:,:,0,:])
        #X[:,:] = self.sampleState(pXt)
        #DES Temp:
        logp = 0.0
        for n in xrange(self.N):
            n0 = self.zeroIndices[n]
            logp += np.log(pXt[n0, range(self.K), 0, X[n0]]).sum()
            for t in xrange(0, self.T[n] - 1):
                X[n0+t+1] = self.sampleState(pXt[n0+t+1][np.arange(0, self.K), X[n0+t]])
                logp += np.log(pXt[n0+t+1, range(self.K), X[n0+t], X[n0+t+1]]).sum()
        #for k in range(self.K):
        #    LikelihoodOfXk = self.computeLikelihoodOfXk(k,X,Z,L)
        #    timer.checkpoint('after computeLikelihoodOfXk')
        #
        #    for n in range(self.N):
        #        n0 = self.zeroIndices[n]
        #        for t in range(self.T[n]-1,0,-1):
        #            beta[n0+t-1,:] = np.sum(beta[n0+t,np.newaxis,:]*Psi[n0+t,k,:,:]*LikelihoodOfXk[n0+t,np.newaxis,:],axis=1)
        #            beta[n0+t-1,:] = (beta[n0+t-1,:].T/np.sum(beta[n0+t-1,:])).T
        ##    for t in range(self.max_obs-1,0,-1):
        ##        beta[t-1,:,:] = np.sum(beta[t,:,np.newaxis,:]*Psi[k,:,t,:,:]*LikelihoodOfXk[:,t,np.newaxis,:],axis=2)
        ##        beta[t-1,:,:] = (beta[t-1,:,:].T/np.sum(beta[t-1,:,:], axis=1)).T
        ##        beta[t-1,:,:] = beta[t-1,:,:]*self.betaMask[t-1,:,:]+(1-self.betaMask[t-1,:,:])
        #    timer.checkpoint('after nt loops')
        #
        #    #TODO: double check this zeroIndices+1 here
        #    pX0 = beta[self.zeroIndices]*np.array([1-B0[k,S[self.zeroIndices]],B0[k,S[self.zeroIndices]]]).T*LikelihoodOfXk[self.zeroIndices+1,:]
        #    #DES: What is the t variable doing here??
        #    #pX0 = beta[0,:,:]*np.array([1-B0[k,S[:,0]],B0[k,S[:,0]]]).T*LikelihoodOfXk[:,t,:]
        #    pXt = beta[:,:]*Psi[np.arange(self.nObs),k,X[:,k],:]*LikelihoodOfXk[:,:]
        #    pXt[self.zeroIndices] = pX0
        #    X[:,k] = self.sampleState(pXt)
        #    #X[self.zeroIndices,k] = self.sampleState(pX0)
        ##    for t in range(self.max_obs-1):
        ##        Xtk = X[k,t,:]
        ##        pXt = beta[t+1,:,:]*Psi[k,np.arange(self.N),t+1,Xtk,:]*LikelihoodOfXk[:,t+1,:]
        ##        X[k,t+1,:] = self.sampleState(pXt)
        #DES Temp:
        self.logp.append(logp)
        return X

    def astep_inplace(self, X):
        self.S, self.B0, self.B, self.Z, self.L = self.get_params()
        import pdb; pdb.set_trace()
        self.X = np.reshape(X, (self.K, self.max_obs, self.N)).astype(np.int8)
        #X_new = np.zeros((self.K,self.max_obs,self.N), dtype=np.int8) - 1
        for k in range(self.K):
            #note we keep Psi and pOt_GIVEN_Xt because they are used
            #in the computation of beta AND then again in the sampling forward of X
            beta = np.ones((self.max_obs, self.N, 2))
            Psi = np.zeros((self.max_obs, self.N, 2, 2))
            pOt_GIVEN_Xt = np.zeros((self.max_obs, self.N, 2))
            for n in range(self.N):
                for t in np.arange(self.T[n] - 1, -1, -1):
                    Xn = self.X[:,t,n]
                    #(A) Compute Psi, which is the probability of jumping to state X_{t+1}=j
                    #given you're in state X_{t}=i and S_{t}=m.
                    Psi[t,n,1,1] = 1.0
                    #if you did NOT change state, the probability of getting a new
                    #comorbidity is zero. If you did change state, the new state
                    #has comorbidity onsets associated with it, i.e. B
                    #if t == 0:
                    #    import pdb; pdb.set_trace()
                    if self.S[n,t] == self.S[n,t-1]:
                        Psi[t,n,0,0] = 1.0
                        Psi[t,n,0,1] = 0.0
                    else:
                        Psi[t,n,0,0] = 1 - self.B[k, self.S[n,t]]
                        Psi[t,n,0,1] = self.B[k, self.S[n,t]]
                    #(B) Compute pOt_GIVEN_Xt, i.e. the likelihood of X_t,k given Ot and all
                    #other X_t,l where l =/= k
                    pos_O_idx_n_t = self.pos_O_idx[:,t,n]
                    Z_pos = self.Z[:, pos_O_idx_n_t]
                    #compute prod_other_k, which is the product term in eq. 13 over k' \neq k.
                    #we compute it for all k, i.e. the kth row is the product over all k's
                    #except that k. we use Cython here
                    XZ_t = (Xn * Z_pos.T).T
                    prod_other_k = np.prod(1 - XZ_t[np.arange(self.K) != k, :], axis=0)
                    pOt_GIVEN_Xt[t,:,0] = np.prod(1 - (1 - self.L[pos_O_idx_n_t]) * \
                                                  prod_other_k)
                    pOt_GIVEN_Xt[t,:,1] = np.prod(1 - self.Z[k, np.logical_not(pos_O_idx_n_t)]) * \
                                          np.prod(1 - (1 - self.L[pos_O_idx_n_t]) * \
                                                  (1 - Z_pos[k,:]) * prod_other_k)
                    #(C) Now actually set the beta (finally).
                    #we want this loop to go down to zero so we compute pOt_GIVEN_Xt[0],
                    #which we need in section (2) to sample the initial X, but obviously
                    #we won't want to go down to beta[-1], so we skip this part. Just
                    #a little trick to not have to repeat the code to get pOt_GIVEN_Xt[0]
                    if t < 1:
                        break
                    #beta[:,:,t] = beta[:,:,t] / np.sum(beta[:,:,t],axis=0)
                    #pOt_GIVEN_Xt[:,:,t] = pOt_GIVEN_Xt[:,:,t] / np.sum(pOt_GIVEN_Xt[:,:,t], axis=0)
                    beta[t-1,:,:] = np.sum(beta[t,n,:] * Psi[t,n,:,:] * \
                                           pOt_GIVEN_Xt[t,n,:], axis=1)
                    beta[t-1,:,:] = beta[t-1,n,:] / np.sum(beta[t-1,n,:], axis=0)
                #(2) sample X_new
                #(A) Sample starting comorbidities
                pX0_GIVEN_O0 = beta[0,n,:] * \
                    np.array([1 - self.B0[k, self.S[n,0]], self.B0[k, self.S[n,0]]]) * \
                    pOt_GIVEN_Xt[0,n,:]
                self.X[k,0,n] = self.sampleState(pX0_GIVEN_O0)
                #import pdb; pdb.set_trace()
                #(B) Sample the rest of the X's through time
                for t in xrange(0, self.T[n] - 1):
                    X_i = self.X[k,t,n]
                    pXt_next = beta[t+1,n,:] * Psi[t+1,n,X_i,:] * pOt_GIVEN_Xt[t,n,:]
                    self.X[k,t+1,n] = self.sampleState(pXt_next)
        return self.X

    def astep_old(self, X):
        self.S, self.B0, self.B, self.Z, self.L = self.get_params()
        self.K = self.B.shape[0]
        self.X = np.reshape(X, (self.K, self.max_obs, self.N)).astype(np.int8)
        X_new = np.zeros((self.K, self.max_obs, self.N), dtype=np.int8) - 1
        #note we keep Psi and pOt_GIVEN_Xt because they are used
        #in the computation of beta AND then again in the sampling forward of X
        beta = np.ones((2, self.K, self.max_obs))
        Psi = np.zeros((2, 2, self.K, self.max_obs))
        pOt_GIVEN_Xt = np.zeros((2, self.K, self.max_obs))
        for n in xrange(self.N):
            Xn = self.X[:,:,n]
            pos_O_idx_n = self.pos_O_idx[:,:,n]
            #(1) compute beta, a.k.a. the backwards variables, a.k.a. the
            #likelihood of X given the entire time series of observations
            for t in np.arange(1):
                #for t in np.arange(self.T[n]-1, -1, -1):
                #(A) Compute Psi, which is the probability of jumping to state X_{t+1}=j
                #given you're in state X_{t}=i and S_{t}=m. Note the probability of
                #getting the comorbidity once you already have it is one. The prob.
                #of not having the comorbidity once you've already had it is zero
                Psi[1,1,:,t] = 1.0
                #if you did NOT change state, the probability of getting a new
                #comorbidity is zero. If you did change state, the new state
                #has comorbidity onsets associated with it, i.e. B
                if self.S[n,t] == self.S[n,t-1]:
                    Psi[0,0,:,t] = 1.0
                    Psi[0,1,:,t] = 0.0
                else:
                    Psi[0,0,:,t] = 1 - self.B[:, self.S[n,t]]
                    Psi[0,1,:,t] = self.B[:, self.S[n,t]]
                #(B) Compute pOt_GIVEN_Xt, i.e. the likelihood of X_t,k given Ot and all
                #other X_t,l where l =/= k
                pos_O_idx_n_t = pos_O_idx_n[:,t]
                Z_pos = self.Z[:, pos_O_idx_n_t]
                #compute prod_other_k, which is the product term in eq. 13 over k' \neq k.
                #we compute it for all k, i.e. the kth row is the product over all k's
                #except that k. we use Cython here
                XZ_t = (Xn[:,t] * Z_pos.T).T
                n_pos_O = np.sum(pos_O_idx_n_t)
                prod_other_k = np.zeros((self.K, n_pos_O))
                prod_other_k = compute_prod_other_k.compute(XZ_t, n_pos_O, self.K)
                ####
                self.L[pos_O_idx_n_t] = 0.01
                ####
                pOt_GIVEN_Xt[0,:,t] = np.prod(1 - (1 - self.L[pos_O_idx_n_t]) * \
                                              prod_other_k, axis=1)
                pOt_GIVEN_Xt[1,:,t] = np.prod(1 - self.Z[:, np.logical_not(pos_O_idx_n_t)], axis=1) * \
                                      np.prod(1 - (1 - self.L[pos_O_idx_n_t]) * \
                                              (1 - Z_pos) * prod_other_k, axis=1)
                '''
                prob = pOt_GIVEN_Xt[:,:,t] / np.sum(pOt_GIVEN_Xt[:,:,t])
                r = np.random.uniform(size=self.K)
                drawn_state = np.greater_equal(r, prob[0,:])
                Xn[:,t] = drawn_state
                '''
                #if n==5 and t==0:
                #    print Xn[:,t], '\n'
                '''
                for k in range(self.K):
                    X_on = np.copy(Xn[:,t])
                    X_on[k] = 1
                    XZ_on = (X_on*self.Z.T).T
                    X_off = np.copy(Xn[:,t])
                    X_off[k] = 0
                    XZ_off = (X_off*self.Z.T).T
                    pOt_GIVEN_Xt[0,k,t] = np.prod(1-(1-self.L[pos_O_idx_n_t])*np.prod(1-XZ_off[:,pos_O_idx_n_t],axis=0))*\
                        np.prod((1-self.L[np.logical_not(pos_O_idx_n_t)])*np.prod(1-XZ_off[:,np.logical_not(pos_O_idx_n_t)],axis=0))
                    pOt_GIVEN_Xt[1,k,t] = np.prod(1-(1-self.L[pos_O_idx_n_t])*np.prod(1-XZ_on[:,pos_O_idx_n_t],axis=0))*np.prod((1-self.L[np.logical_not(pos_O_idx_n_t)])*np.prod(1-XZ_on[:,np.logical_not(pos_O_idx_n_t)],axis=0))
                    prob = pOt_GIVEN_Xt[:,k,t] / np.sum(pOt_GIVEN_Xt[:,k,t])
                    r = np.random.uniform()
                    drawn_state = np.greater_equal(r, prob[0])
                    Xn[k,t] = drawn_state
                '''
                if n == 5 and t == 0:
                    import pdb; pdb.set_trace()
                    print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\nX0:\n',Xn[:,t],'\nZ_pos:\n',Z_pos,'\nXZ_t:\n',XZ_t,'\nprod_other_k:\n',prod_other_k,'\n1-L:\n',(1-self.L[pos_O_idx_n_t]),'\nunmult_off\n',1-(1-self.L[pos_O_idx_n_t])* prod_other_k,'\npOFF:\n',pOt_GIVEN_Xt[0,:,t],'\nZ_factor\n',np.prod(1-self.Z[:,np.logical_not(pos_O_idx_n_t)], axis=1),'\nunmult_ON:\n', (1 - (1-self.L[pos_O_idx_n_t])*(1-Z_pos)*prod_other_k),'\npON\n',np.prod(1-self.Z[:,np.logical_not(pos_O_idx_n_t)], axis=1)*np.prod(1 - (1-self.L[pos_O_idx_n_t])*(1-Z_pos)*prod_other_k, axis=1),'\nnorm:\n',pOt_GIVEN_Xt[:,:,0] / np.sum(pOt_GIVEN_Xt[:,:,0],axis=0), '\nbeta\n', beta, '\n'
                    #print '!!!...\n:L', self.L[pos_O_idx_n_t], '\nX0', Xn[:,t], '\n'
                    #print '!!!...\nX0', Xn[:,t], '\n'
                    #print pOt_GIVEN_Xt[:,:,0] / np.sum(pOt_GIVEN_Xt[:,:,0],axis=0), '\nZ[0:10,0]:', Z_pos[0:10,0], '\nL[0:10]', self.L[pos_O_idx_n_t][0:10],'\n', 'Z_factor:', np.prod(1-self.Z[:,np.logical_not(pos_O_idx_n_t)], axis=1),'\n1-L:', (1-self.L[pos_O_idx_n_t])[0:10], '\n'
                    #print '\nother_k: ', prod_other_k
                    #print 'Z_factor:', np.prod(1-self.Z[:,np.logical_not(pos_O_idx_n_t)], axis=1)
                    #print 'pO', pOt_GIVEN_Xt[:,:,0]
                #(C) Now actually set the beta (finally).
                #we want this loop to go down to zero so we compute pOt_GIVEN_Xt[0],
                #which we need in section (2) to sample the initial X, but obviously
                #we won't want to go down to beta[-1], so we skip this part. Just
                #a little trick to not have to repeat the code to get pOt_GIVEN_Xt[0]
                if t < 1:
                    break
                #beta[:,:,t] = beta[:,:,t] / np.sum(beta[:,:,t],axis=0)
                #pOt_GIVEN_Xt[:,:,t] = pOt_GIVEN_Xt[:,:,t] / np.sum(pOt_GIVEN_Xt[:,:,t], axis=0)
                beta[:,:,t-1] = np.sum(beta[:,:,t] * Psi[:,:,:,t] * \
                                       pOt_GIVEN_Xt[:,:,t], axis=1)
                beta[:,:,t-1] = beta[:,:,t-1] / np.sum(beta[:,:,t-1], axis=0)
            #(2) sample X_new
            #(A) Sample starting comorbidities
            pX0_GIVEN_O0 = beta[:,:,0] * \
                np.array([1 - self.B0[:, self.S[n,0]], self.B0[:, self.S[n,0]]]) * \
                pOt_GIVEN_Xt[:,:,0]
            X_new[:,0,n] = self.sampleState(pX0_GIVEN_O0)
            #if n==5:
            #    print '\nbeta', beta[:,:,0]
            #    print 'pS', np.array([self.B0[:,self.S[n,0]],1-self.B0[:,self.S[n,0]]])
            #    print 'pO', pOt_GIVEN_Xt[:,:,0]
            #    print 'p', pX0_GIVEN_O0
            #    print 'X_new', X_new[:,0,n]
            #(B) Sample the rest of the X's through time
            for t in xrange(0, self.T[n] - 1):
                Xnt = X_new[:,t,n]
                pXt_next = beta[:,:,t+1] * Psi[Xnt,:,0,t+1].T * pOt_GIVEN_Xt[:,:,t+1]
                X_new[:,t+1,n] = self.sampleState(pXt_next)
        return X_new

def evaluate_symbolic_shared(S, B0, B, Z, L):
    f = function([], [S, B0, B, Z, L])
    return f
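
# Added sketch: the uniform-threshold sampling trick used in sampleState,
# shown on a self-contained binary example (illustrative only, not part of
# the source; numpy import repeated so the sketch stands alone).
import numpy as np
pX = np.array([[0.25, 0.75], [0.9, 0.1]])  # rows: unnormalized P(x=0), P(x=1)
p0 = pX[:, 0] / pX.sum(axis=1)             # normalized P(x=0) per row
# draw 1 with probability P(x=1): r >= P(x=0) happens with prob 1 - P(x=0)
draws = (np.random.uniform(size=p0.shape) >= p0).astype(np.int8)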
|
11548935
|
import base64
import json
from collections.abc import Mapping

import boto3

from logger import logger

class Event(Mapping):
    def __init__(self, event):
        self.event = event

    def __getitem__(self, key):
        return self.event[key]

    def __iter__(self):
        return iter(self.event)

    def __len__(self):
        return len(self.event)

class EventBridgeEvent(Event):
    @property
    def body(self):
        return self.get('body')

    @property
    def headers(self):
        return self.get('headers')

    @property
    def task_token(self):
        return self.get('task-token')

    @property
    def url(self):
        return self.get('url')

class HttpEvent(Event):
    @property
    def body(self):
        if self.get('isBase64Encoded'):
            return base64.b64decode(self['body']).decode()
        return self.get('body')

    @property
    def headers(self):
        headers = self.get('headers') or {}
        return {k.lower(): v for k, v in headers.items()}

    @property
    def query(self):
        return self.get('queryStringParameters')

    @property
    def route_key(self):
        return self.get('routeKey')

    @property
    def trace_header(self):
        return self.headers.get('x-amzn-trace-id')

class Events:
    def __init__(self, bus=None, source=None, boto3_session=None):
        self.bus = bus or 'default'
        self.source = source or 'slack'
        self.boto3_session = boto3_session or boto3.Session()
        self.client = self.boto3_session.client('events')

    def publish(self, detail_type, detail, trace_header=None):
        entry = dict(
            Detail=json.dumps(detail),
            DetailType=detail_type,
            EventBusName=self.bus,
            Source=self.source,
            TraceHeader=trace_header,
        )
        params = dict(Entries=[{k: v for k, v in entry.items() if v}])
        logger.info('PUT EVENTS %s', logger.json(params))
        return self.client.put_events(**params)
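
# Added sketch: HttpEvent transparently decoding a base64-encoded API Gateway
# body (the event dict below is an illustrative assumption, not from the
# source; it exercises only the class defined above).
demo = HttpEvent({"isBase64Encoded": True,
                  "body": base64.b64encode(b"hello").decode()})
assert demo.body == "hello"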
|
11548946
|
from django.contrib.messages import success
from django.shortcuts import redirect, render
from django.utils.translation import ugettext as _

from .forms import FileForm
from .models import File

def index(request):
    form = FileForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        form.save()
        success(request, _('You successfully uploaded the file!'))
        return redirect(request.path_info)
    return render(request, 'index.html', {
        'form': form,
        'last_3_files': File.objects.order_by('-id')[:3]
    })
|
11548953
|
import os
import cv2
import sys
import time
import socket

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import scipy.misc as sm
import numpy as np
import scipy.io as sio
from os import listdir, makedirs, system
from argparse import ArgumentParser

from utils import *
from det_lstm import DET_LSTM

def merge(images, size):
    # Tile a batch of single-channel images into one (size[0] x size[1]) grid.
    h, w = images.shape[1], images.shape[2]
    img = np.zeros((h * size[0], w * size[1]))
    for idx, image in enumerate(images):
        i = idx % size[1]
        j = idx / size[1]
        img[j * h:j * h + h, i * w:i * w + w] = image
    return img

def transform(input_):
    return 2 * input_ - 1.

def inverse_transform(input_):
    return (input_ + 1.) / 2.

def imsave(images, size, path):
    return sm.imsave(path, merge(images, size))

def visualize_lm(posex, posey, visib, lines, image_size):
    # Render a 13-landmark pose as a stack of Gaussian limb heatmaps and
    # collapse them into one image via a max over channels.
    posey = inverse_transform(posey) * image_size
    posex = inverse_transform(posex) * image_size
    cpose = np.zeros((image_size, image_size, 48))
    for j in xrange(12):
        if (visib[lines[j][0]] and visib[lines[j][1]] and
                visib[lines[j][2]] and visib[lines[j][3]]):
            interp_x = np.linspace((posex[lines[j][0]] + posex[lines[j][1]]) / 2,
                                   (posex[lines[j][2]] + posex[lines[j][3]]) / 2, 4,
                                   True)
            interp_y = np.linspace((posey[lines[j][0]] + posey[lines[j][1]]) / 2,
                                   (posey[lines[j][2]] + posey[lines[j][3]]) / 2, 4,
                                   True)
            for k in xrange(4):
                gmask = gauss2D_mask(
                    (interp_y[k], interp_x[k]), (image_size, image_size), sigma=8.)
                cpose[:, :, j * 4 + k] = gmask / gmask.max()
        else:
            if visib[lines[j][0]] and visib[lines[j][1]]:
                point_x = (posex[lines[j][0]] + posex[lines[j][1]]) / 2
                point_y = (posey[lines[j][0]] + posey[lines[j][1]]) / 2
                gmask = gauss2D_mask(
                    (point_y, point_x), (image_size, image_size), sigma=8.)
                cpose[:, :, j * 4] = gmask / gmask.max()
            if visib[lines[j][2]] and visib[lines[j][3]]:
                point_x = (posex[lines[j][2]] + posex[lines[j][3]]) / 2
                point_y = (posey[lines[j][2]] + posey[lines[j][3]]) / 2
                gmask = gauss2D_mask(
                    (point_y, point_x), (image_size, image_size), sigma=8.)
                cpose[:, :, (j + 1) * 4 - 1] = gmask / gmask.max()
    return np.amax(cpose, axis=2)

def main(gpu, image_size, batch_size, num_layer, lstm_units, seen_step,
         fut_step, mem_frac, keep_prob, learning_rate):
    lm_size = 13
    input_size = lm_size * 2
    num_class = 8
    prefix = 'PENNACTION_DET_LSTM'
    for kk, vv in locals().iteritems():
        if kk != 'prefix' and kk != 'mem_frac' and kk != 'gpu':
            prefix += '_' + kk + '=' + str(vv)
    layers = []
    for i in range(num_layer):
        layers.append(lstm_units)
    lines = [[0, 0, 1, 2], [1, 1, 2, 2], [1, 1, 3, 3], [3, 3, 5, 5],
             [2, 2, 4, 4], [4, 4, 6, 6], [1, 2, 7, 8], [7, 7, 8, 8],
             [7, 7, 9, 9], [9, 9, 11, 11], [8, 8, 10, 10], [10, 10, 12, 12]]
    class_dict = {
        'baseball_pitch': 0,
        'baseball_swing': 1,
        'clean_and_jerk': 2,
        'golf_swing': 3,
        'jumping_jacks': 4,
        'jump_rope': 5,
        'tennis_forehand': 6,
        'tennis_serve': 7
    }
    samples_dir = './samples/' + prefix
    models_dir = './models/' + prefix
    logs_dir = './logs/' + prefix
    data_path = './datasets/PennAction/'
    trainfiles = open(data_path + 'train_subset_list.txt', 'r').readlines()
    alldata = []
    for i in xrange(len(trainfiles)):
        vid_path = trainfiles[i].split()[0]
        tks = vid_path.split('frames')
        tdata = np.load(data_path + 'labels/' + tks[1][1:] + '.npz')
        data = {}
        for kk, vv in tdata.iteritems():
            data[kk] = vv
        data['x'] = data['x'] / (1.0 * data['bbox'][0, 3] - data['bbox'][0, 1])
        data['y'] = data['y'] / (1.0 * data['bbox'][0, 2] - data['bbox'][0, 0])
        alldata.append(data)
    with tf.device('/gpu:%d' % gpu):
        lstm = DET_LSTM(batch_size, input_size, layers, seen_step, fut_step,
                        keep_prob, logs_dir, learning_rate)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_frac)
    with tf.Session(
            config=tf.ConfigProto(
                allow_soft_placement=True,
                log_device_placement=False,
                gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        loaded, model_name = lstm.load(sess, models_dir)
        if loaded:
            print("[*] Load SUCCESS")
            step = int(model_name.split("-")[-1])
        else:
            print("[!] Load failed...")
            step = 0
        total_steps = round(600000 * 16 / batch_size)
        del_list = None
        while step < total_steps:
            mini_batches, del_list = get_minibatches_idx(
                len(trainfiles),
                batch_size,
                shuffle=True,
                min_frame=None,
                trainfiles=trainfiles,
                del_list=del_list)
            for _, batchidx in mini_batches:
                start_time = time.time()
                if len(batchidx) == batch_size:
                    pose_batch = np.zeros(
                        (batch_size, seen_step + fut_step, input_size), dtype='float32')
                    mask_batch = np.zeros(
                        (batch_size, seen_step + fut_step, lm_size), dtype='float32')
                    act_batch = np.zeros((batch_size, num_class), dtype='int32')
                    for i in xrange(batch_size):
                        ff = alldata[batchidx[i]]
                        high = ff['nframes'] - fut_step - seen_step + 1
                        if ff['nframes'] < fut_step + seen_step:
                            stidx = 0
                        else:
                            stidx = np.random.randint(
                                low=0, high=ff['nframes'] - fut_step - seen_step + 1)
                        posey = transform(ff['y'][stidx:stidx + seen_step + fut_step, :])
                        posex = transform(ff['x'][stidx:stidx + seen_step + fut_step, :])
                        visib = ff['visibility'][stidx:stidx + seen_step + fut_step]
                        if posey.shape[0] < fut_step + seen_step:
                            n_missing = fut_step + seen_step - posey.shape[0]
                            posey = np.concatenate(
                                (posey, np.tile(posey[-1], (n_missing, 1))), axis=0)
                            posex = np.concatenate(
                                (posex, np.tile(posex[-1], (n_missing, 1))), axis=0)
                            visib = np.concatenate(
                                (visib, np.tile(visib[-1], (n_missing, 1))), axis=0)
                        pose_batch[i] = np.concatenate((posex, posey), axis=1)
                        mask_batch[i] = visib
                        act_batch[i, class_dict[str(ff['action'][0])]] = 1
                        lbl = act_batch[i].argmax()
                    mid_time = time.time()
                    err = lstm.train(
                        sess, pose_batch, mask_batch, step, save_logs=True)
                    if step % 100 == 0:
                        output = lstm.predict(sess, pose_batch, mask_batch)
                        samples = None
                        for idx in range(1):
                            for stp in range(seen_step + fut_step):
                                pre = output[idx, stp, :2 * lm_size]
                                posex, posey, visib = (pre[:lm_size], pre[lm_size:],
                                                       np.ones(mask_batch[idx, stp, :].shape))
                                act = class_dict.keys()[
                                    class_dict.values().index(act_batch[idx].argmax())]
                                visib = np.ones(posex.shape)
                                sample = visualize_lm(posex, posey, visib, lines, image_size)
                                sample = sample.reshape((1, image_size, image_size))
                                # `is None` here: `samples == None` on a numpy array
                                # does an elementwise comparison and raises on the
                                # second iteration
                                samples = sample if samples is None else np.concatenate(
                                    [samples, sample], axis=0)
                        if not os.path.exists(samples_dir):
                            os.makedirs(samples_dir)
                        img_save_path = samples_dir + '/{0:07d}'.format(
                            step) + '_' + act + '.png'
                        imsave(samples, [1, seen_step + fut_step], img_save_path)
                        print('step=%d/%d, loss=%.12f, time=%.2f+%.2f' % (
                            step, total_steps, err, mid_time - start_time,
                            time.time() - mid_time))
                    if step >= 10000 and step % 10000 == 0:
                        lstm.save(sess, models_dir, lstm.global_step)
                    step = step + 1
        lstm.save(sess, models_dir, lstm.global_step)

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument(
        "--gpu", type=int, dest="gpu", required=True, help="GPU device id")
    parser.add_argument(
        "--image_size",
        type=int,
        default=128,
        dest="image_size",
        help="Spatial size of image")
    parser.add_argument(
        "--batch_size",
        type=int,
        default=256,
        dest="batch_size",
        help="Batch size for training")
    parser.add_argument(
        "--num_layer",
        type=int,
        default=1,
        dest="num_layer",
        help="Number of hidden layers for LSTM")
    parser.add_argument(
        "--lstm_units",
        type=int,
        default=1024,
        dest="lstm_units",
        help="Number of hidden units for LSTM")
    parser.add_argument(
        "--seen_step",
        type=int,
        default=10,
        dest="seen_step",
        help="Number of seen steps")
    parser.add_argument(
        "--fut_step",
        type=int,
        default=32,
        dest="fut_step",
        help="Number of steps into future")
    parser.add_argument(
        "--mem_frac",
        type=float,
        default=0.4,
        dest="mem_frac",
        help="GPU memory fraction to take up")
    parser.add_argument(
        "--keep_prob",
        type=float,
        default=1.0,
        dest="keep_prob",
        help="Keep probability for dropout")
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=0.001,
        dest="learning_rate",
        help="Learning rate for training")
    args = parser.parse_args()
    main(**vars(args))
|
11548984
|
import json
from datetime import date
import scrapy
from dateutil.rrule import DAILY, rrule
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class SigpubGazetteSpider(BaseGazetteSpider):
"""www.diariomunicipal.com.br (Sigpub) base spider
Documents obtained by this kind of spider are text-PDFs with many cities in it.
That's because the websites are usually made for associations of cities.
TODO:
- All variations have a "possible" start date of 01/01/2009, but that may cause
many unnecessary requests to be made if they actually start making available
documents later. Some investigation for the start date of each website needs to
be made in this case.
Observations:
- These websites have an "Advanced Search", but they are protected by ReCaptcha.
"""
start_date = date(2009, 1, 1)
def start_requests(self):
"""Requests start page where the calendar widget is available."""
yield scrapy.Request(self.CALENDAR_URL, callback=self.parse_calendar)
def parse_calendar(self, response):
"""Makes requests for each date to see if a document is available."""
default_form_fields = {
"calendar[_token]": response.xpath(
"//input[@id='calendar__token']/@value"
).get()
}
for gazette_date, date_form_fields in self.available_dates_form_fields():
formdata = {**default_form_fields, **date_form_fields}
yield scrapy.FormRequest(
url=response.urljoin("materia/calendario"),
formdata=formdata,
meta={"date": gazette_date, "edition_type": "regular"},
callback=self.parse_gazette_info,
)
yield scrapy.FormRequest(
url=response.urljoin("materia/calendario/extra"),
formdata=formdata,
meta={"date": gazette_date, "edition_type": "extra"},
callback=self.parse_gazette_info,
)
def parse_gazette_info(self, response):
"""Parses document availability endpoint and gets document URL if available."""
body = json.loads(response.text)
meta = response.meta
if "error" in body:
self.logger.debug(
f"{meta['edition_type'].capitalize()} Gazette not available for {meta['date'].date()}"
)
return
for edition in body["edicao"]:
url = f"{body['url_arquivos']}{edition['link_diario']}.pdf"
yield Gazette(
date=meta["date"].date(),
file_urls=[url],
power="executive_legislative",
is_extra_edition=(meta["edition_type"] == "extra"),
edition_number=edition.get("numero_edicao", ""),
)
def available_dates_form_fields(self):
"""Generates dates and corresponding form fields for availability endpoint."""
available_dates = rrule(
freq=DAILY, dtstart=self.start_date, until=self.end_date
)
for query_date in available_dates:
form_fields = {
"calendar[day]": str(query_date.day),
"calendar[month]": str(query_date.month),
"calendar[year]": str(query_date.year),
}
yield query_date, form_fields
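# --- Illustrative subclass (not part of the original module) ---
# Minimal sketch of a concrete spider built on this base; the name and
# CALENDAR_URL below are placeholders, and TERRITORY_ID is assumed to be the
# attribute concrete spiders normally define.
class ExampleSigpubSpider(SigpubGazetteSpider):
    name = "example_sigpub"
    TERRITORY_ID = "0000000"
    allowed_domains = ["diariomunicipal.com.br"]
    CALENDAR_URL = "https://www.diariomunicipal.com.br/example_association"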
|
11549002
|
from file_system import manager
from multiprocessing.connection import Listener, wait
import threading
import time
def main():
fsm = manager.FileSystemManager()
address = ('localhost', 7000)
    print('Listening on', address)
while True:
with Listener(address, authkey=b's<PASSWORD>') as listener:
with listener.accept() as conn:
print('connection accepted from', listener.last_accepted)
msg = conn.recv()
fsm.input(msg)
result = fsm.output()
conn.send(str(result))
if __name__ == '__main__':
main()
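# --- Illustrative client (not part of the original module) ---
# Minimal sketch of a matching client; the authkey must equal the server's
# (redacted above) key for the connection to authenticate:
#   from multiprocessing.connection import Client
#   with Client(('localhost', 7000), authkey=b'...') as conn:
#       conn.send('some command')   # fed into fsm.input() on the server
#       print(conn.recv())          # str(fsm.output()) sent back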
|
11549090
|
from office365.runtime.client_runtime_context import ClientRuntimeContext
class ExcelService(ClientRuntimeContext):
def __init__(self, context):
"""
Excel Services REST API client
https://docs.microsoft.com/en-us/sharepoint/dev/general-development/excel-services-rest-api
"""
super(ExcelService, self).__init__()
def authenticate_request(self, request):
pass
def service_root_url(self):
return "{0}/_vti_bin/ExcelRest.aspx"
def pending_request(self):
pass
def get_workbook(self, list_name, file_name):
return self
|
11549104
|
from leapp import reporting
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.stdlib import api
from leapp.models import StorageInfo
# man 5 xfs
REMOVED_XFS_OPTIONS = set([
# removed from kernel in 4.0
'nodelaylog',
'delaylog',
'ihashsize',
'irixsgid',
'osyncisdsync',
'osyncisosync',
# removed from kernel in 4.19
'nobarrier',
'barrier',
])
def _get_storage_data():
storage = next(api.consume(StorageInfo), None)
if not storage:
raise StopActorExecutionError('The StorageInfo message is not available.')
if not storage.fstab:
raise StopActorExecutionError('Data from the /etc/fstab file is missing.')
return storage
def process():
storage = _get_storage_data()
used_removed_options = set()
for entry in storage.fstab:
if entry.fs_vfstype == 'xfs':
# NOTE: some opts could have a value, like ihashsize=4096 - we want
# just the name of the option (that's why the double-split)
options = set([opt.split('=')[0] for opt in entry.fs_mntops.split(',')])
used_removed_options.update(options.intersection(REMOVED_XFS_OPTIONS))
if not used_removed_options:
return
list_separator_fmt = '\n - '
reporting.create_report([
reporting.Title('Deprecated XFS mount options present in FSTAB.'),
        reporting.Summary(
            'Some XFS mount options are not supported on RHEL 8 and prevent the'
            ' system from booting correctly if any of them is used. The full set'
            ' of removed XFS mount options is:{}{}.'.format(
                list_separator_fmt,
                list_separator_fmt.join(list(REMOVED_XFS_OPTIONS)))),
reporting.Severity(reporting.Severity.HIGH),
reporting.Flags([reporting.Flags.INHIBITOR]),
reporting.Tags([reporting.Tags.FILESYSTEM]),
reporting.RelatedResource('file', '/etc/fstab'),
reporting.Remediation(hint=(
'Drop the following mount options from the /etc/fstab file for any'
' XFS filesystem: {}.'.format(', '.join(used_removed_options)))),
])
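# --- Illustrative note (not part of the original actor) ---
# Sketch of the option-name extraction used in process(): mount options may
# carry values (e.g. 'ihashsize=4096'), so only the name before '=' is kept:
#   >>> sorted(opt.split('=')[0] for opt in 'rw,ihashsize=4096,nobarrier'.split(','))
#   ['ihashsize', 'nobarrier', 'rw']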
|
11549141
|
import functools
import warnings
import sklearn.metrics as skmetrics
def _handle_cropped(y_p):
"""
A straightforward helper that simply averages multiple crops if they are present.
Parameters
----------
y_p: np.ndarray
The predicted values with shape batch x targets (x <optional crops>)
Returns
-------
y_p_mean: np.ndarray
If there is an additional crop dimensions, mean across this dimension
"""
if len(y_p.shape) == 2:
return y_p
elif len(y_p.shape) == 3:
return y_p.mean(-1)
else:
raise ValueError("Predictions should be 1 or 2 dimensions in shape (excluding batches)")
def _binarize_two_class(y_p):
if y_p.shape[-1] == 2:
return y_p[..., -1]
elif y_p.shape[-1] > 2:
# print("This simple metric implementation doesn't support multi-class targets.")
return 0
def _get_prediction(outputs):
"""Checks if multiple outputs were provided, and selects"""
if isinstance(outputs, (list, tuple)):
return outputs[0]
return outputs
def dn3_sklearn_metric(func):
@functools.wraps(func)
def wrapper(inputs, outputs, **kwargs):
outputs = _get_prediction(outputs)
y_p = _handle_cropped(outputs.detach().cpu().numpy()).argmax(-1)
y_t = inputs[-1].detach().cpu().numpy()
# Get all sorts of warning during training because batches aren't stable, we ignore these
# careful because this could make debugging real problems in val/test impossible
# TODO have some sort of warning system for the library to not do this when debugging...
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return func(y_t, y_p, **kwargs)
return wrapper
def dn3_sklearn_binarized(func):
@functools.wraps(func)
def wrapper(y_t, y_p, **kwargs):
y_p = _get_prediction(y_p)
y_p = _binarize_two_class(y_p)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return func(y_t[-1].detach().cpu().numpy(), y_p.detach().cpu().numpy(), **kwargs)
return wrapper
@dn3_sklearn_binarized
def auroc(y_t, y_p):
return skmetrics.roc_auc_score(y_t, y_p)
@dn3_sklearn_metric
def balanced_accuracy(y_t, y_p):
return skmetrics.balanced_accuracy_score(y_t, y_p)
@dn3_sklearn_metric
def kappa(y_t, y_p):
return skmetrics.cohen_kappa_score(y_t, y_p)
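# --- Illustrative usage (not part of the original module) ---
# Minimal smoke test, assuming torch is available (this module does not import
# it itself); `inputs` is the dn3-style tuple whose last element holds targets.
if __name__ == "__main__":
    import torch
    outputs = torch.randn(8, 2)                      # batch x classes, no crop dim
    inputs = (torch.zeros(8, 4), torch.randint(0, 2, (8,)))
    print(balanced_accuracy(inputs, outputs))
    print(kappa(inputs, outputs))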
|
11549150
|
from __future__ import division, unicode_literals, absolute_import
import numpy as np
try:
import lalsimulation as lalsim
except Exception:
pass
class lal_wrapper(object):
def __init__(self, approx, domain):
self.approx = lalsim.__dict__[approx]
self.domain = domain
def __call__(self, freqs, params):
if self.domain == 'time' :
return generate_timedomain_waveform(self.approx, params)
elif self.domain == 'freq' :
fr, hp, hc = generate_freqdomain_waveform(self.approx, params)
indxs = np.where((fr>=params['f_min'])&(fr<=params['f_max']))
return hp[indxs], hc[indxs]
else:
raise ValueError("Unable to generate LAL waveform, invalid domain.")
def generate_timedomain_waveform(approx, params):
"""
SimInspiralChooseTDWaveform:
REAL8TimeSeries **hplus, /**< +-polarization waveform */
REAL8TimeSeries **hcross, /**< x-polarization waveform */
const REAL8 m1, /**< mass of companion 1 (kg) */
const REAL8 m2, /**< mass of companion 2 (kg) */
const REAL8 S1x, /**< x-component of the dimensionless spin of object 1 */
const REAL8 S1y, /**< y-component of the dimensionless spin of object 1 */
const REAL8 S1z, /**< z-component of the dimensionless spin of object 1 */
const REAL8 S2x, /**< x-component of the dimensionless spin of object 2 */
const REAL8 S2y, /**< y-component of the dimensionless spin of object 2 */
const REAL8 S2z, /**< z-component of the dimensionless spin of object 2 */
const REAL8 distance, /**< distance of source (m) */
const REAL8 inclination, /**< inclination of source (rad) */
const REAL8 phiRef, /**< reference orbital phase (rad) */
const REAL8 longAscNodes, /**< longitude of ascending nodes, degenerate with the polarization angle, Omega in documentation */
    const REAL8 eccentricity, /**< eccentricity at reference epoch */
const REAL8 UNUSED meanPerAno, /**< mean anomaly of periastron */
const REAL8 deltaT, /**< sampling interval (s) */
const REAL8 f_min, /**< starting GW frequency (Hz) */
REAL8 f_ref, /**< reference GW frequency (Hz) */
LALDict *LALparams, /**< LAL dictionary containing accessory parameters */
const Approximant approximant /**< post-Newtonian approximant to use for waveform production */
"""
LALDict = lalsim.lal.CreateDict()
if params['lambda1'] != 0. :
lalsim.SimInspiralWaveformParamsInsertTidalLambda1(LALDict, params['lambda1'])
if params['lambda2'] != 0. :
lalsim.SimInspiralWaveformParamsInsertTidalLambda2(LALDict, params['lambda2'])
hp,hc = lalsim.SimInspiralChooseTDWaveform(lalsim.lal.MSUN_SI*params['mtot']*params['q']/(1.+params['q']),
lalsim.lal.MSUN_SI*params['mtot']/(1.+params['q']),
params['s1x'],params['s1y'],params['s1z'],
params['s2x'],params['s2y'],params['s2z'],
params['distance']*1e6*lalsim.lal.PC_SI,
params['iota'],
params['phi_ref'],
0.0, params['eccentricity'], 0.0,
1./params['srate'],
params['f_min'],
params['f_min'],
LALDict,
approx)
hp = hp.data.data
hc = hc.data.data
return np.array(hp) , np.array(hc)
def generate_freqdomain_waveform(approx, params):
"""
SimInspiralChooseFDWaveform:
COMPLEX16FrequencySeries **hptilde, /**< FD plus polarization */
COMPLEX16FrequencySeries **hctilde, /**< FD cross polarization */
const REAL8 m1, /**< mass of companion 1 (kg) */
const REAL8 m2, /**< mass of companion 2 (kg) */
const REAL8 S1x, /**< x-component of the dimensionless spin of object 1 */
const REAL8 S1y, /**< y-component of the dimensionless spin of object 1 */
const REAL8 S1z, /**< z-component of the dimensionless spin of object 1 */
const REAL8 S2x, /**< x-component of the dimensionless spin of object 2 */
const REAL8 S2y, /**< y-component of the dimensionless spin of object 2 */
const REAL8 S2z, /**< z-component of the dimensionless spin of object 2 */
const REAL8 distance, /**< distance of source (m) */
const REAL8 inclination, /**< inclination of source (rad) */
const REAL8 phiRef, /**< reference orbital phase (rad) */
const REAL8 longAscNodes, /**< longitude of ascending nodes, degenerate with the polarization angle, Omega in documentation */
const REAL8 eccentricity, /**< eccentricity at reference epoch */
const REAL8 UNUSED meanPerAno, /**< mean anomaly of periastron */
// frequency sampling parameters, no default value
const REAL8 deltaF, /**< sampling interval (Hz) */
const REAL8 f_min, /**< starting GW frequency (Hz) */
const REAL8 f_max, /**< ending GW frequency (Hz) */
REAL8 f_ref, /**< Reference frequency (Hz) */
LALDict *LALparams, /**< LAL dictionary containing accessory parameters */
const Approximant approximant /**< post-Newtonian approximant to use for waveform production */
"""
LALDict = lalsim.lal.CreateDict()
if params['lambda1'] != 0. :
lalsim.SimInspiralWaveformParamsInsertTidalLambda1(LALDict, params['lambda1'])
if params['lambda2'] != 0. :
lalsim.SimInspiralWaveformParamsInsertTidalLambda2(LALDict, params['lambda2'])
hp,hc = lalsim.SimInspiralChooseFDWaveform(lalsim.lal.MSUN_SI*params['mtot']*params['q']/(1.+params['q']),
lalsim.lal.MSUN_SI*params['mtot']/(1.+params['q']),
params['s1x'],params['s1y'],params['s1z'],
params['s2x'],params['s2y'],params['s2z'],
params['distance']*1e6*lalsim.lal.PC_SI,
params['iota'],
params['phi_ref'],
0.0, params['eccentricity'], 0.0,
1./params['seglen'],
params['f_min'],
params['f_max'],
params['f_min'],
LALDict,
approx)
hp = hp.data.data
hc = hc.data.data
L = len(hp)
freq = np.arange(L)/params['seglen']
return freq, np.array(hp, dtype=complex) , np.array(hc, dtype=complex)
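# --- Illustrative usage (not part of the original module) ---
# Minimal sketch, assuming lalsimulation is installed and that 'TaylorF2' is an
# available approximant. Every key below is read by the generators above; the
# values are placeholders for a generic binary-neutron-star-like system.
if __name__ == '__main__':
    params = {
        'mtot': 2.7, 'q': 1.0,
        's1x': 0., 's1y': 0., 's1z': 0.,
        's2x': 0., 's2y': 0., 's2z': 0.,
        'distance': 40.,                 # Mpc
        'iota': 0., 'phi_ref': 0., 'eccentricity': 0.,
        'lambda1': 400., 'lambda2': 400.,
        'f_min': 20., 'f_max': 2048.,
        'srate': 4096.,                  # Hz, used by the time-domain path
        'seglen': 64.,                   # s, used by the frequency-domain path
    }
    hp, hc = lal_wrapper('TaylorF2', 'freq')(None, params)  # `freqs` arg is unused here
    print(len(hp), len(hc))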
|
11549268
|
import diffrax
import jax
import jax.numpy as jnp
import pytest
from helpers import random_pytree, shaped_allclose, treedefs
def test_fill_forward():
in_ = jnp.array([jnp.nan, 0.0, 1.0, jnp.nan, jnp.nan, 2.0, jnp.nan])
out_ = jnp.array([jnp.nan, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0])
fill_in = diffrax.misc.fill_forward(in_[:, None])
assert shaped_allclose(fill_in, out_[:, None], equal_nan=True)
def test_ω_add_mul(getkey):
# ω(...) initialisation
ω = diffrax.misc.ω
a = [0, 1]
b = [1, 2]
c = (ω(a) + ω(b)).ω
assert c == [1, 3]
# ...**ω initialisation
for treedef in treedefs:
a = b = c = random_pytree(getkey(), treedef)
e1 = (a**ω * 2 + b**ω * c**ω - 3).ω
e2 = jax.tree_map(lambda ai, bi, ci: ai * 2 + bi * ci - 3, a, b, c)
assert shaped_allclose(e1, e2)
def test_ω_inplace(getkey):
ω = diffrax.misc.ω
for treedef in treedefs:
a = random_pytree(getkey(), treedef)
b1 = ω(a).at[()].set(3).ω
b2 = jax.tree_map(lambda ai: ai.at[()].set(3), a)
assert shaped_allclose(b1, b2)
a2 = jax.tree_map(lambda x: x + 1, a)
b3 = ω(a).at[()].set(ω(a2)).ω
b4 = jax.tree_map(lambda ai, a2i: ai.at[()].set(a2i[()]), a, a2)
assert shaped_allclose(b3, b4)
def test_ω_is_leaf(getkey):
ω = diffrax.misc.ω
for treedef in treedefs:
a = b = random_pytree(getkey(), treedef)
with pytest.raises(ValueError):
ω(a) + ω(b, is_leaf=lambda x: isinstance(x, int))
with pytest.raises(ValueError):
ω(a, is_leaf=lambda x: isinstance(x, int)) + ω(b)
with pytest.raises(ValueError):
ω(a, is_leaf=lambda x: isinstance(x, int)) + ω(
b, is_leaf=lambda x: isinstance(x, (int, str))
)
out = ω(a, is_leaf=lambda x: isinstance(x, int)) + ω(
b, is_leaf=lambda x: isinstance(x, int)
)
assert out.is_leaf(4)
assert not out.is_leaf("hi")
b = ω(a, is_leaf=lambda x: isinstance(x, int)).at[()].set(3)
assert out.is_leaf(4)
assert not out.is_leaf("hi")
a2 = jax.tree_map(lambda x: x + 1, a)
c = (
ω(a, is_leaf=lambda x: isinstance(x, int))
.at[()]
.set(ω(a2, is_leaf=lambda x: isinstance(x, int)))
)
assert c.is_leaf(4)
assert not c.is_leaf("hi")
with pytest.raises(ValueError):
ω(a, is_leaf=lambda x: isinstance(x, int)).at[()].set(ω(a2))
with pytest.raises(ValueError):
ω(a).at[()].set(ω(a2, is_leaf=lambda x: isinstance(x, int)))
def test_unvmap():
unvmap_all = diffrax.misc.unvmap_all
unvmap_any = diffrax.misc.unvmap_any
jit_unvmap_all = jax.jit(unvmap_all)
jit_unvmap_any = jax.jit(unvmap_any)
vmap_unvmap_all = jax.vmap(unvmap_all, out_axes=None)
vmap_unvmap_any = jax.vmap(unvmap_any, out_axes=None)
tt = jnp.array([True, True])
tf = jnp.array([True, False])
ff = jnp.array([False, False])
assert jnp.array_equal(unvmap_all(tt), jnp.array(True))
assert jnp.array_equal(unvmap_all(tf), jnp.array(False))
assert jnp.array_equal(unvmap_all(ff), jnp.array(False))
assert jnp.array_equal(unvmap_any(tt), jnp.array(True))
assert jnp.array_equal(unvmap_any(tf), jnp.array(True))
assert jnp.array_equal(unvmap_any(ff), jnp.array(False))
assert jnp.array_equal(jit_unvmap_all(tt), jnp.array(True))
assert jnp.array_equal(jit_unvmap_all(tf), jnp.array(False))
assert jnp.array_equal(jit_unvmap_all(ff), jnp.array(False))
assert jnp.array_equal(jit_unvmap_any(tt), jnp.array(True))
assert jnp.array_equal(jit_unvmap_any(tf), jnp.array(True))
assert jnp.array_equal(jit_unvmap_any(ff), jnp.array(False))
assert jnp.array_equal(vmap_unvmap_all(tt), jnp.array(True))
assert jnp.array_equal(vmap_unvmap_all(tf), jnp.array(False))
assert jnp.array_equal(vmap_unvmap_all(ff), jnp.array(False))
assert jnp.array_equal(vmap_unvmap_any(tt), jnp.array(True))
assert jnp.array_equal(vmap_unvmap_any(tf), jnp.array(True))
assert jnp.array_equal(vmap_unvmap_any(ff), jnp.array(False))
unvmap_max = diffrax.misc.unvmap_max
jit_unvmap_max = jax.jit(unvmap_max)
vmap_unvmap_max = jax.vmap(unvmap_max, out_axes=None)
_21 = jnp.array([2, 1])
_11 = jnp.array([1, 1])
assert jnp.array_equal(unvmap_max(_21), jnp.array(2))
assert jnp.array_equal(unvmap_max(_11), jnp.array(1))
assert jnp.array_equal(jit_unvmap_max(_21), jnp.array(2))
assert jnp.array_equal(jit_unvmap_max(_11), jnp.array(1))
assert jnp.array_equal(vmap_unvmap_max(_21), jnp.array(2))
assert jnp.array_equal(vmap_unvmap_max(_11), jnp.array(1))
def test_nondifferentiable_input():
ndi = lambda x: diffrax.misc.nondifferentiable_input(x, "name")
    ndi(jnp.array(2))  # no error
with pytest.raises(ValueError):
jax.jvp(ndi, (jnp.array(2.0),), (jnp.array(1.0),))
with pytest.raises(ValueError):
jax.grad(ndi)(jnp.array(2.0))
def test_nondifferentiable_output():
ndo = diffrax.misc.nondifferentiable_output
ndo(jnp.array(2.0)) # no error
jax.jvp(ndo, (jnp.array(2.0),), (jnp.array(1.0),)) # no error
with pytest.raises(RuntimeError):
jax.grad(ndo)(jnp.array(2.0))
|
11549278
|
import os
import numpy as np
import argparse
import cv2 as cv
from time import time
import keras.backend as K
from utils.img_process import generate_train, get_region_mask
from utils.vgg16_gray import VGG16Gray
from utils.compare_patch import compare_patch
def save_train_feat(photo_path, sketch_path, weight_path, feature_layers, save_path='./Data/train_sketch_feat.npz', img_size=(288, 288)):
photo, sketch, _ = generate_train(photo_path, sketch_path, img_size, photo2gray=True)
sketch = (sketch[:, np.newaxis, :, :] * 255.0).astype('float64')
vgg16 = VGG16Gray(weight_path=weight_path)
sketch_features = vgg16.get_features(sketch, feature_layers)
np.savez(save_path, *sketch_features)
def generate_target_style(photo, sketch, test_path, base_pool, feature_layers, vgg_weight, compare_size=48, searching_range=6, img_size=(288, 288)):
all_photo_pool, all_sketch_pool = (photo*255).astype('float64'), (sketch*255).astype('float64')
photo_img = cv.imread(test_path)
photo_img = cv.resize(photo_img, img_size).transpose(2, 0, 1).astype('float64')
border_feat_net = VGG16Gray(img_size=(144, 144), weight_path=vgg_weight)
max_find = 15
min_find = 2
total_imgs = all_photo_pool.shape[0]
n_grid_y = 18
n_grid_x = 18
    x_step = photo_img.shape[2] // n_grid_x  # integer division (Python 2 `/` on ints)
    y_step = photo_img.shape[1] // n_grid_y
    compare_shift = (compare_size - x_step) // 2
target_feats = [np.ndarray(x.shape[1:]) for x in base_pool]
symb_patch_3 = np.ndarray(shape=(1,3,compare_size,compare_size)).astype('float32')
conv_weights_3 = K.variable(symb_patch_3)
candidate_3 = K.placeholder(shape=(total_imgs,3,compare_size+2*searching_range,compare_size+2*searching_range))
conv_res = K.conv2d(candidate_3,conv_weights_3)
f_conv_3 = K.function([candidate_3],conv_res)
for jj in range(n_grid_y):
for ii in range(n_grid_x):
this_patch = photo_img[ :,
max(0, jj*y_step - compare_shift): min(n_grid_y*y_step, (jj+1)*y_step + compare_shift),
max(0, ii*x_step - compare_shift): min(n_grid_x*x_step, (ii+1)*x_step + compare_shift)]
this_patch = this_patch[np.newaxis, ...]
if ii<min_find or ii>max_find or jj<min_find or jj>max_find:
this_patch_rep = np.repeat(this_patch, total_imgs, 0)
candidate_patch = all_photo_pool[:,:,max(0, jj*y_step - compare_shift): min(n_grid_y*y_step, (jj+1)*y_step + compare_shift),
max(0,ii*x_step - compare_shift): min(n_grid_x*x_step, (ii+1)*x_step + compare_shift)]
diff = this_patch_rep - candidate_patch
sq_diff = np.square(diff)
                sq_diff = np.reshape(sq_diff, (sq_diff.shape[0], sq_diff.size // sq_diff.shape[0]))
sum_sq_diff = np.sum(sq_diff,1)
match_idx = np.argmin(sum_sq_diff)
for i in range(len(target_feats)):
                    x_step_i = x_step // 2**i
                    y_step_i = y_step // 2**i
target_feats[i][:, jj*y_step_i: (jj+1)*y_step_i, ii*x_step_i:(ii+1)*x_step_i] = base_pool[i][match_idx, :,
jj*y_step_i:(jj+1)*y_step_i, ii*x_step_i:(ii+1)*x_step_i]
else:
candidate_patch = all_photo_pool[ :, :,
max(0, jj*y_step - compare_shift - searching_range): min(n_grid_y*y_step, (jj+1)*y_step + compare_shift + searching_range),
max(0, ii*x_step - compare_shift - searching_range): min(n_grid_x*x_step, (ii+1)*x_step + compare_shift + searching_range)]
diff_photo = compare_patch(this_patch, candidate_patch, x_step, searching_range, compare_size, f_conv_3, conv_weights_3)
total_diff = diff_photo
min_y = 0
max_y = 2*searching_range+1
min_x = 0
max_x = 2*searching_range+1
feat_jj = 4
feat_ii = 4
if jj<5:
feat_jj = jj
min_y = searching_range
if jj>12:
feat_jj = jj-9
max_y = searching_range
if ii<5:
feat_ii = ii
min_x = searching_range
if ii>12:
feat_ii = ii-9
max_x = searching_range
max_diff = np.max(total_diff)
total_diff[:, min_y:max_y, min_x:max_x] = total_diff[:, min_y:max_y, min_x:max_x] - max_diff
best_index = np.argmin(total_diff)
best_index = np.unravel_index(best_index, total_diff.shape)
best_patch = best_index[0]
y_shift = best_index[1] - searching_range
x_shift = best_index[2] - searching_range
start_y = jj*y_step + y_shift-16*feat_jj
start_x = ii*x_step + x_shift-16*feat_ii
target_sketch = all_sketch_pool[best_patch, :, start_y:start_y+144, start_x:start_x+144]
target_sketch = np.expand_dims(target_sketch, 1)
border_patch_feat = border_feat_net.get_features(target_sketch, feature_layers)
for i in range(len(target_feats)):
                    x_step_i = x_step // 2**i
                    y_step_i = y_step // 2**i
target_feats[i][:, jj*y_step_i: (jj+1)*y_step_i, ii*x_step_i: (ii+1)*x_step_i] = border_patch_feat[i][
:, :, feat_jj*y_step_i:(feat_jj+1) * y_step_i, feat_ii*x_step_i: (feat_ii+1) * x_step_i]
target_gram = []
for f in target_feats:
f = f.reshape((f.shape[0], f.shape[1]*f.shape[2]))
f_gram = np.dot(f, f.transpose())
target_gram += [f_gram]
# generate nose region gram
nose_mask_pool = get_region_mask()
nose_gram = []
for idx, f in enumerate(target_feats):
f = f * nose_mask_pool[idx]
f = f.reshape((f.shape[0], f.shape[1]*f.shape[2]))
f_gram = np.dot(f, f.transpose())
nose_gram += [f_gram]
return target_gram, nose_gram
if __name__ == '__main__':
photo_path = './Data/photos'
sketch_path = './Data/sketches'
vgg_weight_path = './Weight/vgg16_gray.hdf5'
train_feat_path = './Data/train_sketch_feat.npz'
test_path = './Data/test/1.png'
feature_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
if os.path.exists(train_feat_path):
        print('Train feature database already exists')
else:
save_train_feat(photo_path, sketch_path, vgg_weight_path, feature_layers, save_path=train_feat_path)
feat = np.load(train_feat_path)
feat_base = [feat[x] for x in sorted(feat.files)]
# for idx, i in enumerate(feature_layers):
# tmp = np.load('../FM_train/sketch_feat%s.npy' % i)
# print tmp.shape, np.sum(tmp), np.sum(feat_base[idx])
# print np.linalg.norm(feat_base[idx] - tmp)
# exit()
# print [[x.shape, np.sum(x)] for x in feat_base]
photo, sketch, _ = generate_train(photo_path, sketch_path, size=(288, 288))
photo = photo.transpose(0, 3, 1, 2)
sketch = sketch[:, np.newaxis, :, :]
start = time()
target_gram, nose_gram = generate_target_style(photo, sketch, test_path, feat_base, feature_layers, vgg_weight_path, compare_size=48, searching_range=6, img_size=(288, 288))
end = time()
    print('Target style time', end - start)
# print [x.shape for x in target_gram]
# print [x.shape for x in nose_gram]
|
11549290
|
import random
import math
import numpy as np
import mxnet as mx
from mxnet import gluon, nd
from mxnet.gluon import nn
import gluonnlp as nlp
class DotProductAttention(nn.Block):
def __init__(self, dropout, **kwargs):
super().__init__(**kwargs)
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, mask=None):
d = query.shape[-1]
scores = nd.batch_dot(query, key, transpose_b=True) / math.sqrt(d)
attention_weights = nlp.model.attention_cell._masked_softmax(nd, scores, mask, scores.dtype)
attention_weights = self.dropout(attention_weights)
return nd.batch_dot(attention_weights, value)
def transpose_qkv(X, num_heads):
# Shape after reshape: (batch_size, num_items, num_heads, p)
# 0 means copying the shape element, -1 means inferring its value
X = X.reshape((0, 0, num_heads, -1))
# Swap the num_items and the num_heads dimensions
X = X.transpose((0, 2, 1, 3))
# Merge the first two dimensions. Use reverse=True to infer
# shape from right to left
return X.reshape((-1, 0, 0), reverse=True)
def transpose_output(X, num_heads):
# A reversed version of transpose_qkv
X = X.reshape((-1, num_heads, 0, 0), reverse=True)
X = X.transpose((0, 2, 1, 3))
return X.reshape((0, 0, -1))
class MultiHeadAttention(nn.Block):
def __init__(self, units, num_heads, dropout, **kwargs): # units = d_o
super().__init__(**kwargs)
assert units % num_heads == 0
self.num_heads = num_heads
self.attention = DotProductAttention(dropout)
self.W_q = nn.Dense(units, use_bias=False, flatten=False)
self.W_k = nn.Dense(units, use_bias=False, flatten=False)
self.W_v = nn.Dense(units, use_bias=False, flatten=False)
# query, key, and value shape: (batch_size, num_items, dim)
# mask shape is (batch_size, query_length, memory_length)
def forward(self, query, key, value, mask):
# Project and transpose from (batch_size, num_items, units) to
# (batch_size * num_heads, num_items, p), where units = p * num_heads.
query, key, value = [transpose_qkv(X, self.num_heads) for X in (
self.W_q(query), self.W_k(key), self.W_v(value))]
if mask is not None:
# Replicate mask for each of the num_heads heads
mask = nd.broadcast_axis(nd.expand_dims(mask, axis=1),
axis=1, size=self.num_heads)\
.reshape(shape=(-1, 0, 0), reverse=True)
output = self.attention(query, key, value, mask)
# Transpose from (batch_size * num_heads, num_items, p) back to
# (batch_size, num_items, units)
return transpose_output(output, self.num_heads)
def position_encoding_init(max_length, dim):
X = nd.arange(0, max_length).reshape((-1,1)) / nd.power(
10000, nd.arange(0, dim, 2)/dim)
position_weight = nd.zeros((max_length, dim))
position_weight[:, 0::2] = nd.sin(X)
position_weight[:, 1::2] = nd.cos(X)
return position_weight
class PositionalEncoding(nn.Block):
def __init__(self, units, dropout=0, max_len=1000):
super().__init__()
self._max_len = max_len
self._units = units
self.position_weight = position_encoding_init(max_len, units)
self.dropout = nn.Dropout(dropout)
def forward(self, X):
pos_seq = nd.arange(X.shape[1]).expand_dims(0)
emb = nd.Embedding(pos_seq, self.position_weight, self._max_len, self._units)
return self.dropout(X + emb)
def print_side_by_side(*strings, sep='\t\t'):
split = [str(s).split("\n") for s in strings]
zipped = zip(*split)
for elems in zipped:
print(sep.join(elems))
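# --- Illustrative usage (not part of the original module) ---
# Minimal shape check for the blocks above: run a (batch, items, dim) batch
# through multi-head attention, then add positional encodings.
if __name__ == '__main__':
    cell = MultiHeadAttention(units=8, num_heads=2, dropout=0.1)
    cell.initialize()
    X = nd.ones((2, 5, 4))
    print(cell(X, X, X, None).shape)          # (2, 5, 8)
    pos_enc = PositionalEncoding(units=8, dropout=0)
    pos_enc.initialize()
    print(pos_enc(nd.ones((2, 5, 8))).shape)  # (2, 5, 8)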
|
11549302
|
from django.contrib import admin
from .models import Followers
@admin.register(Followers)
class FollowersAdmin(admin.ModelAdmin):
"""Друзья пользователя"""
list_display = ("subscribed", "friends", "in_friends", "in_followers", "id")
search_fields = ("subscribed",)
list_editable = ("in_friends", "in_followers")
|
11549338
|
import base64
import graphene
import os
from gtmcore.files import FileOperations
from gtmcore.logging import LMLogger
from lmsrvcore.auth.user import get_logged_in_username
from lmsrvcore.api.interfaces import GitRepository
from lmsrvcore.api.connections import ListBasedConnection
from lmsrvlabbook.api.objects.labbookfile import LabbookFile
from lmsrvlabbook.api.connections.labbookfileconnection import LabbookFileConnection
import redis
import json
logger = LMLogger.get_logger()
class LabbookSection(graphene.ObjectType):
"""A type representing a section within a LabBook (i.e., code, input, output)
"""
class Meta:
interfaces = (graphene.relay.Node, GitRepository)
# Section name (code, input, output)
section = graphene.String()
# List of files and directories, given a relative root directory within the section
files = graphene.relay.ConnectionField(LabbookFileConnection, root_dir=graphene.String())
# List of all files and directories within the section
all_files = graphene.relay.ConnectionField(LabbookFileConnection)
has_files = graphene.Boolean()
@classmethod
def get_node(cls, info, id):
"""Method to resolve the object based on it's Node ID"""
# Parse the key
owner, name, section = id.split("&")
return LabbookSection(owner=owner, name=name, section=section)
def resolve_id(self, info):
"""Resolve the unique Node id for this object"""
if not self.id:
if not self.owner or not self.name or not self.section:
raise ValueError("Resolving a LabbookSection Node ID requires owner, name, and section to be set")
self.id = f"{self.owner}&{self.name}&{self.section}"
return self.id
def helper_resolve_files(self, labbook, kwargs):
"""Helper method to populate the LabbookFileConnection"""
base_dir = None
if 'root_dir' in kwargs:
if kwargs['root_dir']:
base_dir = kwargs['root_dir'] + os.path.sep
base_dir = base_dir.replace(os.path.sep + os.path.sep, os.path.sep)
# Get all files and directories, with the exception of anything in .git or .gigantum
edges = FileOperations.listdir(labbook, self.section, base_path=base_dir, show_hidden=False)
# Generate naive cursors
cursors = [base64.b64encode("{}".format(cnt).encode("UTF-8")).decode("UTF-8") for cnt, x in enumerate(edges)]
# Process slicing and cursor args
lbc = ListBasedConnection(edges, cursors, kwargs)
lbc.apply()
edge_objs = []
for edge, cursor in zip(lbc.edges, lbc.cursors):
create_data = {"owner": self.owner,
"section": self.section,
"name": self.name,
"key": edge['key'],
"_file_info": edge}
edge_objs.append(LabbookFileConnection.Edge(node=LabbookFile(**create_data), cursor=cursor))
return LabbookFileConnection(edges=edge_objs, page_info=lbc.page_info)
def resolve_files(self, info, **kwargs):
"""Resolver for getting file listing in a single directory"""
return info.context.labbook_loader.load(f"{get_logged_in_username()}&{self.owner}&{self.name}").then(
lambda labbook: self.helper_resolve_files(labbook, kwargs))
def resolve_has_files(self, info, **kwargs):
def _hf(lb):
p = os.path.join(lb.root_dir, self.section)
for rootd, dirs, files in os.walk(p):
for f in files:
if f != '.gitkeep':
return True
return False
return info.context.labbook_loader.load(f"{get_logged_in_username()}&{self.owner}&{self.name}").then(
_hf
)
def helper_resolve_all_files(self, labbook, kwargs):
"""Helper method to populate the LabbookFileConnection"""
# Check if file info has been cached
redis_conn = redis.Redis(db=5)
cache_key = f"FILE_LIST_CACHE|{labbook.key}|{self.section}"
if redis_conn.exists(cache_key):
# Load from cache
edges = json.loads(redis_conn.get(cache_key).decode("utf-8"))
redis_conn.expire(cache_key, 5)
else:
# Load from disk and cache
# Get all files and directories, with the exception of anything in .git or .gigantum
edges = FileOperations.walkdir(labbook, section=self.section, show_hidden=False)
redis_conn.set(cache_key, json.dumps(edges))
redis_conn.expire(cache_key, 5)
# Generate naive cursors
cursors = [base64.b64encode("{}".format(cnt).encode("UTF-8")).decode("UTF-8") for cnt, x in enumerate(edges)]
# Process slicing and cursor args
lbc = ListBasedConnection(edges, cursors, kwargs)
lbc.apply()
edge_objs = []
for edge, cursor in zip(lbc.edges, lbc.cursors):
create_data = {"owner": self.owner,
"section": self.section,
"name": self.name,
"key": edge['key'],
"_file_info": edge}
edge_objs.append(LabbookFileConnection.Edge(node=LabbookFile(**create_data), cursor=cursor))
return LabbookFileConnection(edges=edge_objs, page_info=lbc.page_info)
def resolve_all_files(self, info, **kwargs):
"""Resolver for getting all files in a LabBook section"""
return info.context.labbook_loader.load(f"{get_logged_in_username()}&{self.owner}&{self.name}").then(
lambda labbook: self.helper_resolve_all_files(labbook, kwargs))
|
11549363
|
from __future__ import absolute_import
import csv
import io
import re
from .base import HttpSite
class CsvSite(HttpSite):
_delim = None
@property
def dialect(self):
if "pattern" not in self.conf:
return "excel"
class DelimDialect(csv.excel):
delimiter = str(self.delim)
skipinitialspace = True
return DelimDialect()
@property
def delim(self):
return self._delim or self.conf.get("pattern", ",")
def get_content(self):
r = super(CsvSite, self).get_content()
body = r.text
if len(self.delim) > 1:
body = re.sub(self.conf["pattern"], "|", body)
self._delim = "|"
buf = io.StringIO(body)
csvfile = csv.reader(buf, dialect=self.dialect)
return csvfile
def run(self):
r = self._req(self.conf["request"])
body = r.text
if len(self.delim) > 1:
body = re.sub(self.conf["pattern"], "|", body)
self._delim = "|"
buf = io.StringIO(body)
csvfile = csv.reader(buf, dialect=self.dialect)
for (lineno, row) in enumerate(csvfile):
for parser in self.conf["results"]:
start = parser.get("start", 1)
stop = parser.get("end", None)
# raise ValueError(start, stop)
#pylint: disable=len-as-condition
if lineno < start or len(row) == 0 or row[0].startswith("#"):
continue
elif stop is not None and lineno > stop:
break
if "match" in parser:
rex = re.compile(parser["match"]["regex"])
col = int(parser["match"]["column"])
if not rex.search(row[col]):
continue
row = [item.strip() for item in row]
result_dict = dict(zip(parser["values"], row))
yield self.build_result(parser, result_dict)
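# --- Illustrative configuration (not part of the original module) ---
# Sketch of the `conf` mapping this class reads; the exact schema is assumed
# from the code above, and the request payload depends on HttpSite.
# conf = {
#     "request": {...},                # passed through to self._req()
#     "pattern": r"\s+",               # 1 char: the delimiter; longer: a regex
#                                      # rewritten to '|' before parsing
#     "results": [{
#         "start": 1,                  # skip lines before this line number
#         "end": None,                 # optional last line to parse
#         "match": {"regex": "^foo", "column": 0},   # optional row filter
#         "values": ["name", "value"], # zipped with each row into a dict
#     }],
# }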
|
11549377
|
from typing import Dict, List
import torch
from torch.distributions import Uniform
from kornia.augmentation.random_generator.base import RandomGeneratorBase
from kornia.augmentation.utils import _adapted_rsampling, _common_param_check, _joint_range_check, _range_bound
class PlanckianJitterGenerator(RandomGeneratorBase):
r"""Generate random planckian jitter parameters for a batch of images
"""
def __init__(self, domain: List[float]) -> None:
super().__init__()
self.domain = domain
def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:
idx_range = _range_bound(self.domain,
'idx_range',
device=device, dtype=dtype)
_joint_range_check(idx_range, 'idx_range', (0, self.domain[1]))
self.pl_idx_dist = Uniform(idx_range[0], idx_range[1], validate_args=False)
def forward(self, batch_shape: torch.Size, same_on_batch: bool =
False) -> Dict[str, torch.Tensor]:
batch_size = batch_shape[0]
_common_param_check(batch_size, same_on_batch)
pl_idx = _adapted_rsampling((batch_size,),
self.pl_idx_dist,
same_on_batch)
return dict(idx=pl_idx.long())
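# --- Illustrative usage (not part of the original module) ---
# Minimal sketch: draw one planckian-jitter index per image for a batch of 4,
# sampling uniformly from an assumed [0, 25) domain.
if __name__ == "__main__":
    gen = PlanckianJitterGenerator([0.0, 25.0])
    gen.make_samplers(torch.device("cpu"), torch.float32)
    params = gen.forward(torch.Size([4, 3, 32, 32]))
    print(params["idx"])  # e.g. tensor([ 3, 17,  9, 21])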
|
11549398
|
from __future__ import print_function
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import serial
from serial.serialutil import SerialTimeoutException
import sys
import time
def guess_port():
port = None
for pattern in "/dev/ttyACM? /dev/ttyUSB? /dev/tty.usbserial* /dev/tty.usbmodem* /dev/tty.wchusbserial*".split():
matches = glob.glob(pattern)
if matches:
return matches[0]
print("Opening port")
USE_TIMEOUT = 0
ser = serial.Serial(guess_port(), timeout=0, write_timeout=0.5 if USE_TIMEOUT else None)
print("Set baudrate")
ser.baudrate = 115200
fn = None
for arg in sys.argv[1:]:
fn = arg
data = open(fn).read()
print("Sending %s to port and dumping whatever comes back" % fn)
n_out = n_in = 0
received = []
n_retries = 0
print("Writing %d (%x) bytes" % (len(data), len(data)))
addr = 0
for c in data:
while True:
v = ord(c)
print("%04x: %02x %c" % (addr, v, c if 32 < v < 127 else '.'))
addr += 1
try:
n = ser.write(c)
except SerialTimeoutException:
n = 0
print(n)
#time.sleep(0.01)
#print `ser.read(3)`
if not USE_TIMEOUT: break
# try receiving
r = ser.read(1000)
if r:
print("RECEIVED", repr(r))
received.append(r)
if n:
break # next char
time.sleep(0.01)
print("RETRY", end=' ')
n_retries += 1
print("Waiting for final serial loopback")
start = time.time()
while (time.time() - start) < 0.5:
r = ser.read()
if not r:
time.sleep(0.1)
continue
# we got something, so reset the timeout
start = time.time()
print(repr(r))
received.append(r)
print("ALL SENT")
received = ''.join(received)
print("This is what we received:")
print(repr(received))
n = len(received)
print("%d (0x%x) bytes (%d missing). %d retries." % (n, n, len(data) - n, n_retries))
|
11549407
|
from typing import List, Mapping, Dict, Union, Iterable, Callable, Optional, Any
from dataclasses import dataclass, replace, field
from inspect import signature
from .types import ElementArg, ElementFn, DispatchEvent, ReceiveEvent
@dataclass
class ElementCallbacks:
click: Optional[Callable] = None
hoverin: Optional[Callable] = None
hoverout: Optional[Callable] = None
@dataclass
class EventCallbacks:
dispatch: Optional[Callable[[DispatchEvent], Any]] = None
receive: Optional[Callable[[ReceiveEvent], Any]] = None
messages: Optional[Dict[str, Union[Callable[[], Any], Callable[[str], Any]]]] = None
nodes: Optional[Dict[str, ElementCallbacks]] = None
@dataclass
class ElementContext:
ids: List[str]
data: Union[List[Any], None] = None
withQ: Optional[str] = None
animation: Optional[Mapping] = None
parentkey: str = ""
parent: Optional["ElementContext"] = None
callbacks: EventCallbacks = field(default_factory=EventCallbacks)
client: Any = None # used to maintain a reference to a custom client object
def eval_element_value(value: ElementArg[Any], data: Any, index: int) -> Any:
if callable(value):
num_args = len(signature(value).parameters)
return (
value() # type: ignore
if num_args == 0
else value(data) # type: ignore
if num_args == 1
else value(data, index) # type: ignore
)
else:
return value
def eval_element_dict(raw_dict: ElementArg[Mapping], data: Any, index: int) -> Mapping:
# evaluate the entire dict as a function
if callable(raw_dict):
return eval_element_value(raw_dict, data, index)
else:
if all([not callable(raw_dict[k]) for k in raw_dict]):
# simply return the dict if it has no function keys
return raw_dict
# evaluate each key which has a function
new_dict = {}
for k in raw_dict:
new_dict[k] = eval_element_value(raw_dict[k], data, index)
return new_dict
def apply_attrs(
context: ElementContext,
attr_fn: Callable[[Any, int, int], Mapping], # (data, data_index, element_index)
):
if context.parent is None:
if context.data is None or context.callbacks.dispatch is None:
return
attrs = attr_fn(context.data[0], 0, 0)
return context.callbacks.dispatch(
{
"attrs": attrs,
**(
{"animation": context.animation}
if context.animation is not None
else {}
),
**(
{"withQ": None if context.withQ == "null" else context.withQ}
if context.withQ is not None
else {}
),
}
)
def parent_attr_fn(data, data_index: int, _):
attr_dict = {}
for (i, k) in enumerate(context.ids):
attr_dict[k] = (
attr_fn(context.data[i], i, i) # use current data
if context.data is not None
else attr_fn(data, data_index, i) # use parent data
)
return {context.parentkey: attr_dict}
# apply attributes on the parent
apply_attrs(
replace(context.parent, withQ=context.withQ, animation=context.animation),
parent_attr_fn,
)
def add_element_callback(
context: ElementContext, # with the canvas as its parent
event_type: str, # key of ElementCallbacks
fn: ElementFn,
):
if context.parent is None or context.data is None:
return
cbs = context.callbacks
elementkey = context.parentkey
element_cbs_dict: Dict[str, ElementCallbacks] = {**(getattr(cbs, elementkey) or {})}
for (i, k) in enumerate(context.ids):
if k not in element_cbs_dict:
element_cbs_dict[k] = ElementCallbacks()
# callback closure
def callback(i=i):
eval_element_value(fn, context.data[i], i)
element_cbs_dict[k] = replace(element_cbs_dict[k], **{event_type: callback})
setattr(context.callbacks, elementkey, element_cbs_dict)
|
11549412
|
import ast
import os
import sys
import warnings
import pandas as pd
from pandas.api.types import CategoricalDtype
if not sys.warnoptions:
warnings.simplefilter("ignore")
import json
def load(filepath):
# From https://github.com/mdeff/fma/blob/rc1/utils.py / MIT License
filename = os.path.basename(filepath)
if "features" in filename:
return pd.read_csv(filepath, index_col=0, header=[0, 1, 2])
if "echonest" in filename:
return pd.read_csv(filepath, index_col=0, header=[0, 1, 2])
if "genres" in filename:
return pd.read_csv(filepath, index_col=0)
if "tracks" in filename:
tracks = pd.read_csv(filepath, index_col=0, header=[0, 1])
COLUMNS = [
("track", "tags"),
("album", "tags"),
("artist", "tags"),
("track", "genres"),
("track", "genres_all"),
]
for column in COLUMNS:
tracks[column] = tracks[column].map(ast.literal_eval)
COLUMNS = [
("track", "date_created"),
("track", "date_recorded"),
("album", "date_created"),
("album", "date_released"),
("artist", "date_created"),
("artist", "active_year_begin"),
("artist", "active_year_end"),
]
for column in COLUMNS:
tracks[column] = pd.to_datetime(tracks[column])
SUBSETS = ("small", "medium", "large")
tracks["set", "subset"] = tracks["set", "subset"].astype(
CategoricalDtype(categories=SUBSETS, ordered=True)
)
COLUMNS = [
("track", "genre_top"),
("track", "license"),
("album", "type"),
("album", "information"),
("artist", "bio"),
]
for column in COLUMNS:
tracks[column] = tracks[column].astype("category")
return tracks
def get_id_from_path(path):
base_name = os.path.basename(path)
return base_name.replace(".mp3", "").replace(".npy", "")
if __name__ == "__main__":
import argparse
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument("--metadata_path")
args = parser.parse_args()
base_path = Path(args.metadata_path)
in_path = base_path / "tracks.csv"
genres_path = base_path / "genres.csv"
out_path = base_path / "tracks_genre.json"
mapping_path = base_path / "mapping.json"
df = load(in_path)
df2 = pd.read_csv(genres_path)
id_to_title = {k: v for k, v in zip(df2.genre_id.tolist(), df2.title.tolist())}
df.reset_index(inplace=True)
print(df.head())
print(df.columns.values)
print(set(df[("set", "subset")].tolist()))
df = df[df[("set", "subset")].isin(["small"])]
print(set(df[("track", "genre_top")].tolist()))
print(
df[
[
("track_id", ""),
("track", "genre_top"),
("track", "genres"),
("set", "subset"),
]
]
)
data = {
k: v
for k, v in zip(
df[("track_id", "")].tolist(), df[("track", "genre_top")].tolist()
)
}
json.dump(data, open(out_path, "w"), indent=4)
mapping = {k: i for i, k in enumerate(set(df[("track", "genre_top")].tolist()))}
json.dump(mapping, open(mapping_path, "w"), indent=4)
|
11549417
|
import os
import time
from sources import STOP
import settings
class Commands:
def __init__(self, stop, epsilon, discount, update_target_every, min_reward, save_checkpoint_every, seconds_per_episode, agent_show_preview, optimizer, car_npcs):
self.stop = stop
self.epsilon = epsilon
self.discount = discount
self.update_target_every = update_target_every
self.min_reward = min_reward
self.save_checkpoint_every = save_checkpoint_every
self.seconds_per_episode = seconds_per_episode
self.agent_show_preview = agent_show_preview
self.optimizer = optimizer
self.car_npcs = car_npcs
def process(self):
output = ''
error = ''
# Check every file...
for command_file in os.listdir('tmp'):
# ...whose name starts witn a command prefix...
if not command_file.startswith('command_'):
continue
# ...and try to open and read
try:
time.sleep(0.01)
with open('tmp/' + command_file, encoding='utf-8') as f:
command = f.read().split()
command[0] = command[0].lower()
# Epsilon value
if command[0] == 'epsilon' and command[1] == 'current':
value = float(command[2])
if value > 1 or value < 0:
raise ValueError(f'Epsilon value {value} out of range')
old_value = self.epsilon[0]
self.epsilon[0] = value
output += f'Epsilon value updated from {old_value} to {value}\n'
# Epsilon decay
elif command[0] == 'epsilon' and command[1] == 'decay':
value = float(command[2])
if value > 1 or value < 0:
raise ValueError(f'Epsilon decay value {value} out of range')
old_value = self.epsilon[1]
self.epsilon[1] = value
output += f'Epsilon decay value updated from {old_value} to {value}\n'
# Minimum epsilon
elif command[0] == 'epsilon' and command[1] == 'min':
value = float(command[2])
if value > 1 or value < 0:
raise ValueError(f'Minimum epsilon value {value} out of range')
old_value = self.epsilon[2]
self.epsilon[2] = value
output += f'Minimum epsilon value updated from {old_value} to {value}\n'
# Discount
elif command[0] == 'discount':
value = float(command[1])
if value > 1 or value < 0:
                        raise ValueError(f'Discount value {value} out of range')
old_value = self.discount.value
self.discount.value = value
output += f'Discount value updated from {old_value} to {value}\n'
# Target network update interval
elif command[0] == 'target' and command[1] == 'update_every':
value = int(command[2])
if value < 0:
raise ValueError(f'Target network update every value {value} out of range')
old_value = self.update_target_every.value
self.update_target_every.value = value
output += f'Target network update every value updated from {old_value} to {value}\n'
# Minimum reward for model saving
elif command[0] == 'reward' and command[1] == 'min':
value = int(command[2])
old_value = self.min_reward.value
self.min_reward.value = value
output += f'Minimum reward value updated from {old_value} to {value}\n'
# Checkpoint save interval
elif command[0] == 'checkpoint' and command[1] == 'save_every':
value = int(command[2])
if value < 0:
raise ValueError(f'Save checkpoint every value {value} out of range')
old_value = self.save_checkpoint_every.value
self.save_checkpoint_every.value = value
output += f'Save checkpoint every value updated from {old_value} to {value}\n'
# Episode duration time
elif command[0] == 'episode' and command[1] == 'duration':
value = int(command[2])
if value < 0:
raise ValueError(f'Episode duration value {value} out of range')
old_value = self.seconds_per_episode.value
self.seconds_per_episode.value = value
output += f'Episode duration value updated from {old_value} to {value}\n'
# Learning rate
elif command[0] == 'optimizer' and command[1] == 'lr':
value = float(command[2])
if value < 0 or value > 1:
raise ValueError(f'Learning rate value {value} out of range')
self.optimizer[2] = 1
self.optimizer[3] = value
output += f'Learning rate value updated from {self.optimizer[0]} to {value}\n'
# Decay
elif command[0] == 'optimizer' and command[1] == 'decay':
value = float(command[2])
if value < 0 or value > 1:
raise ValueError(f'Decay value {value} out of range')
self.optimizer[4] = 1
self.optimizer[5] = value
output += f'Decay value updated from {self.optimizer[1]} to {value}\n'
# Agent preview
elif command[0] == 'preview':
camera_values = [0, 0, 0, 0, 0]
agent = int(command[1]) if command[1] != 'all' else -1
if command[2] == 'on' or command[2] == 'env':
value = 1
elif command[2] == 'agent':
value = 2
elif command[2].startswith('cam_'):
camera = int(command[2].split('_')[1]) - 1
if camera < 0 or camera >= len(settings.PREVIEW_CAMERA_RES):
raise Exception('Agent preview camera does not exist')
value = camera + 10
elif command[2] == 'off':
value = 0
elif ',' in command[2]:
camera_values = [float(value) for value in command[2].split(',')]
if len(camera_values) != 5:
raise Exception('Agent preview custom camera requires exactly 5 numerical values')
value = 3
else:
raise Exception('Agent preview subcommand invalid')
if agent != -1 and (agent < 1 or agent > len(self.agent_show_preview)):
                        raise ValueError(f'Agent number {agent} out of range')
if agent == -1:
for agent in range(len(self.agent_show_preview)):
self.agent_show_preview[agent][0] = value
self.agent_show_preview[agent][1] = camera_values[0]
self.agent_show_preview[agent][2] = camera_values[1]
self.agent_show_preview[agent][3] = camera_values[2]
self.agent_show_preview[agent][4] = camera_values[3]
self.agent_show_preview[agent][5] = camera_values[4]
output += f'Preview for all agents toggled {"on" if value else "off"}\n'
else:
self.agent_show_preview[agent - 1][0] = value
self.agent_show_preview[agent - 1][1] = camera_values[0]
self.agent_show_preview[agent - 1][2] = camera_values[1]
self.agent_show_preview[agent - 1][3] = camera_values[2]
self.agent_show_preview[agent - 1][4] = camera_values[3]
self.agent_show_preview[agent - 1][5] = camera_values[4]
output += f'Preview for agent {agent} toggled {"on" if value else "off"}\n'
# Car NPCs
elif command[0] == 'carnpcs' and command[1] == 'keep':
value = int(command[2])
if value < 0 or value > 500:
raise ValueError(f'Car NPC number value {value} out of range')
old_value = self.car_npcs[0]
self.car_npcs[0] = value
output += f'Car NPC number value updated from {old_value} to {value}\n'
# Car NPCs' reset interval
elif command[0] == 'carnpcs' and command[1] == 'reset_interval':
value = int(command[2])
if value < 0:
raise ValueError(f'Car NPC reset interval value {value} out of range')
                    old_value = self.car_npcs[1]  # index 1 holds the reset interval ([keep, interval])
                    self.car_npcs[1] = value
output += f'Car NPC reset interval value updated from {old_value} to {value}\n'
# Stop training
elif command[0] == 'stop':
if command[1] == 'now':
self.stop.value = STOP.now
elif command[1] == 'checkpoint':
self.stop.value = STOP.at_checkpoint
else:
                        raise Exception('Stop subcommand invalid')
                    output += 'Stopping\n'
# 'Values' command
elif command[0] == 'values':
output += '\nCurrent values:\n'
output += f'epsilon = {str(list(self.epsilon))} # [current, decay, min]\n'
output += f'discount = {self.discount.value}\n'
output += f'update_target_every = {self.update_target_every.value}\n'
output += f'min_reward = {self.min_reward.value}\n'
output += f'agent_show_preview = {[agent+1 for agent, state in enumerate(self.agent_show_preview) if state.value]}\n'
output += f'save_checkpoint_every = {self.save_checkpoint_every.value}\n'
output += f'seconds_per_episode = {self.seconds_per_episode.value}\n'
output += f'optimizer = [{self.optimizer[0]}, {self.optimizer[1]}] # [lr, decay]\n'
output += f'car_npcs = [{self.car_npcs[0]}, {self.car_npcs[1]}] # [keep, interval]\n'
# Wrong command
else:
                    output += 'Command not recognized\n'
except Exception as e:
error += str(e) + '\n'
# Remove command file
try:
os.remove('tmp/' + command_file)
except Exception as e:
error += str(e) + '\n'
# Add error messages to the output, if there are any
output += error
# Save response file
if output:
with open(f'tmp/output_{int(time.time())}', 'w', encoding='utf-8') as f:
f.write(output)
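# --- Illustrative usage (not part of the original class) ---
# The trainer polls tmp/command_* files, so runtime values are changed by
# dropping whitespace-separated commands there, e.g. from a shell:
#   echo "epsilon current 0.5" > tmp/command_1
#   echo "values" > tmp/command_2
# Responses (and any errors) are written back to tmp/output_<timestamp>.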
|
11549426
|
import cv2
import numpy as np
import sys
# argv is your commandline arguments, argv[0] is your program name, so skip it
# for n in sys.argv[1:]:
# print(n) #print out the filename we are currently processing
# input = open(n, "r")
# output = open(n + ".out", "w")
# # do some processing
# input.close()
# output.close()
for n in range(1, 10001):
# link = os.path.join(sys.argv[1],'frame_%08d.png')%n
# print(link)
img1 = cv2.imread('/media/analogicalnexus/00EA777C1E864BA9/2018/simulator_dataset/3/images/frame_%08d.png'%n,0)
img2 = cv2.imread('/media/analogicalnexus/00EA777C1E864BA9/2018/simulator_dataset/3_wo/images/frame_%08d.png'%n,0)
    # image subtraction: absolute difference avoids uint8 wrap-around while
    # still flagging every changed pixel as non-zero
    img3 = cv2.absdiff(img1, img2)
    ret, thresh1 = cv2.threshold(img3, 0, 255, cv2.THRESH_BINARY)
# hole filling
kernel = np.ones((5,5),np.uint8)
thresh1 = cv2.dilate(thresh1,kernel,iterations = 2)
# h, w = thresh1.shape[:2]
# mask = np.zeros((h+2, w+2), np.uint8)
# # Floodfill from point (0, 0)
# cv2.floodFill(thresh1, mask, (0,0), 255);
cv2.imwrite('/media/analogicalnexus/00EA777C1E864BA9/2018/simulator_dataset/3_mask/mask_%08d.png'%n, thresh1)
# cv2.imshow('result',thresh1)
# cv2.waitKey()
# cv2.destroyAllWindows()
|
11549467
|
from __future__ import division
from __future__ import print_function
from glob import glob
import os
import numpy as np
from numpy import testing as npt
from deepcpg.data import hdf, CPG_NAN
from deepcpg.data.fasta import read_chromo
from deepcpg.data.dna import CHAR_TO_INT
class TestMake(object):
def setup_class(self):
self.data_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'data')
self.data_files = glob(os.path.join(self.data_path, 'c*.h5'))
names = ['chromo', 'pos',
'/inputs/dna',
'/inputs/cpg/BS27_4_SER/dist',
'/inputs/cpg/BS27_4_SER/state',
'/inputs/cpg/BS28_2_SER/dist',
'/inputs/cpg/BS28_2_SER/state',
'/inputs/annos/exons',
'/inputs/annos/CGI',
'/outputs/cpg/BS27_4_SER',
'/outputs/cpg/BS28_2_SER',
'/outputs/cpg_stats/mean',
'/outputs/cpg_stats/var',
'/outputs/cpg_stats/cat_var',
'/outputs/cpg_stats/cat2_var',
'/outputs/cpg_stats/diff',
'/outputs/cpg_stats/mode',
]
self.data = hdf.read(self.data_files, names)
self.chromo = self.data['chromo']
self.pos = self.data['pos']
def _test_outputs(self, name, expected):
actual = self.data['/outputs/%s' % name]
for e in expected:
idx = (self.chromo == e[0].encode()) & (self.pos == e[1])
assert idx.sum() == 1
assert actual[idx] == e[2]
def test_outputs(self):
expected = [('18', 3000023, 1.0),
('18', 3000086, 1.0),
('18', 3012584, 0.0),
('19', 4398070, 0.0),
('19', 4428709, 1.0),
('19', 4442494, 0.0),
('19', 4447847, 1.0)
]
self._test_outputs('cpg/BS27_4_SER', expected)
expected = [('18', 3000092, 1.0),
('18', 3010064, 0.0),
('18', 3140338, 1.0),
('18', 3143169, 0.0),
('19', 4187854, 1.0),
('19', 4190571, 0.0),
('19', 4192788, 0.0),
('19', 4202077, 0.0)
]
self._test_outputs('cpg/BS28_2_SER', expected)
def _test_dna(self, chromo):
pos = self.pos[self.chromo == chromo.encode()]
dna = self.data['/inputs/dna'][self.chromo == chromo.encode()]
dna_wlen = dna.shape[1]
center = dna_wlen // 2
dna_seq = read_chromo(os.path.join(self.data_path, '../dna_db'),
chromo)
idxs = np.linspace(0, len(pos) - 1, 100).astype(np.int32)
for idx in idxs:
p = pos[idx] - 1
assert dna_seq[p:(p + 2)] == 'CG'
assert dna[idx, center] == 3
assert dna[idx, center + 1] == 2
assert dna[idx, center + 10] == CHAR_TO_INT[dna_seq[p + 10]]
assert dna[idx, center - 10] == CHAR_TO_INT[dna_seq[p - 10]]
def test_dna(self):
dna = self.data['/inputs/dna']
dna_wlen = dna.shape[1]
center = dna_wlen // 2
assert np.all(dna[:, center] == 3)
assert np.all(dna[:, (center + 1)] == 2)
self._test_dna('18')
self._test_dna('19')
def _test_cpg_neighbors(self, name, expected):
data_name = '/inputs/cpg/%s/' % name
dist = self.data[data_name + 'dist'][100:-100]
center = dist.shape[1] // 2
assert np.all((dist[:, center - 2] > dist[:, center - 1]) |
(dist[:, center - 2] == CPG_NAN))
assert np.all((dist[:, center] < dist[:, center + 1]) |
(dist[:, center + 1] == CPG_NAN))
for exp in expected:
pos = exp[1]
idx = (self.chromo == exp[0].encode()) & (self.pos == pos)
assert self.pos[idx] == pos
assert np.sum(idx) == 1
state = self.data[data_name + 'state'][idx].ravel()
dist = self.data[data_name + 'dist'][idx].ravel()
for i, left in enumerate(exp[2]):
exp_dist = pos - left[0]
exp_state = left[1]
assert dist[center - i - 1] == exp_dist
assert state[center - i - 1] == exp_state
for i, right in enumerate(exp[3]):
exp_dist = right[0] - pos
exp_state = right[1]
assert dist[center + i] == exp_dist
assert state[center + i] == exp_state
def test_cpg_neighbors(self):
name = 'BS27_4_SER'
expected = [
('18', 3000086,
((3000023, 1.0),),
((3000092, 1.0), (3000163, 0.0), (3000310, 1.0))
),
('18', 3000734,
((3000612, 1.0), (3000315, 1.0), (3000310, 1.0), (3000163, 0.0)),
((3000944, 0.0), (3001029, 0.0), (3001188, 0.0), (3004806, 1.0))
),
('18', 3047425,
((3047423, 1.0), (3046073, 0.0), (3046067, 1.0), (3046046, 0.0)),
((3047447, 0.0), (3047969, 0.0), (3047981, 0.0), (3047983, 1.0))
),
('19', 4364170,
((4363861, 1.0), (4362993, 1.0), (4359854, 1.0), (4359157, 1.0)),
((4372573, 1.0), (4376976, 1.0), (4377019, 1.0), (4378828, 1.0))
),
('19', 4428709,
((4410664, 1.0), (4407849, 0.0), (4406810, 1.0), (4406758, 1.0)),
((4429964, 1.0), (4429969, 1.0), (4430127, 1.0), (4430346, 0.0))
),
('19', 4447818,
((4447814, 1.0), (4447803, 1.0)),
((4447821, 1.0), (4447847, 1.0))
)
]
self._test_cpg_neighbors(name, expected)
name = 'BS28_2_SER'
expected = [
('18', 3010211,
((3010138, 1.0), (3010136, 0.0), (3010075, 1.0), (3010064, 0.0)),
((3010417, 1.0), (3010759, 1.0), (3012388, 1.0), (3012676, 1.0))
),
('18', 3039508,
((3038883, 1.0), (3038680, 0.0), (3038462, 1.0), (3031302, 0.0)),
((3039540, 1.0), (3039543, 1.0), (3039805, 1.0), (3039828, 1.0))
),
('19', 4201639,
((4201628, 0.0), (4201623, 0.0), (4201621, 1.0), (4201599, 0.0)),
((4201645, 0.0), (4201657, 0.0), (4201677, 0.0), (4201688, 0.0))
),
('19', 4185486,
((4185440, 1.0), (4184916, 1.0), (4184889, 0.0)),
((4185488, 0.0), (4186125, 0.0), (4187662, 1.0))
),
('19', 4201967,
((4201946, 0.0), (4201923, 0.0), (4201821, 0.0)),
((4201972, 0.0), (4202077, 0.0))
)
]
self._test_cpg_neighbors(name, expected)
def _test_stats(self, chromo, pos, stat, value):
idx = (self.chromo == chromo.encode()) & (self.pos == pos)
stat = self.data['/outputs/cpg_stats/%s' % stat][idx]
assert stat == value
def test_stats(self):
self._test_stats('18', 3010417, 'mean', 1.0)
self._test_stats('18', 3010417, 'var', 0.0)
self._test_stats('18', 3010417, 'diff', 0)
self._test_stats('18', 3010417, 'mode', 1)
self._test_stats('18', 3012173, 'mean', 0.0)
self._test_stats('18', 3012173, 'var', 0.0)
self._test_stats('18', 3012173, 'diff', 0)
self._test_stats('18', 3012173, 'mode', 0)
self._test_stats('18', 3052129, 'mean', 1.0)
self._test_stats('18', 3052129, 'var', 0.0)
self._test_stats('18', 3052129, 'diff', 0)
self._test_stats('18', 3052129, 'mode', 1)
self._test_stats('18', 3071630, 'mean', 0.5)
self._test_stats('18', 3071630, 'var', 0.25)
self._test_stats('18', 3071630, 'diff', 1)
self._test_stats('18', 3071630, 'mode', 0)
self._test_stats('19', 4201704, 'mean', 0.0)
self._test_stats('19', 4201704, 'var', 0.0)
self._test_stats('19', 4201704, 'diff', 0)
self._test_stats('19', 4201704, 'mode', 0)
self._test_stats('19', 4190571, 'mean', 0.5)
self._test_stats('19', 4190571, 'var', 0.25)
self._test_stats('19', 4190571, 'diff', 1)
self._test_stats('19', 4190571, 'mode', 0)
self._test_stats('19', 4190700, 'mean', 0.0)
self._test_stats('19', 4190700, 'var', 0.0)
self._test_stats('19', 4190700, 'diff', 0)
self._test_stats('19', 4190700, 'mode', 0)
v = self.data['/outputs/cpg_stats/var']
assert np.all((v >= 0) & (v <= 0.25))
cv = self.data['/outputs/cpg_stats/cat_var']
assert np.all((cv == CPG_NAN) | (cv == 0) | (cv == 1) | (cv == 2))
assert np.all(cv[v == CPG_NAN] == CPG_NAN)
cv = self.data['/outputs/cpg_stats/cat2_var']
assert np.all((cv == CPG_NAN) | (cv == 0) | (cv == 1))
assert np.all(cv[v == CPG_NAN] == CPG_NAN)
def _test_annos(self, chromo, pos, name, expected):
idx = (self.chromo == chromo.encode()) & (self.pos == pos)
actual = int(self.data['/inputs/annos/%s' % name][idx])
assert actual == expected
def test_annos(self):
self._test_annos('18', 3000023, 'CGI', 0)
self._test_annos('18', 3000023, 'exons', 0)
self._test_annos('18', 3267095, 'exons', 1)
self._test_annos('18', 4375924, 'exons', 1)
self._test_annos('18', 5592169, 'exons', 1)
self._test_annos('18', 5592176, 'exons', 1)
self._test_annos('18', 5592182, 'exons', 1)
self._test_annos('18', 5592199, 'exons', 1)
self._test_annos('19', 4438754, 'exons', 0)
|
11549474
|
class GeometryObject(APIObject,IDisposable):
""" The common base class for all geometric primitives. """
def Dispose(self):
""" Dispose(self: APIObject,A_0: bool) """
pass
def Equals(self,obj):
"""
Equals(self: GeometryObject,obj: object) -> bool
Determines whether the specified System.Object is equal to the current
System.Object.
obj: Another object.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: GeometryObject) -> int
Gets the integer value of the geometry object as hash code
"""
pass
def ReleaseManagedResources(self,*args):
""" ReleaseManagedResources(self: APIObject) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: GeometryObject) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __ne__(self,*args):
pass
GraphicsStyleId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The ElementId of the GeometryObject's GraphicsStyle
Get: GraphicsStyleId(self: GeometryObject) -> ElementId
"""
IsElementGeometry=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Indicates whether this geometry is obtained directly from an Element.
Get: IsElementGeometry(self: GeometryObject) -> bool
"""
Visibility=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The visibility.
Get: Visibility(self: GeometryObject) -> Visibility
"""
|
11549554
|
from django.contrib.auth import logout
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
class LogoutView(APIView):
def post(self, request):
if request.user.is_authenticated:
logout(request)
return Response(status=status.HTTP_204_NO_CONTENT)
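# A minimal sketch of exposing LogoutView in a URLconf. The route path and
# name below are illustrative assumptions, not taken from the original app.
from django.urls import path

urlpatterns = [
    path('logout/', LogoutView.as_view(), name='logout'),
]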
|
11549562
|
from django.template import Library
from django.utils.safestring import mark_safe
import datetime ,time
register = Library()
@register.simple_tag
def build_filter_ele(filter_column,admin_class):
column_obj = admin_class.model._meta.get_field(filter_column)
print("column obj:",column_obj)
try:
#filter_column_name = "<span>%s</span>" % filter_column
filter_ele = "<div class='col-md-2'>%s<select class='form-control ' name='%s'>" % (filter_column,filter_column)
for choice in column_obj.get_choices():
selected = ''
            if filter_column in admin_class.filter_condtions:  # this column is currently filtered
                # print("filter_column", choice,
                #       type(admin_class.filter_condtions.get(filter_column)),
                #       admin_class.filter_condtions.get(filter_column))
                if str(choice[0]) == admin_class.filter_condtions.get(filter_column):  # this value is currently selected
selected = 'selected'
print('selected......')
option = "<option value='%s' %s>%s</option>" % (choice[0],selected,choice[1])
filter_ele += option
except AttributeError as e:
print("err",e)
filter_ele = "<div class='col-md-2'><select class='form-control' name='%s__gte'>" % filter_column
if column_obj.get_internal_type() in ('DateField','DateTimeField'):
time_obj = datetime.datetime.now()
            time_list = [
                ['', '------'],
                [time_obj, 'Today'],
                [time_obj - datetime.timedelta(7), 'Last 7 days'],
                [time_obj.replace(day=1), 'This month'],
                [time_obj - datetime.timedelta(90), 'Last 3 months'],
                [time_obj.replace(month=1, day=1), 'YearToDate (YTD)'],
                ['', 'ALL'],
            ]
for i in time_list:
selected = ''
                time_to_str = '' if not i[0] else "%s-%s-%s" % (i[0].year, i[0].month, i[0].day)
                if "%s__gte" % filter_column in admin_class.filter_condtions:  # this column is currently filtered
                    print('-------------gte')
                    if time_to_str == admin_class.filter_condtions.get("%s__gte" % filter_column):  # this value is currently selected
selected = 'selected'
option = "<option value='%s' %s>%s</option>" % \
(time_to_str ,selected,i[1])
filter_ele += option
filter_ele += "</select></div>"
return mark_safe(filter_ele)
@register.simple_tag
def build_table_row(obj,admin_class):
"""生成一条记录的html element"""
ele = ""
if admin_class.list_display:
for column_name in admin_class.list_display:
column_obj = admin_class.model._meta.get_field(column_name)
if column_obj.choices: #get_xxx_display
column_data = getattr(obj,'get_%s_display'% column_name)()
else:
column_data = getattr(obj,column_name)
td_ele = "<td>%s</td>"% column_data
ele += td_ele
else:
td_ele = "<td>%s</td>" % obj
ele += td_ele
return mark_safe(ele)
@register.simple_tag
def get_model_name(admin_class):
return admin_class.model._meta.model_name.upper()
@register.simple_tag
def get_sorted_column(column,sorted_column,forloop):
    # sorted_column = {'name': '-0'}
    if column in sorted_column:  # this column is currently sorted
        # check the previous sort direction and invert it this time
last_sort_index = sorted_column[column]
if last_sort_index.startswith('-'):
this_time_sort_index = last_sort_index.strip('-')
else:
this_time_sort_index = '-%s' % last_sort_index
return this_time_sort_index
else:
return forloop
@register.simple_tag
def render_filtered_args(admin_class,render_html=True):
    '''Join the active filter conditions into a URL query-string fragment.'''
if admin_class.filter_condtions:
ele = ''
for k,v in admin_class.filter_condtions.items():
ele += '&%s=%s' %(k,v)
if render_html:
return mark_safe(ele)
else:
return ele
else:
return ''
@register.simple_tag
def render_sorted_arrow(column,sorted_column):
    if column in sorted_column:  # this column is currently sorted
last_sort_index = sorted_column[column]
if last_sort_index.startswith('-'):
arrow_direction = 'bottom'
else:
arrow_direction = 'top'
ele = '''<span class="glyphicon glyphicon-triangle-%s" aria-hidden="true"></span>''' % arrow_direction
return mark_safe(ele)
return ''
@register.simple_tag
def render_paginator(querysets,admin_class,sorted_column):
ele = '''
<ul class="pagination">
'''
for i in querysets.paginator.page_range:
        if abs(querysets.number - i) < 2:  # only render buttons near the current page
            active = ''
            if querysets.number == i:  # current page
                active = 'active'
filter_ele = render_filtered_args(admin_class)
sorted_ele = ''
if sorted_column:
sorted_ele = '&_o=%s' % list(sorted_column.values())[0]
p_ele = '''<li class="%s"><a href="?_page=%s%s%s">%s</a></li>''' % (active,i,filter_ele,sorted_ele,i)
ele += p_ele
ele += "</ul>"
return mark_safe(ele)
@register.simple_tag
def get_current_sorted_column_index(sorted_column):
return list(sorted_column.values())[0] if sorted_column else ''
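# Hedged usage sketch: these simple_tags are intended to be called from a
# Django template after loading this tag module. The module name "tags" and
# the context variable names below are assumptions for illustration only:
#
#   {% load tags %}
#   {% for column in admin_class.list_filter %}
#       {% build_filter_ele column admin_class %}
#   {% endfor %}
#   {% render_paginator querysets admin_class sorted_column %}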
|
11549578
|
from app import app
from flask import Flask, request, render_template
import sqlite3
from flask_login import login_required
def get_data(rom, limit, offset, val_s, time_s):
conn = sqlite3.connect('data/db/'+rom+'.sql')
c = conn.cursor()
    if val_s and val_s != 'None':  # query args round-trip None as the string 'None'
print(val_s)
sql = "SELECT rowid,* FROM def WHERE value LIKE ? ORDER BY rowid DESC LIMIT ? OFFSET ?"
get = (val_s+'%', limit, offset,)
c.execute(sql, get)
    elif time_s and time_s != 'None':
print(time_s)
sql = "SELECT rowid,* FROM def WHERE time LIKE ? ORDER BY rowid DESC LIMIT ? OFFSET ?"
get = [time_s+'%', limit, offset]
c.execute(sql, get)
else:
sql = "SELECT rowid,* FROM def ORDER BY rowid DESC LIMIT ? OFFSET ?"
get = [limit, offset]
c.execute(sql, get)
data = c.fetchall()
conn.close()
return data
def get_count(rom, limit, offset, val_s, time_s):
conn = sqlite3.connect('data/db/'+rom+'.sql')
c = conn.cursor()
    if val_s and val_s != 'None':
sql = "SELECT count(*) FROM def WHERE value LIKE ? ORDER BY rowid DESC LIMIT ? OFFSET ?"
get = (val_s+'%', limit, offset,)
c.execute(sql, get)
    elif time_s and time_s != 'None':  # `is not` against a str literal was an identity-test bug
sql = "SELECT count(*) FROM def WHERE time LIKE ? ORDER BY rowid DESC LIMIT ? OFFSET ?"
get = [time_s+'%', limit, offset]
c.execute(sql, get)
else:
sql = "SELECT count(*) FROM def ORDER BY rowid DESC LIMIT ? OFFSET ?"
get = [limit, offset]
c.execute(sql, get)
data = c.fetchone()
conn.close()
return data
@app.route('/settings/db/edit', methods=['GET', 'POST'])
@login_required
def db_edit():
if request.method == "GET":
rom = request.args.get("rom")
page = request.args.get("page")
limit = request.args.get("limit")
val_s = request.args.get("val_s")
time_s = request.args.get('time_s')
if request.form.get('send-clear') == 'yes':
conn = sqlite3.connect('data/db/'+rom+'.sql')
c = conn.cursor()
c.execute("DELETE FROM def")
conn.commit()
conn.close()
if request.args.get('edit-row') == 'yes':
rom = request.args.get('rom')
id = request.args.get('id')
val = request.args.get('val')
conn = sqlite3.connect('data/db/'+rom+'.sql')
c = conn.cursor()
sql = "UPDATE def SET value=? WHERE rowid=?"
c.execute(sql, (val, id,))
conn.commit()
conn.close()
if request.args.get('del-row') == 'yes':
rom = request.args.get('rom')
id = request.args.get('id')
conn = sqlite3.connect('data/db/'+rom+'.sql')
c = conn.cursor()
sql = "DELETE FROM def WHERE rowid=?"
c.execute(sql, (id,))
conn.commit()
conn.close()
offset= 0
if not page:
page=1
if not limit:
limit=100
if int(page)<1:
page=1
try:
count=get_count(rom, limit, offset, val_s, time_s)
count=count[0]
    except Exception:
count=0
pages = (int(count)//int(limit))+2
offset= (int(page)-1)*int(limit)
try:
data=get_data(rom, limit, offset, val_s, time_s)
    except Exception:
data=[]
return render_template('db_edit.html', data=data, count=int(count), pages=int(pages), limit=int(limit), page=int(page), rom=str(rom), val_s=str(val_s), time_s=str(time_s))
|
11549588
|
import requests
from requests import auth
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class HttpSession:
def __init__(self, base_url, headers, api_key=None, allow_self_signed_cert=False):
self._base_url = base_url
self._headers = headers
self._auth = ApiKeyAuth(api_key) if api_key else None
self._session = requests.Session()
if allow_self_signed_cert:
self._session.verify = False
def get_base_url(self):
return self._base_url
def get(self, path, request_headers=None):
return self._send_request(path, 'get', request_headers)
def delete(self, path, request_headers=None):
return self._send_request(path, 'delete', request_headers)
def post(self, path, data, request_headers=None):
return self._send_request(path, 'post', request_headers, data)
def patch(self, path, data, request_headers=None):
return self._send_request(path, 'patch', request_headers, data)
def _send_request(self, path, method, request_headers, data=None):
url = self._base_url + path
headers = {**(self._headers or {}), **(request_headers or {})}
return self._session.request(method, url, auth=self._auth, headers=headers, data=data)
class ApiKeyAuth(auth.AuthBase):
def __init__(self, api_key):
self.api_key = api_key
def __call__(self, request):
request.headers['Authorization'] = "Bearer " + self.api_key
return request
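# A short usage sketch, assuming a hypothetical JSON API; the base URL, path
# and api_key below are placeholders, not part of the original module.
if __name__ == '__main__':
    session = HttpSession(
        base_url='https://api.example.com',
        headers={'Accept': 'application/json'},
        api_key='my-secret-key',  # sent as "Authorization: Bearer my-secret-key"
    )
    response = session.get('/status')  # returns a requests.Response
    print(response.status_code, response.text)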
|
11549597
|
from flask import Flask
from DatabaseConnection import firebasecontroller as firebase_controller
from flask_ngrok import run_with_ngrok
app = Flask(__name__)
run_with_ngrok(app)
# This route serves as the webhook endpoint for Dialogflow.
@app.route("/", methods=['POST', 'GET'])
def data():
Database = firebase_controller.firebase_controller("Your DatabaseConnection name here ")
data_from_database = Database.get_service_data()
return data_from_database
# As Dialogflow requires an HTTPS webhook URL, we use flask-ngrok to create a
# temporary public link for testing; a paid service like Heroku or AWS can be
# used later instead.
# run the app
if __name__ == '__main__':
app.run()
|
11549599
|
import regex
from pynab import log
from pynab.db import db_session, Release, Pre, Group, windowed_query
import config
GROUP_ALIASES = {
# from: to
'alt.binaries.etc': 'alt.binaries.teevee',
}
GROUP_REQUEST_REGEXES = {
    'alt.binaries.etc': r'^(\d{4,8})$',
    'alt.binaries.teevee': r'^(\d{4,8})$',
    'alt.binaries.moovee': r'^(\d{4,8})$',
}
def process(limit=None):
"""Process releases for requests"""
with db_session() as db:
requests = {}
for group, reg in GROUP_REQUEST_REGEXES.items():
# noinspection PyComparisonWithNone
query = db.query(Release).join(Group).filter(Group.name==group).filter(Release.pre_id == None).\
filter(Release.category_id == '8010').filter("releases.name ~ '{}'".format(reg))
for release in windowed_query(query, Release.id, config.scan.get('binary_process_chunk_size')):
# check if it's aliased
if release.group.name in GROUP_ALIASES:
group_name = GROUP_ALIASES[release.group.name]
else:
group_name = release.group.name
if group_name not in requests:
requests[group_name] = {}
result = regex.search(reg, release.name)
if result:
requests[group_name][result.group(0)] = release
else:
log.info("requests: no release requests to process")
# per-group
for group_name, group_requests in requests.items():
# query for the requestids
if requests:
pres = db.query(Pre).filter(Pre.requestgroup==group_name).filter(Pre.requestid.in_(group_requests.keys())).all()
else:
log.info("requests: no pre requests found")
pres = []
# loop through and associate pres with their requests
for pre in pres:
# no longer need to check group
updated_release = group_requests.get(str(pre.requestid))
updated_release.pre_id = pre.id
db.merge(updated_release)
log.info("requests: found pre request id {} ({}) for {}".format(pre.requestid, group_name,
updated_release.name))
db.commit()
|
11549609
|
class Element:
def __init__(self, attrs):
self.attributes = attrs
@property
def dataset(self):
        def strip_data(key):
            # keep everything after the leading "data-" prefix
            return key.split("-", 1)[1]
_dataset = {
strip_data(key): value
for key, value in self.attributes.items()
if key.startswith("data-")
}
return _dataset
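# A tiny demonstration of the dataset property; the attribute names below are
# invented for illustration only.
if __name__ == '__main__':
    el = Element({'id': 'main', 'data-index': '3', 'data-user-id': '42'})
    print(el.dataset)  # {'index': '3', 'user-id': '42'}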
|
11549612
|
import logging
from datetime import datetime
from datetime import timedelta
from typing import Optional
from pytz import timezone
from pytz import utc
from yelp_beans.models import MeetingSpec
from yelp_beans.models import User
from yelp_beans.models import UserSubscriptionPreferences
def get_specs_for_current_week_query():
week_start = datetime.now() - timedelta(days=datetime.now().weekday())
    week_start = week_start.replace(hour=0, minute=0, second=0, microsecond=0)
return MeetingSpec.query.filter(MeetingSpec.datetime > week_start)
def get_specs_for_current_week():
return get_specs_for_current_week_query().all()
def get_users_from_spec(meeting_spec):
logging.info('Meeting subscription for spec:')
logging.info(meeting_spec.meeting_subscription)
logging.info('All Preferences')
logging.info(UserSubscriptionPreferences.query.all())
user_sub_preferences = UserSubscriptionPreferences.query.filter(
UserSubscriptionPreferences.subscription_id == meeting_spec.meeting_subscription_id
).all()
logging.info('User Preferences')
logging.info(user_sub_preferences)
meeting_timezone = timezone(meeting_spec.meeting_subscription.timezone)
users = []
for user_preference in user_sub_preferences:
if user_preference.preference:
logging.info('User Preference')
logging.info(user_preference.preference)
logging.info(user_preference.preference.__dict__)
preference_dt = user_preference.preference.datetime.replace(tzinfo=utc).astimezone(meeting_timezone)
meeting_spec_dt = meeting_spec.datetime.replace(tzinfo=utc).astimezone(meeting_timezone)
if preference_dt.hour == meeting_spec_dt.hour and \
preference_dt.minute == meeting_spec_dt.minute and \
preference_dt.weekday() == meeting_spec_dt.weekday():
user = User.query.filter(
User.id == user_preference.user_id).first()
if user is not None:
logging.info('user added: ')
logging.info(user)
users.append(user)
return users
def get_meeting_datetime(meeting_spec: MeetingSpec, subscription_timezone: Optional[str] = None) -> datetime:
"""
Given a meeting_spec, returns the meeting datetime in the appropriate timezone.
:param meeting_spec: models.meeting_spec
:param subscription_timezone: Optional[str] - Timezone of the subscription. Falls back to getting
timezone from the meeting_spec subscription reference
:return: datetime.datetime in the correct timezone
"""
meeting_datetime = meeting_spec.datetime
if subscription_timezone is None:
meeting_timezone = meeting_spec.meeting_subscription.timezone
else:
meeting_timezone = subscription_timezone
return meeting_datetime.replace(tzinfo=utc).astimezone(timezone(meeting_timezone))
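# A quick sketch of the timezone conversion above, using a stand-in object in
# place of a real MeetingSpec; the values are illustrative assumptions.
if __name__ == '__main__':
    from types import SimpleNamespace
    spec = SimpleNamespace(
        datetime=datetime(2021, 6, 1, 17, 0),  # naive datetime stored as UTC
        meeting_subscription=SimpleNamespace(timezone='US/Pacific'),
    )
    print(get_meeting_datetime(spec))  # 2021-06-01 10:00:00-07:00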
|
11549613
|
import torch
from src.dataclass import Context
from src.executable.train import train_model
def main(ctx: Context, chrome_trace_path: str = "torch_trace", steps: int = 128):
with torch.autograd.profiler.profile(use_cuda=True, use_cpu=False, use_kineto=True) as prof:
train_model(ctx, steps)
print(prof.key_averages())
if chrome_trace_path:
prof.export_chrome_trace(chrome_trace_path)
|
11549618
|
import unittest
import array
from iptest import IronPythonTestCase, is_cli, path_modifier, run_test
class HashTest(IronPythonTestCase):
def test_hash_before_eq(self):
class HashBeforeEq:
def __hash__(self):
return 1
def __eq__(self, other):
return self is other
x = HashBeforeEq()
self.assertNotEqual(x.__hash__, None)
self.assertEqual(hash(x), 1)
def test_eq_before_hash(self):
class EqBeforeHash:
def __eq__(self, other):
return self is other
def __hash__(self):
return 1
x = EqBeforeHash()
self.assertNotEqual(x.__hash__, None)
self.assertEqual(hash(x), 1)
def test_hash_writable_memoryviews(self):
buffer = array.array('b', [1,2,3])
self.assertRaises(ValueError, hash, memoryview(buffer))
run_test(__name__)
|
11549619
|
from django.test import TestCase
from molo.core import utils
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.models import SiteLanguageRelation, Languages, Main
from molo.core.utils import generate_slug
class TestUtils(TestCase, MoloTestCaseMixin):
def test_get_locale_code(self):
self.assertEqual(utils.get_locale_code(), 'en')
self.assertEqual(utils.get_locale_code('en-GB'), 'en-GB')
self.assertEqual(utils.get_locale_code('en_GB'), 'en-GB')
self.assertEqual(utils.get_locale_code('fr_FR'), 'fr-FR')
self.assertEqual(utils.get_locale_code('zu-ZA'), 'zu-ZA')
self.assertEqual(utils.get_locale_code('en'), 'en')
def test_slugify(self):
self.mk_main()
main = Main.objects.all().first()
self.language_setting = Languages.objects.create(
site_id=main.get_site().pk)
self.english = SiteLanguageRelation.objects.create(
language_setting=self.language_setting,
locale='en',
is_active=True)
self.mk_section(self.main, title='Your mind')
self.assertEqual(generate_slug('Your mind'), 'your-mind-1')
self.mk_section(self.main, title='Your mind')
self.assertEqual(generate_slug('Your mind'), 'your-mind-2')
self.assertEqual(generate_slug(None), 'no-title')
|
11549659
|
import keras
import src.vggbuilder as vggbuilder
import src.resnetbuilder as resnetbuilder
import src.histonetbuilder as histonetbuilder
import src.xceptionbuilder as xceptionbuilder
import src.mobilenetbuilder as mobilenetbuilder
from keras.applications import InceptionV3
from keras.layers import Dropout, GlobalAveragePooling2D, Dense
from keras import regularizers
from keras.models import Model
class ModelLoader:
# Initialize
def __init__(self, params):
self.model_type = params['model']
self.variant = params['variant']
self.num_classes = params['num_classes']
self.img_size = params['img_size']
self.size = [self.img_size, self.img_size, 3]
self.weight_decay = 5e-4
# Load and customize default Keras networks
    def load_model(self):
        model = None  # fallback when no known model type/variant matches
        if self.model_type == 'VGG':
if self.variant == 'Default':
model = vggbuilder.build_vgg16(input_shape=self.size, weight_decay=self.weight_decay, \
num_classes=self.num_classes, num_blocks=5)
            else:
                pass  # unknown variant; the original bare `False` was a no-op
elif self.model_type == 'ResNet':
if self.variant =='resnet_18':
model = resnetbuilder.build_resnet_18(self.size, self.num_classes)
elif self.variant == 'resnet_34':
model = resnetbuilder.build_resnet_34(self.size, self.num_classes)
elif self.variant == 'resnet_50':
model = resnetbuilder.build_resnet_50(self.size, self.num_classes)
            else:
                pass  # unknown variant
elif self.model_type == 'Xception':
if self.variant == 'Xception_V1':
model = xceptionbuilder.Xception(self.size, self.num_classes)
            else:
                pass  # unknown variant
elif self.model_type == 'MobileNet':
if self.variant == 'V1':
model = mobilenetbuilder.MobileNet(self.size, self.num_classes)
            else:
                pass  # unknown variant
elif self.model_type == 'Inception':
base_model = InceptionV3(include_top=False, input_shape=self.size, weights=None)
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dropout(0.4)(x)
predictions = Dense(self.num_classes, activation='sigmoid', name='fc')(x)
model = Model(inputs=base_model.input, outputs=predictions)
elif self.model_type == 'HistoNet':
model = histonetbuilder.build_histonet_series_1(input_shape=self.size, weight_decay=self.weight_decay, num_classes=self.num_classes, config_code=self.variant)
return model
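# A hedged usage sketch; the parameter values are illustrative and assume the
# src.* builder modules above are importable from the project layout.
if __name__ == '__main__':
    params = {
        'model': 'ResNet',
        'variant': 'resnet_18',
        'num_classes': 10,
        'img_size': 224,
    }
    model = ModelLoader(params).load_model()
    model.summary()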
|
11549675
|
from flask import Flask, redirect, render_template, request
app = Flask(__name__)
@app.route('/')
def home():
# Redirect to the /play route
return redirect('/play')
@app.route('/play')
def play():
# Return a Jinja2 HTML template
return render_template('homepage.html')
@app.route('/dashboard')
def dashboard():
return render_template('dashboard.html')
@app.errorhandler(500)
def server_error(e):
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
if __name__ == '__main__':
# This is used when running locally with 'python main.py'
app.run(host='127.0.0.1', port=8080, debug=True)
|
11549702
|
import os.path
import unittest
import tests.base_test
import tests.output_parser as output_parser
import tests.test_config
import tests.util
class Alternative3SpliceSiteNovelBaseTest(tests.base_test.BaseTest):
def setUp(self):
super().setUp()
self._test_base_dir = tests.test_config.TEST_BASE_DIR
self._test_dir = os.path.join(self._test_base_dir,
'alternative_3_splice_site_novel',
self._sub_test_name())
self._generated_input_dir = os.path.join(self._test_dir,
'generated_input')
self._out_dir = os.path.join(self._test_dir, 'out')
self._tmp_dir = os.path.join(self._test_dir, 'tmp')
tests.util.recreate_dirs([
self._generated_input_dir, self._out_dir, self._tmp_dir,
self._command_output_dir()
])
self._read_type = 'paired'
self._read_length = 50
self._chromosome_length = 2000
self._task = 'both'
self._sample_1_bams_path = os.path.join(self._generated_input_dir,
'b1.txt')
sample_1_bam_replicate_template = os.path.join(
self._generated_input_dir, 'sample_1_rep_{}.bam')
self._sample_1_bams = self._create_sample_1_bams(
self._sample_1_bams_path, sample_1_bam_replicate_template)
self._gtf_path = os.path.join(self._generated_input_dir, 'test.gtf')
self._gtf = self._create_gtf_from_transcripts(
self._gtf_path, self._exons_by_transcript())
def _command_output_dir(self):
return os.path.join(self._test_dir, 'command_output')
def _rmats_arguments(self):
return [
'--b1',
self._sample_1_bams_path,
'--gtf',
self._gtf_path,
'--od',
self._out_dir,
'-t',
self._read_type,
'--readLength',
str(self._read_length),
'--tmp',
self._tmp_dir,
'--task',
self._task,
'--statoff',
]
def _create_sample_1_bams(self, sample_1_bams_path,
sample_1_replicate_template):
rep_1_bam_path = sample_1_replicate_template.format(1)
rep_1_bam = self._create_bam_from_paired_read_coords(
rep_1_bam_path, self._chromosome_length, self._read_length,
self._paired_read_coords())
sample_1_bams = [rep_1_bam]
self._write_bams(sample_1_bams, sample_1_bams_path)
return sample_1_bams
def _sort_by_event_coords(self, rows):
return sorted(rows,
key=lambda r:
(int(r['flankingES']), int(r['flankingEE']),
int(r['longExonStart_0base']), int(r['shortES']),
int(r['shortEE']), int(r['longExonEnd'])))
def _check_event_coords(self, row, f_start, f_end, l_start, s_start, end):
self.assertEqual(row['flankingES'], f_start)
self.assertEqual(row['flankingEE'], f_end)
self.assertEqual(row['longExonStart_0base'], l_start)
self.assertEqual(row['shortES'], s_start)
self.assertEqual(row['shortEE'], end)
self.assertEqual(row['longExonEnd'], end)
class NovelJunction(Alternative3SpliceSiteNovelBaseTest):
def _sub_test_name(self):
return 'novel_junction'
def test(self):
self._run_test()
def _exons_by_transcript(self):
return [
[(1, 100), (201, 400)],
[(1, 100), (301, 400)],
[(801, 900)],
[(501, 600), (701, 900)],
[(1001, 1100), (1301, 1400)],
[(1201, 1400)],
]
def _paired_read_coords(self):
return [
([[501, 600]], [[501, 600], [801, 820]]),
([[1001, 1100]], [[1001, 1100], [1201, 1220]]),
]
def _check_results(self):
self._check_no_error_results()
from_gtf_a3ss_path = os.path.join(self._out_dir, 'fromGTF.A3SS.txt')
from_gtf_a3ss_header, from_gtf_a3ss_rows, error = output_parser.parse_from_gtf(
from_gtf_a3ss_path)
self.assertFalse(error)
self.assertEqual(len(from_gtf_a3ss_rows), 3)
sorted_from_gtf_a3ss_rows = self._sort_by_event_coords(
from_gtf_a3ss_rows)
self._check_event_coords(sorted_from_gtf_a3ss_rows[0], '0', '100',
'200', '300', '400')
self._check_event_coords(sorted_from_gtf_a3ss_rows[1], '500', '600',
'700', '800', '900')
self._check_event_coords(sorted_from_gtf_a3ss_rows[2], '1000', '1100',
'1200', '1300', '1400')
from_gtf_novel_junction_a3ss_path = os.path.join(
self._out_dir, 'fromGTF.novelJunction.A3SS.txt')
from_gtf_novel_junction_a3ss_header, from_gtf_novel_junction_a3ss_rows, error = output_parser.parse_from_gtf_novel_junction(
from_gtf_novel_junction_a3ss_path)
self.assertFalse(error)
self.assertEqual(len(from_gtf_novel_junction_a3ss_rows), 2)
sorted_from_gtf_novel_junction_a3ss_rows = self._sort_by_event_coords(
from_gtf_novel_junction_a3ss_rows)
self._check_event_coords(sorted_from_gtf_novel_junction_a3ss_rows[0],
'500', '600', '700', '800', '900')
self._check_event_coords(sorted_from_gtf_novel_junction_a3ss_rows[1],
'1000', '1100', '1200', '1300', '1400')
from_gtf_novel_splice_site_a3ss_path = os.path.join(
self._out_dir, 'fromGTF.novelSpliceSite.A3SS.txt')
from_gtf_novel_splice_site_a3ss_header, from_gtf_novel_splice_site_a3ss_rows, error = output_parser.parse_from_gtf_novel_splice_site(
from_gtf_novel_splice_site_a3ss_path)
self.assertFalse(error)
self.assertEqual(len(from_gtf_novel_splice_site_a3ss_rows), 0)
class NovelSpliceSite(Alternative3SpliceSiteNovelBaseTest):
def _sub_test_name(self):
return 'novel_splice_site'
def test(self):
self._run_test()
def _rmats_arguments(self):
arguments = super()._rmats_arguments()
arguments.append('--novelSS')
return arguments
def _exons_by_transcript(self):
return [
[(1, 100), (201, 400)],
[(1, 100), (301, 400)],
]
def _paired_read_coords(self):
return [
([[81, 98], [201, 400]], [[201, 400]]),
([[81, 96], [301, 400]], [[301, 400]]),
([[81, 94], [201, 400]], [[201, 400]]),
([[81, 94], [301, 400]], [[301, 400]]),
([[1, 100]], [[1, 100], [303, 320]]),
([[1, 100]], [[1, 100], [203, 220]]),
]
def _check_results(self):
self._check_no_error_results()
from_gtf_a3ss_path = os.path.join(self._out_dir, 'fromGTF.A3SS.txt')
from_gtf_a3ss_header, from_gtf_a3ss_rows, error = output_parser.parse_from_gtf(
from_gtf_a3ss_path)
self.assertFalse(error)
self.assertEqual(len(from_gtf_a3ss_rows), 7)
sorted_from_gtf_a3ss_rows = self._sort_by_event_coords(
from_gtf_a3ss_rows)
self._check_event_coords(sorted_from_gtf_a3ss_rows[0], '0', '94',
'200', '300', '400')
self._check_event_coords(sorted_from_gtf_a3ss_rows[1], '0', '100',
'200', '202', '400')
self._check_event_coords(sorted_from_gtf_a3ss_rows[2], '0', '100',
'200', '300', '400')
self._check_event_coords(sorted_from_gtf_a3ss_rows[3], '0', '100',
'200', '302', '400')
self._check_event_coords(sorted_from_gtf_a3ss_rows[4], '0', '100',
'202', '300', '400')
self._check_event_coords(sorted_from_gtf_a3ss_rows[5], '0', '100',
'202', '302', '400')
self._check_event_coords(sorted_from_gtf_a3ss_rows[6], '0', '100',
'300', '302', '400')
from_gtf_novel_junction_a3ss_path = os.path.join(
self._out_dir, 'fromGTF.novelJunction.A3SS.txt')
from_gtf_novel_junction_a3ss_header, from_gtf_novel_junction_a3ss_rows, error = output_parser.parse_from_gtf_novel_junction(
from_gtf_novel_junction_a3ss_path)
self.assertFalse(error)
self.assertEqual(len(from_gtf_novel_junction_a3ss_rows), 0)
from_gtf_novel_splice_site_a3ss_path = os.path.join(
self._out_dir, 'fromGTF.novelSpliceSite.A3SS.txt')
from_gtf_novel_splice_site_a3ss_header, from_gtf_novel_splice_site_a3ss_rows, error = output_parser.parse_from_gtf_novel_splice_site(
from_gtf_novel_splice_site_a3ss_path)
self.assertFalse(error)
self.assertEqual(len(from_gtf_novel_splice_site_a3ss_rows), 6)
sorted_from_gtf_novel_splice_site_a3ss_rows = self._sort_by_event_coords(
from_gtf_novel_splice_site_a3ss_rows)
self._check_event_coords(
sorted_from_gtf_novel_splice_site_a3ss_rows[0], '0', '94', '200',
'300', '400')
self._check_event_coords(
sorted_from_gtf_novel_splice_site_a3ss_rows[1], '0', '100', '200',
'202', '400')
self._check_event_coords(
sorted_from_gtf_novel_splice_site_a3ss_rows[2], '0', '100', '200',
'302', '400')
self._check_event_coords(
sorted_from_gtf_novel_splice_site_a3ss_rows[3], '0', '100', '202',
'300', '400')
self._check_event_coords(
sorted_from_gtf_novel_splice_site_a3ss_rows[4], '0', '100', '202',
'302', '400')
self._check_event_coords(
sorted_from_gtf_novel_splice_site_a3ss_rows[5], '0', '100', '300',
'302', '400')
if __name__ == '__main__':
unittest.main(verbosity=2)
|
11549704
|
import pytest
import time
import asyncio
import os
BASE_DIR = os.path.dirname(__file__)
NOTEBOOK_EXECUTION_TIME = 2
NUMBER_PREHEATED_KERNEL = 2
TIME_THRESHOLD = 1
@pytest.fixture
def voila_config_file_paths_arg():
path = os.path.join(BASE_DIR, '..', 'configs', 'preheat')
return '--VoilaTest.config_file_paths=[%r]' % path
@pytest.fixture
def preheat_mode():
return True
@pytest.fixture
def voila_notebook(notebook_directory):
return os.path.join(notebook_directory, 'preheat')
async def send_request(sc, url, wait=0):
await asyncio.sleep(wait)
real_time = time.time()
response = await sc.fetch(url)
real_time = time.time() - real_time
html_text = response.body.decode("utf-8")
return real_time, html_text
async def test_render_notebook_with_heated_kernel(http_server_client, base_url):
await asyncio.sleep(NUMBER_PREHEATED_KERNEL*NOTEBOOK_EXECUTION_TIME + 1)
time, text = await send_request(sc=http_server_client, url=f'{base_url}voila/render/pre_heat.ipynb')
assert 'hello world' in text
assert time < TIME_THRESHOLD
await asyncio.sleep(NOTEBOOK_EXECUTION_TIME + 1)
async def test_render_blacklisted_notebook_with_normal_kernel(http_server_client, base_url):
await asyncio.sleep(NUMBER_PREHEATED_KERNEL*NOTEBOOK_EXECUTION_TIME + 1)
time, text = await send_request(sc=http_server_client, url=f'{base_url}voila/render/blacklisted.ipynb')
assert 'hello world' in text
assert time > TIME_THRESHOLD
await asyncio.sleep(NOTEBOOK_EXECUTION_TIME + 1)
|
11549762
|
import sys
import os
import numpy as np
import tensorflow as tf
import csv
import pickle
import tarfile
import zipfile as z
import threading
from scipy import ndimage
from scipy.misc import imresize, imsave
from six.moves.urllib.request import urlretrieve
MB = 1024 ** 2
def download_hook_function(block, block_size, total_size):
if total_size != -1:
sys.stdout.write('Downloaded: %3.3fMB of %3.3fMB\r' % (float(block * block_size) / float(MB),
float(total_size) / float(MB)))
else:
sys.stdout.write('Downloaded: %3.3fMB of \'unknown size\'\r' % (float(block * block_size) / float(MB)))
sys.stdout.flush()
def download_file(file_url, output_file_dir, expected_size, FORCE=False):
name = file_url.split('/')[-1]
file_output_path = os.path.join(output_file_dir, name)
print('Attempting to download ' + file_url)
print('File output path: ' + file_output_path)
print('Expected size: ' + str(expected_size))
if not os.path.isdir(output_file_dir):
os.makedirs(output_file_dir)
if os.path.isfile(file_output_path) and os.stat(file_output_path).st_size == expected_size and not FORCE:
print('File already downloaded completely!')
return file_output_path
else:
print(' ')
filename, _ = urlretrieve(file_url, file_output_path, download_hook_function)
print(' ')
statinfo = os.stat(filename)
if statinfo.st_size == expected_size:
print('Found and verified', filename)
else:
raise Exception('Could not download ' + filename)
return filename
def extract_file(input_file, output_dir, FORCE=False):
if os.path.isdir(output_dir) and not FORCE:
print('%s already extracted to %s' % (input_file, output_dir))
directories = [x for x in os.listdir(output_dir) if os.path.isdir(os.path.join(output_dir, x))]
return output_dir + "/" + directories[0]
else:
tar = tarfile.open(input_file)
sys.stdout.flush()
print('Started extracting:\n%s\nto:\n%s' % (input_file, output_dir))
tar.extractall(output_dir)
print('Finished extracting:\n%s\nto:\n%s' % (input_file, output_dir))
tar.close()
directories = [x for x in os.listdir(output_dir) if os.path.isdir(os.path.join(output_dir, x))]
return output_dir + "/" + directories[0]
def load_class(folder, image_size, pixel_depth):
image_files = os.listdir(folder)
num_of_images = len(image_files)
dataset = np.ndarray(shape=(num_of_images, image_size, image_size),
dtype=np.float32)
image_index = 0
print('Started loading images from: ' + folder)
for index, image in enumerate(image_files):
sys.stdout.write('Loading image %d of %d\r' % (index + 1, num_of_images))
sys.stdout.flush()
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[image_index, :, :] = image_data
image_index += 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
print('Finished loading data from: ' + folder)
return dataset[0:image_index, :, :]
def make_pickles(input_folder, output_dir, image_size, image_depth, FORCE=False):
directories = sorted([x for x in os.listdir(input_folder) if os.path.isdir(os.path.join(input_folder, x))])
pickle_files = [os.path.join(output_dir, x + '.pickle') for x in directories]
for index, pickle_file in enumerate(pickle_files):
if os.path.isfile(pickle_file) and not FORCE:
print('\tPickle already exists: %s' % (pickle_file))
else:
folder_path = os.path.join(input_folder, directories[index])
print('\tLoading from folder: ' + folder_path)
data = load_class(folder_path, image_size, image_depth)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
print('\tStarted pickling: ' + directories[index])
try:
with open(pickle_file, 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
print('Finished pickling: ' + directories[index])
return pickle_files
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation, :, :]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def reformat(data, image_size, num_of_channels, num_of_classes, flatten=True):
if flatten:
data.train_dataset = data.train_dataset.reshape((-1, image_size * image_size * num_of_channels)).astype(np.float32)
data.valid_dataset = data.valid_dataset.reshape((-1, image_size * image_size * num_of_channels)).astype(np.float32)
data.test_dataset = data.test_dataset.reshape((-1, image_size * image_size * num_of_channels)).astype(np.float32)
else:
data.train_dataset = data.train_dataset.reshape((-1, image_size, image_size, num_of_channels)).astype(np.float32)
data.valid_dataset = data.valid_dataset.reshape((-1, image_size, image_size, num_of_channels)).astype(np.float32)
data.test_dataset = data.test_dataset.reshape((-1, image_size, image_size, num_of_channels)).astype(np.float32)
# Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
data.train_labels = (np.arange(num_of_classes) == data.train_labels[:, None]).astype(np.float32)
data.valid_labels = (np.arange(num_of_classes) == data.valid_labels[:, None]).astype(np.float32)
data.test_labels = (np.arange(num_of_classes) == data.test_labels[:, None]).astype(np.float32)
return data
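# A small self-contained illustration of the one-hot mapping used above:
# comparing a label column vector against np.arange(num_classes) broadcasts
# into an (N, num_classes) boolean matrix, e.g.
#
#   labels = np.array([0, 2, 1])
#   (np.arange(3) == labels[:, None]).astype(np.float32)
#   # -> [[1., 0., 0.],
#   #     [0., 0., 1.],
#   #     [0., 1., 0.]]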
def merge_datasets(pickle_files, image_size, train_size, valid_size=0):
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class + tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
def pickle_whole(train_pickle_files, test_pickle_files, image_size,
train_size, valid_size, test_size, output_file_path, FORCE=False):
if os.path.isfile(output_file_path) and not FORCE:
print('Pickle file: %s already exist' % (output_file_path))
with open(output_file_path, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
return train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels
else:
print('Merging train, valid data')
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_pickle_files, image_size, train_size, valid_size)
print('Merging test data')
_, _, test_dataset, test_labels = merge_datasets(test_pickle_files, image_size, test_size)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
try:
f = open(output_file_path, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', output_file_path, ':', e)
raise
statinfo = os.stat(output_file_path)
print('Compressed pickle size:', statinfo.st_size)
return train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels
def load_cifar_10_pickle(pickle_file, image_depth):
    with open(pickle_file, 'rb') as fo:
        batch = pickle.load(fo)  # renamed from `dict` to avoid shadowing the built-in
    return ((batch['data'].astype(float) - image_depth / 2) / image_depth), batch['labels']
def load_cifar_10_from_pickles(train_pickle_files, test_pickle_files, pickle_batch_size, image_size, image_depth,
num_of_channels):
all_train_data = np.ndarray(shape=(pickle_batch_size * len(train_pickle_files),
image_size * image_size * num_of_channels),
dtype=np.float32)
all_train_labels = np.ndarray(shape=pickle_batch_size * len(train_pickle_files), dtype=object)
all_test_data = np.ndarray(shape=(pickle_batch_size * len(test_pickle_files),
image_size * image_size * num_of_channels),
dtype=np.float32)
all_test_labels = np.ndarray(shape=pickle_batch_size * len(test_pickle_files), dtype=object)
print('Started loading training data')
for index, train_pickle_file in enumerate(train_pickle_files):
all_train_data[index * pickle_batch_size: (index + 1) * pickle_batch_size, :], \
all_train_labels[index * pickle_batch_size: (index + 1) * pickle_batch_size] = \
load_cifar_10_pickle(train_pickle_file, image_depth)
print('Finished loading training data\n')
print('Started loading testing data')
for index, test_pickle_file in enumerate(test_pickle_files):
all_test_data[index * pickle_batch_size: (index + 1) * pickle_batch_size, :], \
all_test_labels[index * pickle_batch_size: (index + 1) * pickle_batch_size] = \
load_cifar_10_pickle(test_pickle_file, image_depth)
print('Finished loading testing data')
return all_train_data, all_train_labels, all_test_data, all_test_labels
def pickle_cifar_10(all_train_data, all_train_labels, all_test_data, all_test_labels,
train_size, valid_size, test_size, output_file_path, FORCE=False):
if os.path.isfile(output_file_path) and not FORCE:
print('\tPickle file already exists: %s' % output_file_path)
with open(output_file_path, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
return train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels
else:
train_dataset = all_train_data[0:train_size]
train_labels = all_train_labels[0:train_size]
valid_dataset = all_train_data[train_size:train_size + valid_size]
valid_labels = all_train_labels[train_size:train_size + valid_size]
test_dataset = all_test_data[0:test_size]
test_labels = all_test_labels[0:test_size]
try:
f = open(output_file_path, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', output_file_path, ':', e)
raise
statinfo = os.stat(output_file_path)
print('Compressed pickle size:', statinfo.st_size)
return train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels
def check_file_status(file_path, expected_size, error_message, close=True):
file_size = os.stat(file_path).st_size
if file_size == expected_size:
print("File status ({}): OK".format(file_path))
return True
else:
print("File status ({}): CORRUPTED. Expected size: {}, found: {}".format(file_path, expected_size, file_size))
print(error_message)
if close:
exit(-1)
else:
return False
def check_folder_status(folder_path, expected_num_of_files, success_message, error_message, close=True):
num_of_files_found = 0
for root, dirs, files in os.walk(folder_path):
num_of_files_found += len(files)
if num_of_files_found == expected_num_of_files:
print(success_message)
return True
else:
print(error_message)
if close:
exit(-1)
else:
return False
def crop_black_borders(image, threshold=0):
"""Crops any edges below or equal to threshold
Crops blank image to 1x1.
Returns cropped image.
"""
if len(image.shape) == 3:
flatImage = np.max(image, 2)
else:
flatImage = image
assert len(flatImage.shape) == 2
rows = np.where(np.max(flatImage, 0) > threshold)[0]
if rows.size:
cols = np.where(np.max(flatImage, 1) > threshold)[0]
image = image[cols[0]: cols[-1] + 1, rows[0]: rows[-1] + 1]
else:
image = image[:1, :1]
return image
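# A quick sanity sketch for crop_black_borders (values are illustrative):
#
#   img = np.zeros((5, 5)); img[1:3, 2:4] = 7
#   crop_black_borders(img).shape               # -> (2, 2): rows 1-2, cols 2-3
#   crop_black_borders(np.zeros((5, 5))).shape  # -> (1, 1) for a blank image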
def prepare_not_mnist_dataset(root_dir="."):
print('Started preparing notMNIST dataset')
image_size = 28
image_depth = 255
training_set_url = 'http://yaroslavvb.com/upload/notMNIST/notMNIST_large.tar.gz'
test_set_url = 'http://yaroslavvb.com/upload/notMNIST/notMNIST_small.tar.gz'
train_download_size = 247336696
test_download_size = 8458043
train_size = 200000
valid_size = 10000
test_size = 10000
num_of_classes = 10
num_of_channels = 1
dataset_path = os.path.realpath(os.path.join(root_dir, "datasets", "notMNIST"))
train_path = os.path.join(dataset_path, "train")
test_path = os.path.join(dataset_path, "test")
train_file_path = download_file(training_set_url, dataset_path, train_download_size)
test_file_path = download_file(test_set_url, dataset_path, test_download_size)
train_extracted_folder = extract_file(train_file_path, train_path)
test_extracted_folder = extract_file(test_file_path, test_path)
print('Started loading training data')
train_pickle_files = make_pickles(train_extracted_folder, train_path, image_size, image_depth)
print('Finished loading training data\n')
print('Started loading testing data')
test_pickle_files = make_pickles(test_extracted_folder, test_path, image_size, image_depth)
print('Finished loading testing data')
print('Started pickling final dataset')
train_dataset, train_labels, valid_dataset, valid_labels, \
test_dataset, test_labels = pickle_whole(train_pickle_files, test_pickle_files, image_size, train_size, valid_size,
test_size, os.path.join(dataset_path, 'notMNIST.pickle'))
print('Finished pickling final dataset')
print('Finished preparing notMNIST dataset')
    def not_mnist(): pass  # bare function used as a simple attribute namespace
not_mnist.train_dataset = train_dataset
not_mnist.train_labels = train_labels
not_mnist.valid_dataset = valid_dataset
not_mnist.valid_labels = valid_labels
not_mnist.test_dataset = test_dataset
not_mnist.test_labels = test_labels
return not_mnist, image_size, num_of_classes, num_of_channels
def prepare_cifar_10_dataset():
print('Started preparing CIFAR-10 dataset')
image_size = 32
image_depth = 255
cifar_dataset_url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
dataset_size = 170498071
train_size = 45000
valid_size = 5000
test_size = 10000
num_of_classes = 10
num_of_channels = 3
pickle_batch_size = 10000
dataset_path = download_file(cifar_dataset_url,
os.path.realpath('../../datasets/CIFAR-10'), dataset_size)
dataset_extracted_folder = extract_file(dataset_path, os.path.realpath('../../datasets/CIFAR-10/data'))
train_pickle_files = ['data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',
'data_batch_5']
train_pickle_files = [dataset_extracted_folder + '/' + x for x in train_pickle_files]
test_pickle_files = ['test_batch']
test_pickle_files = [dataset_extracted_folder + '/' + x for x in test_pickle_files]
print('Started loading CIFAR-10 dataset')
all_train_data, all_train_labels, all_test_data, all_test_labels = load_cifar_10_from_pickles(train_pickle_files,
test_pickle_files,
pickle_batch_size,
image_size,
image_depth,
num_of_channels)
print('Finished loading CIFAR-10 dataset')
print('Started pickling final dataset')
train_dataset, train_labels, valid_dataset, valid_labels, \
test_dataset, test_labels = pickle_cifar_10(all_train_data, all_train_labels, all_test_data, all_test_labels,
train_size, valid_size, test_size,
os.path.realpath('../../datasets/CIFAR-10/CIFAR-10.pickle'), True)
print('Finished pickling final dataset')
print('Finished preparing CIFAR-10 dataset')
    def cifar_10(): pass  # bare function used as a simple attribute namespace
cifar_10.train_dataset = train_dataset
cifar_10.train_labels = train_labels
cifar_10.valid_dataset = valid_dataset
cifar_10.valid_labels = valid_labels
cifar_10.test_dataset = test_dataset
cifar_10.test_labels = test_labels
return cifar_10, image_size, num_of_classes, num_of_channels
def prepare_dr_dataset(dataset_dir):
num_of_processing_threads = 16
dr_dataset_base_path = os.path.realpath(dataset_dir)
unique_labels_file_path = os.path.join(dr_dataset_base_path, "unique_labels_file.txt")
processed_images_folder = os.path.join(dr_dataset_base_path, "processed_images")
num_of_processed_images = 35126
train_processed_images_folder = os.path.join(processed_images_folder, "train")
validation_processed_images_folder = os.path.join(processed_images_folder, "validation")
num_of_training_images = 30000
raw_images_folder = os.path.join(dr_dataset_base_path, "train")
train_labels_csv_path = os.path.join(dr_dataset_base_path, "trainLabels.csv")
def process_images_batch(thread_index, files, labels, subset):
num_of_files = len(files)
for index, file_and_label in enumerate(zip(files, labels)):
file = file_and_label[0] + '.jpeg'
label = file_and_label[1]
input_file = os.path.join(raw_images_folder, file)
output_file = os.path.join(processed_images_folder, subset, str(label), file)
image = ndimage.imread(input_file)
cropped_image = crop_black_borders(image, 10)
resized_cropped_image = imresize(cropped_image, (299, 299, 3), interp="bicubic")
imsave(output_file, resized_cropped_image)
if index % 10 == 0:
print("(Thread {}): Files processed {} out of {}".format(thread_index, index, num_of_files))
def process_images(files, labels, subset):
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
        spacing = np.linspace(0, len(files), num_of_processing_threads + 1).astype(int)  # np.int is deprecated
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
threads = []
for thread_index in range(len(ranges)):
args = (thread_index, files[ranges[thread_index][0]:ranges[thread_index][1]],
labels[ranges[thread_index][0]:ranges[thread_index][1]],
subset)
t = threading.Thread(target=process_images_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
def process_training_and_validation_images():
train_files = []
train_labels = []
validation_files = []
validation_labels = []
with open(train_labels_csv_path) as csvfile:
reader = csv.DictReader(csvfile)
for index, row in enumerate(reader):
if index < num_of_training_images:
train_files.extend([row['image'].strip()])
train_labels.extend([int(row['level'].strip())])
else:
validation_files.extend([row['image'].strip()])
validation_labels.extend([int(row['level'].strip())])
if not os.path.isdir(processed_images_folder):
os.mkdir(processed_images_folder)
if not os.path.isdir(train_processed_images_folder):
os.mkdir(train_processed_images_folder)
if not os.path.isdir(validation_processed_images_folder):
os.mkdir(validation_processed_images_folder)
for directory_index in range(5):
train_directory_path = os.path.join(train_processed_images_folder, str(directory_index))
valid_directory_path = os.path.join(validation_processed_images_folder, str(directory_index))
if not os.path.isdir(train_directory_path):
os.mkdir(train_directory_path)
if not os.path.isdir(valid_directory_path):
os.mkdir(valid_directory_path)
print("Processing training files...")
process_images(train_files, train_labels, "train")
print("Done!")
print("Processing validation files...")
process_images(validation_files, validation_labels, "validation")
print("Done!")
print("Making unique labels file...")
with open(unique_labels_file_path, 'w') as unique_labels_file:
unique_labels = ""
for index in range(5):
unique_labels += "{}\n".format(index)
unique_labels_file.write(unique_labels)
status = check_folder_status(processed_images_folder, num_of_processed_images,
"All processed images are present in place",
"Couldn't complete the image processing of training and validation files.")
return status
process_training_and_validation_images()
return
|
11549771
|
import os
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.db import models
from itsdangerous import URLSafeTimedSerializer, \
SignatureExpired, BadTimeSignature
from uuslug import slugify
from Content.common_tools import crop_img, delete_img
# Create your models here.
class User(AbstractUser):
birth = models.DateField('生日', null=True, blank=True)
about = models.TextField('关于我', max_length=512, default='', blank=True)
slug = models.SlugField('Slug', unique=True)
link = models.URLField("个人网站", blank=True)
avatar = models.ImageField(
'用户头像', upload_to='avatar/%Y/%m/%d', default='avatar/default.jpg')
is_confirmed = models.BooleanField("验证邮箱", default=False)
collection = models.OneToOneField(
'UserCollection', unique=True, related_name='user',\
on_delete=models.CASCADE)
class Meta:
verbose_name = '用户'
verbose_name_plural = '用户'
ordering = ('-date_joined',)
def __str__(self):
return 'User(username=%s)' % self.username
def save(self, *args, **kwargs):
self.slug = slugify(self.username.encode())
if not self.id:
            # First save of this user: create the associated UserCollection
collection = UserCollection()
collection.save()
self.collection = collection
ret = super().save(*args, **kwargs)
default_avatar_path = self.avatar.field.default.replace('/', '\\')
try:
origin_user = User.objects.get(id=self.id)
except User.DoesNotExist:
return ret
else:
if default_avatar_path not in origin_user.avatar.path:
if origin_user.avatar.path != self.avatar.path:
                    # avatar changed: delete the old avatar file
try:
os.remove(origin_user.avatar.path)
except FileNotFoundError:
pass
            if default_avatar_path not in self.avatar.path:
                # only crop user-uploaded avatars, never the default one
AVATAR_WIDTH = getattr(settings, 'AVATAR_WIDTH', 800)
AVATAR_HEIGHT = getattr(settings, 'AVATAR_HEIGHT', 800)
crop_img(self.avatar, AVATAR_WIDTH, AVATAR_HEIGHT)
return ret
def delete(self, *args, **kwargs):
default_path = self.avatar.field.default.replace('/', '\\')
        if default_path not in self.avatar.path:
delete_img(self.avatar)
return super().delete(*args, **kwargs)
def generate_token(self, info):
serializer = URLSafeTimedSerializer(getattr(settings, 'SECRET_KEY'))
key_info = {'id': self.id, 'info': info, }
token = serializer.dumps(key_info)
return token
@staticmethod
def load_token(token, expiration):
serializer = URLSafeTimedSerializer(getattr(settings, 'SECRET_KEY'))
try:
key_info = serializer.loads(token, max_age=expiration)
except SignatureExpired:
return False
except BadTimeSignature:
return False
else:
return key_info
def collect_book(self, book):
if not self.collection.books.filter(id=book.id).all():
self.collection.books.add(book)
self.save()
return True
else:
return False
def collect_discussion(self, discussion):
if not self.collection.discussions.filter(id=discussion.id).all():
self.collection.discussions.add(discussion)
self.save()
return True
else:
return False
def remove_collected_book(self, book):
if not self.collection.books.filter(id=book.id).all():
return False
else:
self.collection.books.remove(book)
return True
def remove_collected_discussion(self, discussion):
if not self.collection.discussions.filter(id=discussion.id).all():
return False
else:
self.collection.discussions.remove(discussion)
return True
class UserCollection(models.Model):
    # TODO: flesh out the collection (favorites) feature
books = models.ManyToManyField(
'Content.Book', related_name='collection_users')
discussions = models.ManyToManyField(
'Discussion.Discuss', related_name='collection_users')
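# A hedged sketch of the token round trip above; it assumes configured Django
# settings (SECRET_KEY) and an existing user, and the expiration is arbitrary:
#
#   token = user.generate_token('confirm-email')
#   key_info = User.load_token(token, expiration=3600)
#   if key_info and key_info['id'] == user.id:
#       ...  # token is valid and fresh; act on key_info['info']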
|
11549823
|
from common import *
from scitools.numpyutils import ravel, zeros, array, allclose, rank, \
meshgrid, newaxis
from scitools.globaldata import DEBUG, VERBOSE
from scitools.numpyutils import NumPy_dtype
from scitools.misc import check_if_module_exists
check_if_module_exists('vtk', msg='You need to install the VTK package.', abort=False)
import vtk
#import vtk.util.colors
import os
import Tkinter
# use old behavior in Tkinter module to get around issue with Tcl
# (more info: http://www.python.org/doc/2.3/whatsnew/node18.html)
Tkinter.wantobjects = 0
try: import vtkTkRenderWidget
except ImportError:
    from vtk.tk import vtkTkRenderWidget
class VtkBackend(BaseClass):
"""Backend using VTK."""
def __init__(self):
BaseClass.__init__(self)
self.init()
def init(self):
self._master = None
self.figure(self._attrs['curfig'])
# conversion tables for format strings
self._colors = {
'': (0,0,1), # No color-->Blue
'k': (0,0,0), # Black
'r': (1,0,0), # Red
'g': (0,1,0), # Green
'b': (0,0,1), # Blue
'm': (1,0,1), # Magenta
'c': (0,1,1), # Cyan
'w': (1,1,1), # White
'y': (1,1,0), # Yellow
}
self._arrow_types = { # tuple: (type,rotation)
'': (9,0), # arrow
'.': (0,0), # no marker
'o': (7,0), # circle
'+': (3,0), # plus
'x': (3,45), # x-mark
'*': (3,0), # star --> plus
's': (6,0), # square
'd': (8,0), # diamond
'v': (5,180),# triangle (down)
'^': (5,0), # triangle (up)
'<': (5,90), # triangle (left)
'>': (5,270),# triangle (right)
'p': (6,0), # pentagram --> square
'h': (6,0), # hexagram --> square
}
self._colorbar_locations = {
'North': ((.2, .75), (.6,.09)),
'South': ((.2, .2), (.6, .09)),
'East': ((.75, .09), (.1, .9)),
'West': ((.2, .09), (.1, .9)),
'NorthOutside': ((.2, .86), (.6,.09)),
'SouthOutside': ((.2, .06), (.6, .09)),
'EastOutside': ((.86, .09), (.1, .9)),
'WestOutside': ((.01, .09), (.1, .9))
}
try:
v = vtk.vtkMesaRenderer()
_graphics_fact = vtk.vtkGraphicsFactory()
_graphics_fact.SetUseMesaClasses(1)
_image_fact = vtk.vtkImagingFactory()
_image_fact.SetUseMesaClasses(1)
del _graphics_fact
del _image_fact
del v
except Exception, msg:
if DEBUG:
print "No mesa", msg
def _create_Tk_gui(self):
fig = self.gcf()
if self._master is None:
self._master = Tkinter.Tk()
self._master.withdraw()
fig._root = Tkinter.Toplevel(self._master)
fig._root.title("Easyviz VTK Data Visualizer - Figure %d" % \
self._attrs['curfig'])
# if the window is closed, we should delete the current figure and
# create a new one.
def _close_fig(event=None):
self.clf()
fig._root.withdraw()
fig._root.protocol("WM_DELETE_WINDOW", _close_fig)
fig._root.minsize(200, 200)
fig._root.bind("<KeyPress-q>", _close_fig)
fig._root.withdraw()
master_f = Tkinter.Frame(fig._root, relief='sunken', bd=2)
master_f.pack(side='top', fill='both', expand=1)
renwin_frame = Tkinter.Frame(master_f)
renwin_frame.pack(side='left', fill='both', expand=1)
frame = Tkinter.Frame(renwin_frame)
frame.pack(side='top', fill='both', expand=1)
width, height = fig.getp('size')
if width is None or height is None:
width = 640; height = 480
tkw = vtkTkRenderWidget.vtkTkRenderWidget(frame,
width=width,
height=height)
tkw.pack(expand='true', fill='both')
renwin = tkw.GetRenderWindow()
renwin.SetSize(width, height)
#renwin.SetSize(width+1, height+1)
#renwin.SetSize(width-1, height-1)
#renwin.LineSmoothingOn()
#tkw.UpdateRenderer(0.0, 0.0)
#renwin.Render()
return renwin
def _set_view_old(self):
axis_cam = self._axis.getp('camera')
if not hasattr(self._axis, '_vtk_camera'):
# initialize camera:
pass
else:
# alter camera:
pass
camera = vtk.vtkCamera()
self._axis._renderer.SetActiveCamera(camera)
if axis_cam.getp('camproj') == 'perspective':
camera.ParallelProjectionOff()
else:
camera.ParallelProjectionOn()
fp = axis_cam.getp('camtarget')
camera.SetFocalPoint(fp)
camera.SetViewUp(axis_cam.getp('camup'))
if axis_cam.getp('cammode') == 'auto':
if axis_cam.getp('view') == 3:
camera.SetPosition(fp[0],fp[1]-1,fp[2])
camera.Azimuth(-37.5)
camera.Elevation(30)
else:
camera.SetPosition(fp[0], fp[1], 1)
else:
camera.SetPosition(axis_cam.getp('campos'))
#camera.ComputeViewPlaneNormal()
#camera.OrthogonalizeViewUp()
if axis_cam.getp('camva') is not None:
camera.SetViewAngle(axis_cam.getp('camva'))
azimuth = axis_cam.getp('azimuth')
if azimuth is not None:
if axis_cam.getp('view') == 3:
azimuth += 37.5 # compensate for above
camera.Azimuth(azimuth)
elevation = axis_cam.getp('elevation')
if elevation is not None:
if axis_cam.getp('view') == 3:
elevation -= 30 # compensate for above
camera.Elevation(elevation)
# make all actors fit inside the current scene:
self._axis._renderer.ResetCamera()
camera.Zoom(axis_cam.getp('camzoom'))
self._axis._vtk_camera = camera
def _initialize_camera(self):
ax_cam = self._axis.getp('camera')
camera = vtk.vtkCamera()
fp = ax_cam.getp('camtarget')
camera.SetFocalPoint(fp)
camera.SetViewUp(ax_cam.getp('camup'))
if ax_cam.getp('cammode') == 'auto':
if ax_cam.getp('view') == 3:
camera.SetPosition(fp[0],fp[1]-1,fp[2])
camera.Azimuth(-37.5)
camera.Elevation(30)
else:
camera.SetPosition(fp[0], fp[1], 1)
else:
camera.SetPosition(ax_cam.getp('campos'))
azimuth = ax_cam.getp('azimuth')
if azimuth is not None:
if ax_cam.getp('view') == 3:
azimuth += 37.5 # compensate for above
camera.Azimuth(azimuth)
elevation = ax_cam.getp('elevation')
if elevation is not None:
if ax_cam.getp('view') == 3:
elevation -= 30 # compensate for above
camera.Elevation(elevation)
return camera
def _update_camera(self):
ax_cam = self._axis.getp('camera')
camera = self._axis._vtk_camera
return camera
def _set_view(self):
ax_cam = self._axis.getp('camera')
if not hasattr(self._axis, '_vtk_camera'):
camera = self._initialize_camera()
else:
#camera = self._update_camera()
camera = self._initialize_camera()
if ax_cam.getp('camroll') is not None:
camera.Roll(ax_cam.getp('camroll'))
if ax_cam.getp('camva') is not None:
camera.SetViewAngle(ax_cam.getp('camva'))
if ax_cam.getp('camproj') == 'perspective':
camera.ParallelProjectionOff()
else:
camera.ParallelProjectionOn()
self._axis._renderer.SetActiveCamera(camera)
self._axis._vtk_camera = camera
# make sure all actors are inside the current view:
ren = self._axis._renderer
ren.ResetCamera()
#if self._axis.getp('camera').getp('view') == 2:
# ren.GetActiveCamera().Zoom(1.5)
camera.Zoom(ax_cam.getp('camzoom'))
# set the camera in the vtkCubeAxesActor2D object:
self._axis._vtk_axes.SetCamera(camera)
def _create_labeled_axes(self):
ax = self._axis
if not hasattr(ax, '_vtk_axes'):
ax._vtk_axes = vtk.vtkCubeAxesActor2D()
if ax.getp('visible'):
tprop = vtk.vtkTextProperty()
tprop.SetColor(ax.getp('fgcolor'))
tprop.SetFontSize(ax.getp('fontsize'))
tprop.SetShadow(0)
tprop.SetBold(0)
mode = ax.getp('mode')
if mode in ['auto', 'tight']:
dar = ax.getp('daspect')
bounds = b = list(ax._vtk_apd.GetOutput().GetBounds())
if mode == 'auto':
incr = 0.1
dx = float(b[1] - b[0])
dy = float(b[3] - b[2])
dz = float(b[5] - b[4])
#b[0] -= dx*incr; b[1] += dx*incr
#b[2] -= dy*incr; b[3] += dy*incr
#b[4] -= dz*incr; b[5] += dz*incr
unscaled_bounds = ub = bounds[:]
ub[0] *= dar[0]; ub[1] *= dar[0]
ub[2] *= dar[1]; ub[3] *= dar[1]
ub[4] *= dar[2]; ub[5] *= dar[2]
elif mode == 'fill':
print "axis mode 'fill' not implemented in VtkBackend"
elif mode == 'manual':
bounds = ax._vtk_scaled_bounds
unscaled_bounds = ax._vtk_bounds
#cube_axes = vtk.vtkCubeAxesActor2D()
ax._vtk_axes.SetBounds(bounds)
ax._vtk_axes.SetRanges(unscaled_bounds)
ax._vtk_axes.UseRangesOn()
#ax._vtk_axes.SetCamera(ax._vtk_camera)
ax._vtk_axes.SetLabelFormat("%6.3g")
ax._vtk_axes.SetFlyModeToOuterEdges()
#ax._vtk_axes.SetFontFactor(ax.getp('fontsize')/10.)
ax._vtk_axes.ScalingOff()
ax._vtk_axes.SetAxisTitleTextProperty(tprop)
ax._vtk_axes.SetAxisLabelTextProperty(tprop)
ax._vtk_axes.GetProperty().SetColor(ax.getp('fgcolor'))
ax._vtk_axes.SetCornerOffset(0)
ax._vtk_axes.SetNumberOfLabels(5)
ax._vtk_axes.SetXLabel(ax.getp('xlabel'))
ax._vtk_axes.SetYLabel(ax.getp('ylabel'))
ax._vtk_axes.SetZLabel(ax.getp('zlabel'))
if ax.getp('camera').getp('view') == 2:
ax._vtk_axes.YAxisVisibilityOff()
else:
ax._vtk_axes.YAxisVisibilityOn()
ax._renderer.AddActor(ax._vtk_axes)
ax._vtk_box_bounds = bounds
def _set_box_state(self):
ax = self._axis
if not hasattr(ax, '_vtk_box'):
ax._vtk_box = vtk.vtkActor()
# remove old box (if present):
if ax._renderer.GetActors().IsItemPresent(ax._vtk_box):
ax._renderer.RemoveActor(ax._vtk_box)
if ax.getp('box'):
box = vtk.vtkCubeSource()
#box.SetBounds(ax._vtk_scaled_bounds)
box.SetBounds(ax._vtk_box_bounds)
box.Update()
outline = vtk.vtkOutlineFilter()
outline.SetInput(box.GetOutput())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(outline.GetOutput())
ax._vtk_box.SetMapper(mapper)
ax._vtk_box.GetProperty().SetColor(ax.getp('fgcolor'))
ax._renderer.AddActor(ax._vtk_box)
def _set_colormap(self):
colormap = self._axis.getp('colormap')
if not isinstance(colormap, vtk.vtkLookupTable):
colormap = self.jet() # use default colormap
self._axis._vtk_colormap = colormap
def _set_colorbar(self):
ax = self._axis
cbar = ax.getp('colorbar')
if not hasattr(ax, '_vtk_colorbar'):
ax._vtk_colorbar = vtk.vtkScalarBarActor()
if ax._renderer.GetActors().IsItemPresent(ax._vtk_colorbar):
ax._renderer.RemoveActor2D(ax._vtk_colorbar)
if cbar.getp('visible'):
cblocation = cbar.getp('cblocation')
cbloc = self._colorbar_locations[cblocation]
ax._vtk_colorbar.SetLookupTable(ax._vtk_colormap)
if 'North' in cblocation or 'South' in cblocation:
ax._vtk_colorbar.SetOrientationToHorizontal()
else:
ax._vtk_colorbar.SetOrientationToVertical()
ax._vtk_colorbar.SetTitle(cbar.getp('cbtitle'))
ax._vtk_colorbar.SetPosition(*cbloc[0])
ax._vtk_colorbar.SetPosition2(*cbloc[1])
tprop = vtk.vtkTextProperty()
tprop.SetColor(ax.getp('fgcolor'))
tprop.SetFontSize(ax.getp('fontsize'))
tprop.ShadowOff()
ax._vtk_colorbar.SetTitleTextProperty(tprop)
ax._vtk_colorbar.SetLabelTextProperty(tprop)
ax._renderer.AddActor(ax._vtk_colorbar)
def _set_shading(self, item, source, actor):
shading = self._axis.getp('shading')
if shading == 'interp':
actor.GetProperty().SetInterpolationToGouraud()
elif shading == 'flat':
actor.GetProperty().SetInterpolationToFlat()
else: # use default shading ('faceted')
actor.GetProperty().SetInterpolationToFlat()
edges = vtk.vtkExtractEdges()
edges.SetInput(source.GetOutput())
edges.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(edges.GetOutput())
mapper.ScalarVisibilityOff()
mapper.SetResolveCoincidentTopologyToPolygonOffset()
mesh = vtk.vtkActor()
mesh.SetMapper(mapper)
color = item.getp('linecolor')
if color == '' or color is None:
color = (0,0,0) # use black as default
elif not isinstance(color, (tuple,list)):
            try: color = self._colors[color]
            except KeyError: color = (0,0,0) # use black as default
mesh.GetProperty().SetColor(color)
self._axis._renderer.AddActor(mesh)
def _set_title(self):
tprop = vtk.vtkTextProperty()
tprop.BoldOff()
tprop.SetFontSize(self._axis.getp('fontsize'))
tprop.SetColor(self._axis.getp('fgcolor'))
tprop.SetFontFamilyToArial()
tprop.SetVerticalJustificationToTop()
tprop.SetJustificationToCentered()
mapper = vtk.vtkTextMapper()
mapper.SetInput(self._fix_latex(self._axis.getp('title')))
mapper.SetTextProperty(tprop)
actor = vtk.vtkActor2D()
actor.SetMapper(mapper)
actor.GetPositionCoordinate().SetCoordinateSystemToView()
actor.GetPositionCoordinate().SetValue(0.0, 0.95)
self._axis._renderer.AddActor(actor)
def _set_caxis(self):
if self._axis.getp('caxismode') == 'auto':
caxis = None
else:
caxis = self._axis.getp('caxis')
self._axis._vtk_caxis = caxis
def _data_inside_bounds(self, data):
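        # True if the dataset's bounding box lies within the scaled axis
        # bounds (with a small numerical tolerance on each side).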
fb = self._axis._vtk_scaled_bounds
bounds = data.GetBounds()
for i in range(0, len(fb), 2):
if bounds[i] < fb[i] and not allclose(bounds[i],fb[i]):
return False
for i in range(1, len(fb), 2):
if bounds[i] > fb[i] and not allclose(bounds[i],fb[i]):
return False
return True
def _cut_data(self, indata):
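        # Clip the data against the axis bounding box; data that already
        # fits inside is returned unchanged.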
if self._data_inside_bounds(indata.GetOutput()):
return indata
box = vtk.vtkBox()
box.SetBounds(self._axis._vtk_scaled_bounds)
clipper = vtk.vtkClipPolyData()
clipper.SetInput(indata.GetOutput())
clipper.SetClipFunction(box)
#clipper.GenerateClipScalarsOn()
#clipper.GenerateClippedOutputOn()
clipper.SetValue(0.0)
clipper.InsideOutOn()
clipper.Update()
return clipper
def _add_slices(self, item, sgrid, contours=False):
cvector = item.getp('cvector')
center = sgrid.GetCenter()
dar = self._axis.getp('daspect')
sx, sy, sz = item.getp('slices')
if len(shape(sx)) == 2 and shape(sx) == shape(sy) == shape(sz):
s = Surface(sx,sy,sz)
sgrid2 = self._get_2d_structured_grid(s)
plane = vtk.vtkStructuredGridGeometryFilter()
plane.SetInput(sgrid2)
plane.Update()
data = self._cut_data(plane)
implds = vtk.vtkImplicitDataSet()
implds.SetDataSet(data.GetOutput())
implds.Modified()
cut = vtk.vtkCutter()
cut.SetInput(sgrid)
cut.SetCutFunction(implds)
cut.GenerateValues(10, -2,2)
cut.GenerateCutScalarsOn()
cut.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(cut.GetOutput())
mapper.SetLookupTable(self._axis._vtk_colormap)
caxis = self._axis.getp('caxis')
if None in caxis:
caxis = data.GetOutput().GetScalarRange()
mapper.SetScalarRange(caxis)
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
self._set_shading(item, data, actor)
self._set_actor_properties(item, actor)
self._axis._renderer.AddActor(actor)
self._axis._vtk_apd.AddInput(cut.GetOutput())
self._axis._vtk_apd.AddInput(data.GetOutput())
else:
origins = []
normals = []
sx = ravel(sx)/dar[0]
sy = ravel(sy)/dar[1]
sz = ravel(sz)/dar[2]
for i in range(len(sx)):
normals.append([1,0,0])
origins.append([sx[i], center[1], center[2]])
for i in range(len(sy)):
normals.append([0,1,0])
origins.append([center[0], sy[i], center[2]])
for i in range(len(sz)):
normals.append([0,0,1])
origins.append([center[0], center[1], sz[i]])
for i in range(len(normals)):
plane = vtk.vtkPlane()
plane.SetOrigin(origins[i])
plane.SetNormal(normals[i])
cut = vtk.vtkCutter()
cut.SetInput(sgrid)
cut.SetCutFunction(plane)
cut.Update()
data = self._cut_data(cut)
mapper = vtk.vtkPolyDataMapper()
if contours:
iso = vtk.vtkContourFilter()
iso.SetInput(data.GetOutput())
if cvector is not None:
for i in range(len(cvector)):
iso.SetValue(i, cvector[i])
else:
zmin, zmax = data.GetOutput().GetScalarRange()
iso.GenerateValues(item.getp('clevels'), zmin, zmax)
iso.Update()
mapper.SetInput(iso.GetOutput())
else:
mapper.SetInput(data.GetOutput())
mapper.SetLookupTable(self._axis._vtk_colormap)
caxis = self._axis.getp('caxis')
if None in caxis:
caxis = sgrid.GetScalarRange()
mapper.SetScalarRange(caxis)
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
if not contours:
self._set_shading(item, data, actor)
self._set_actor_properties(item, actor)
self._axis._renderer.AddActor(actor)
self._axis._vtk_apd.AddInput(cut.GetOutput())
def _add_isosurface(self, item, sgrid):
iso = vtk.vtkContourFilter()
iso.SetInput(sgrid)
iso.SetValue(0, item.getp('isovalue'))
iso.Update()
data = self._cut_data(iso)
normals = vtk.vtkPolyDataNormals()
normals.SetInput(data.GetOutput())
normals.SetFeatureAngle(45)
normals.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(normals.GetOutput())
mapper.SetLookupTable(self._axis._vtk_colormap)
caxis = self._axis.getp('caxis')
if None in caxis:
caxis = sgrid.GetScalarRange()
mapper.SetScalarRange(caxis)
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
self._set_shading(item, data, actor)
self._set_actor_properties(item, actor)
self._axis._renderer.AddActor(actor)
self._axis._vtk_apd.AddInput(normals.GetOutput())
def _get_2d_structured_grid(self, item, vectors=False,
heights=True, bottom=False):
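        # Build a vtkStructuredGrid from a 2D grid item, scaling coordinates
        # by the axis data aspect ratio; attaches vectors (quiver data) or
        # scalars (surface/contour data) as point data.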
indexing = item.getp('indexing')
dar = self._axis.getp('daspect')
x = asarray(item.getp('xdata'))/dar[0]
y = asarray(item.getp('ydata'))/dar[1]
sgrid = vtk.vtkStructuredGrid()
sgrid.SetDimensions(item.getp('dims'))
no_points = item.getp('numberofpoints')
points = vtk.vtkPoints()
points.SetNumberOfPoints(no_points)
if vectors:
if hasattr(item, 'scale_vectors'):
item.scale_vectors()
x = ravel(x)
y = ravel(y)
u = asarray(item.getp('udata'))
v = ravel(item.getp('vdata'))
w = item.getp('wdata')
z = item.getp('zdata')
if z is None and w is None:
z = w = zeros(shape(u))
z = ravel(z)
w = ravel(w)
if item.getp('function') == 'quiver3':
z = z/dar[2]
if rank(u) == 2:
nx, ny = shape(u)
if indexing == 'ij':
if len(x) == nx:
x = ravel(x[:,newaxis]*ones((nx,ny)))
if len(y) == ny:
y = ravel(y[newaxis,:]*ones((nx,ny)))
else:
if len(x) == ny:
x = ravel(x[newaxis,:]*ones((nx,ny)))
if len(y) == nx:
y = ravel(y[:,newaxis]*ones((nx,ny)))
u = ravel(u)
vectors = vtk.vtkFloatArray()
vectors.SetNumberOfTuples(no_points)
vectors.SetNumberOfComponents(3)
vectors.SetNumberOfValues(3*no_points)
assert shape(x)==shape(y)==shape(z)==shape(u)==shape(v)==shape(w),\
"matrix dimensions must agree"
for i in range(no_points):
points.SetPoint(i, x[i], y[i], z[i])
vectors.SetTuple3(i, u[i], v[i], w[i])
points.Modified()
sgrid.SetPoints(points)
sgrid.GetPointData().SetVectors(vectors)
else:
values = asarray(item.getp('zdata'))
if heights:
z = values/dar[2]
elif bottom:
z = zeros(values.shape, NumPy_dtype(values)) + \
self._axis._vtk_scaled_bounds[4]
else:
z = zeros(values.shape, NumPy_dtype(values))
try:
cdata = asarray(item.getp('cdata'))
except KeyError:
pass
else:
if cdata is not None and cdata.shape == values.shape:
values = cdata
scalars = vtk.vtkFloatArray()
scalars.SetNumberOfTuples(no_points)
scalars.SetNumberOfComponents(1)
nx, ny = shape(values)
if not (shape(x) == shape(y) == (nx,ny)):
x, y = meshgrid(ravel(x), ravel(y),
sparse=False, indexing=indexing)
assert shape(x) == shape(y) == shape(z), \
"array dimensions must agree"
ind = 0
for j in range(ny):
for i in range(nx):
points.SetPoint(ind, x[i,j], y[i,j], z[i,j])
scalars.SetValue(ind, values[i,j])
ind += 1
points.Modified()
sgrid.SetPoints(points)
sgrid.GetPointData().SetScalars(scalars)
sgrid.Update()
return sgrid
def _get_3d_structured_grid(self, item, vectors=False):
indexing = item.getp('indexing')
dar = self._axis.getp('daspect')
x = asarray(item.getp('xdata'))/dar[0]
y = asarray(item.getp('ydata'))/dar[1]
z = asarray(item.getp('zdata'))/dar[2]
sgrid = vtk.vtkStructuredGrid()
sgrid.SetDimensions(item.getp('dims'))
no_points = item.getp('numberofpoints')
points = vtk.vtkPoints()
points.SetNumberOfPoints(no_points)
if vectors:
u = asarray(item.getp('udata'))
v = asarray(item.getp('vdata'))
w = asarray(item.getp('wdata'))
nx, ny, nz = shape(u)
if not (shape(x) == shape(y) == shape(z)):
x, y, z = meshgrid(ravel(x), ravel(y), ravel(z),
sparse=False, indexing=indexing)
assert shape(x) == shape(y) == shape(z) == \
shape(u) == shape(v) == shape(w), \
"array dimensions must agree"
vectors = vtk.vtkFloatArray()
vectors.SetNumberOfTuples(no_points)
vectors.SetNumberOfComponents(3)
vectors.SetNumberOfValues(3*no_points)
ind = 0
for k in range(nz):
for j in range(ny):
for i in range(nx):
points.SetPoint(ind, x[i,j,k], y[i,j,k], z[i,j,k])
vectors.SetTuple3(ind, u[i,j,k], v[i,j,k], w[i,j,k])
ind += 1
points.Modified()
sgrid.SetPoints(points)
sgrid.GetPointData().SetVectors(vectors)
else:
scalars = vtk.vtkFloatArray()
scalars.SetNumberOfTuples(no_points)
scalars.SetNumberOfComponents(1)
v = asarray(item.getp('vdata'))
# TODO: what about pseudocolor data?
#cdata = ravel(item.getp('cdata'))
#if cdata is not None:
# v = cdata
nx, ny, nz = shape(v)
if not (shape(x) == shape(y) == shape(z) == (nx,ny,nz)):
x, y, z = meshgrid(ravel(x), ravel(y), ravel(z),
sparse=False, indexing=indexing)
assert shape(x) == shape(y) == shape(z) == shape(v), \
"array dimensions must agree"
ind = 0
for k in range(nz):
for j in range(ny):
for i in range(nx):
points.SetPoint(ind, x[i,j,k], y[i,j,k], z[i,j,k])
scalars.SetValue(ind, v[i,j,k])
ind += 1
points.Modified()
sgrid.SetPoints(points)
sgrid.GetPointData().SetScalars(scalars)
sgrid.Update()
return sgrid
def _add_surface(self, item, sgrid):
plane = vtk.vtkStructuredGridGeometryFilter()
plane.SetInput(sgrid)
plane.Update()
data = self._cut_data(plane)
normals = vtk.vtkPolyDataNormals()
normals.SetInput(data.GetOutput())
normals.SetFeatureAngle(45)
normals.Update()
mapper = vtk.vtkDataSetMapper()
mapper.SetInput(normals.GetOutput())
mapper.SetLookupTable(self._axis._vtk_colormap)
caxis = self._axis.getp('caxis')
if None in caxis:
caxis = data.GetOutput().GetScalarRange()
mapper.SetScalarRange(caxis)
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
if item.getp('wireframe'):
actor.GetProperty().SetRepresentationToWireframe()
else:
self._set_shading(item, data, actor)
self._set_actor_properties(item, actor)
self._add_legend(item, normals.GetOutput())
self._axis._renderer.AddActor(actor)
self._axis._vtk_apd.AddInput(normals.GetOutput())
def _add_contours(self, item, sgrid):
plane = vtk.vtkStructuredGridGeometryFilter()
plane.SetInput(sgrid)
plane.Update()
data = self._cut_data(plane)
filled = item.getp('filled')
if filled:
iso = vtk.vtkBandedPolyDataContourFilter()
iso.SetScalarModeToValue()
iso.GenerateContourEdgesOn()
else:
iso = vtk.vtkContourFilter()
iso.SetInput(data.GetOutput())
clevels = item.getp('clevels')
cvector = item.getp('cvector')
if cvector is not None:
for i in range(clevels):
iso.SetValue(i, cvector[i])
else:
zmin, zmax = data.GetOutput().GetScalarRange()
iso.SetNumberOfContours(clevels)
iso.GenerateValues(clevels, zmin, zmax)
iso.Update()
isoMapper = vtk.vtkPolyDataMapper()
isoMapper.SetInput(iso.GetOutput())
isoMapper.SetLookupTable(self._axis._vtk_colormap)
caxis = self._axis.getp('caxis')
if None in caxis:
caxis = data.GetOutput().GetScalarRange()
isoMapper.SetScalarRange(caxis)
if item.getp('linecolor'): # linecolor is defined:
isoMapper.ScalarVisibilityOff()
isoMapper.Update()
isoActor = vtk.vtkActor()
isoActor.SetMapper(isoMapper)
self._set_actor_properties(item, isoActor)
self._add_legend(item, iso.GetOutput())
self._axis._renderer.AddActor(isoActor)
self._axis._vtk_apd.AddInput(data.GetOutput())
if filled: # create contour edges:
edgeMapper = vtk.vtkPolyDataMapper()
edgeMapper.SetInput(iso.GetContourEdgesOutput())
edgeMapper.SetResolveCoincidentTopologyToPolygonOffset()
edgeActor = vtk.vtkActor()
edgeActor.SetMapper(edgeMapper)
edgeActor.GetProperty().SetColor(0, 0, 0)
self._axis._renderer.AddActor(edgeActor)
if item.getp('clabels'):
# subsample the points and label them:
mask = vtk.vtkMaskPoints()
mask.SetInput(iso.GetOutput())
mask.SetOnRatio(data.GetOutput().GetNumberOfPoints()/50)
mask.SetMaximumNumberOfPoints(50)
mask.RandomModeOn()
# Create labels for points - only show visible points
visPts = vtk.vtkSelectVisiblePoints()
visPts.SetInput(mask.GetOutput())
visPts.SetRenderer(self._axis._renderer)
ldm = vtk.vtkLabeledDataMapper()
ldm.SetInput(mask.GetOutput())
ldm.SetLabelFormat("%.1g")
ldm.SetLabelModeToLabelScalars()
tprop = ldm.GetLabelTextProperty()
tprop.SetFontFamilyToArial()
tprop.SetFontSize(10)
tprop.SetColor(0,0,0)
tprop.ShadowOff()
tprop.BoldOff()
contourLabels = vtk.vtkActor2D()
contourLabels.SetMapper(ldm)
self._axis._renderer.AddActor(contourLabels)
def _add_velocity_vectors(self, item, sgrid):
marker, rotation = self._arrow_types[item.getp('linemarker')]
arrow = vtk.vtkGlyphSource2D()
arrow.SetGlyphType(marker)
arrow.SetFilled(item.getp('filledarrows'))
arrow.SetRotationAngle(rotation)
if arrow.GetGlyphType() != 9: # not an arrow
arrow.DashOn()
arrow.SetCenter(.75,0,0)
else:
arrow.SetCenter(.5,0,0)
arrow.SetColor(self._colors[item.getp('linecolor')])
plane = vtk.vtkStructuredGridGeometryFilter()
plane.SetInput(sgrid)
plane.Update()
data = self._cut_data(plane)
glyph = vtk.vtkGlyph3D()
glyph.SetInput(data.GetOutput())
glyph.SetSource(arrow.GetOutput())
glyph.SetColorModeToColorByVector()
glyph.SetRange(data.GetOutput().GetScalarRange())
glyph.ScalingOn()
glyph.SetScaleModeToScaleByVector()
glyph.OrientOn()
glyph.SetVectorModeToUseVector()
glyph.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(glyph.GetOutput())
#vr = data.GetOutput().GetPointData().GetVectors().GetRange()
#mapper.SetScalarRange(vr)
mapper.ScalarVisibilityOff()
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
self._set_actor_properties(item, actor)
self._add_legend(item, arrow.GetOutput())
self._axis._renderer.AddActor(actor)
self._axis._vtk_apd.AddInput(glyph.GetOutput())
def _add_streams(self, item, sgrid):
length = sgrid.GetLength()
max_velocity = sgrid.GetPointData().GetVectors().GetMaxNorm()
max_time = 35.0*length/max_velocity
n = item.getp('numberofstreams')
sx = ravel(item.getp('startx'))
sy = ravel(item.getp('starty'))
sz = None
if item.getp('startz') is not None:
sz = ravel(item.getp('startz'))
dar = self._axis.getp('daspect')
for i in range(n):
integ = vtk.vtkRungeKutta2() # or 4?
stream = vtk.vtkStreamLine()
stream.SetInput(sgrid)
stream.SetStepLength(item.getp('stepsize'))
#stream.SetIntegrationStepLength(item.getp('stepsize'))
#stream.SetIntegrationDirectionToIntegrateBothDirections()
stream.SetIntegrationDirectionToForward()
#stream.SetMaximumPropagationTime(max_time)
#stream.SetMaximumPropagationTime(200)
stream.SpeedScalarsOn()
#stream.VorticityOn()
if sz is None:
stream.SetStartPosition(sx[i]/dar[0], sy[i]/dar[1], 0)
else:
stream.SetStartPosition(sx[i]/dar[0],
sy[i]/dar[1],
sz[i]/dar[2])
stream.SetIntegrator(integ)
stream.Update()
data = self._cut_data(stream)
if item.getp('ribbons'):
streamribbon = vtk.vtkRibbonFilter()
streamribbon.SetInput(data.GetOutput())
streamribbon.VaryWidthOn()
streamribbon.SetWidthFactor(item.getp('ribbonwidth'))
#streamribbon.SetAngle(90)
streamribbon.SetDefaultNormal([0,1,0])
streamribbon.UseDefaultNormalOn()
streamribbon.Update()
output = streamribbon.GetOutput()
elif item.getp('tubes'):
streamtube = vtk.vtkTubeFilter()
streamtube.SetInput(data.GetOutput())
streamtube.SetRadius(1)
streamtube.SetNumberOfSides(item.getp('n'))
streamtube.SetVaryRadiusToVaryRadiusByVector()
streamtube.Update()
output = streamtube.GetOutput()
else:
output = data.GetOutput()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(output)
mapper.SetLookupTable(self._axis._vtk_colormap)
caxis = self._axis.getp('caxis')
if None in caxis:
caxis = output.GetBounds()[4:]
#caxis = sgrid.GetScalarRange()
mapper.SetScalarRange(caxis)
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
#self._set_shading(item, stream, actor)
self._set_actor_properties(item, actor)
self._add_legend(item, output)
self._axis._renderer.AddActor(actor)
self._axis._vtk_apd.AddInput(output)
def _add_line(self, item):
dar = self._axis.getp('daspect')
n = item.getp('numberofpoints')
polydata = vtk.vtkPolyData()
polydata.SetLines(vtk.vtkCellArray())
points = vtk.vtkPoints()
#points.SetNumberOfPoints(n)
x = ravel(item.getp('xdata'))/dar[0]
y = ravel(item.getp('ydata'))/dar[1]
z = item.getp('zdata')
if z is not None:
z = ravel(z)/dar[2]
else:
z = zeros(n, NumPy_dtype(x))
        for i in range(1,n):
            points.InsertNextPoint(x[i-1], y[i-1], z[i-1])
            # one two-point line cell (VTK cell type 3) per segment:
            ids = vtk.vtkIdList()
            ids.InsertNextId(i-1)
            ids.InsertNextId(i)
            polydata.InsertNextCell(3, ids)
        points.InsertNextPoint(x[n-1], y[n-1], z[n-1]) # last point
polydata.SetPoints(points)
polydata.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
self._set_actor_properties(item, actor)
self._add_legend(item, polydata)
self._axis._renderer.AddActor(actor)
self._axis._vtk_apd.AddInput(polydata)
def _set_actor_properties(self, item, actor):
# set line properties:
color = item.getp('linecolor')
if not isinstance(color, (tuple,list)):
            try: color = self._colors[color]
            except KeyError: color = (0,0,1) # use blue as default
actor.GetProperty().SetColor(color)
if item.getp('linetype') == '--':
actor.GetProperty().SetLineStipplePattern(65280)
elif item.getp('linetype') == ':':
actor.GetProperty().SetLineStipplePattern(0x1111)
actor.GetProperty().SetLineStippleRepeatFactor(1)
#actor.GetProperty().SetPointSize(item.getp('pointsize'))
linewidth = item.getp('linewidth')
if linewidth:
actor.GetProperty().SetLineWidth(float(linewidth))
# set material properties:
ax = self._axis
mat = item.getp('material')
if mat.getp('opacity') is not None:
actor.GetProperty().SetOpacity(mat.getp('opacity'))
if mat.getp('ambient') is not None:
actor.GetProperty().SetAmbient(mat.getp('ambient'))
if ax.getp('ambientcolor') is not None:
actor.GetProperty().SetAmbientColor(ax.getp('ambientcolor'))
if mat.getp('diffuse') is not None:
actor.GetProperty().SetDiffuse(mat.getp('diffuse'))
if ax.getp('diffusecolor') is not None:
actor.GetProperty().SetDiffuseColor(ax.getp('diffusecolor'))
if mat.getp('specular') is not None:
actor.GetProperty().SetSpecular(mat.getp('specular'))
if mat.getp('specularpower') is not None:
actor.GetProperty().SetSpecularPower(mat.getp('specularpower'))
def _set_grid(self):
if not self._axis.getp('visible') or not self._axis.getp('grid'):
return
b = self._axis._vtk_box_bounds #self._axis._vtk_scaled_bounds
if self._axis.getp('camera').getp('view') == 3:
origins = [[b[0],b[2],b[4]], [b[0],b[3],b[4]], [b[1],b[2],b[4]]]
points1 = [[b[1],b[2],b[4]], [b[0],b[3],b[5]], [b[1],b[2],b[5]]]
points2 = [[b[0],b[3],b[4]], [b[1],b[3],b[4]], [b[1],b[3],b[4]]]
else:
origins = [[b[0],b[2],0]]
points1 = [[b[0],b[3],0]]
points2 = [[b[1],b[2],0]]
for i in range(len(origins)):
plane = vtk.vtkPlaneSource()
plane.SetResolution(4, 4)
plane.SetOrigin(origins[i])
plane.SetPoint1(points1[i])
plane.SetPoint2(points2[i])
plane.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(plane.GetOutput())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0,0,0)
actor.GetProperty().SetRepresentationToWireframe()
actor.GetProperty().SetLineStipplePattern(0x1111)
actor.GetProperty().SetLineStippleRepeatFactor(1)
self._axis._renderer.AddActor(actor)
def _create_vtk_data(self):
for item in self._axis.getp('plotitems'):
func = item.getp('function')
if isinstance(item, Line):
self._add_line(item)
elif isinstance(item, Surface):
if func == 'pcolor':
sgrid = self._get_2d_structured_grid(item, heights=False)
else:
sgrid = self._get_2d_structured_grid(item)
self._add_surface(item, sgrid)
citem = item.getp('contours')
if isinstance(citem, Contours):
csgrid = self._get_2d_structured_grid(citem, heights=False,
bottom=True)
self._add_contours(citem, csgrid)
elif isinstance(item, Contours):
if item.getp('clocation') == 'surface':
sgrid = self._get_2d_structured_grid(item)
else:
sgrid = self._get_2d_structured_grid(item, heights=False)
self._add_contours(item, sgrid)
elif isinstance(item, VelocityVectors):
if len(item.getp('udata').shape) == 3:
sgrid = self._get_3d_structured_grid(item, vectors=True)
else:
sgrid = self._get_2d_structured_grid(item, vectors=True)
self._add_velocity_vectors(item, sgrid)
elif isinstance(item, Streams):
if len(item.getp('udata').shape) == 3:
sgrid = self._get_3d_structured_grid(item, vectors=True)
else:
sgrid = self._get_2d_structured_grid(item, vectors=True)
self._add_streams(item, sgrid)
elif isinstance(item, Volume):
sgrid = self._get_3d_structured_grid(item)
contours = func == 'contourslice'
if func in ['slice_', 'contourslice']:
self._add_slices(item, sgrid, contours=contours)
elif func == 'isosurface':
self._add_isosurface(item, sgrid)
else:
raise NotImplementedError('%s not yet implemented' % item)
self._axis._vtk_apd.Update()
def _fix_latex(self, legend):
"""Remove latex syntax a la $, \, {, } etc."""
legend = legend.strip()
# General fix of latex syntax (more readable)
legend = legend.replace('**', '^')
#legend = legend.replace('*', '')
legend = legend.replace('$', '')
legend = legend.replace('{', '')
legend = legend.replace('}', '')
legend = legend.replace('\\', '')
return legend
def _add_legend(self, item, polydata):
legend = self._fix_latex(item.getp('legend'))
if legend:
ax = self._axis
n = ax._vtk_nolegends
ax._vtk_nolegends += 1
ax._vtk_legendbox.SetNumberOfEntries(ax._vtk_nolegends)
ax._vtk_legendbox.SetEntrySymbol(n, polydata)
ax._vtk_legendbox.SetEntryString(n, legend)
color = self._colors[item.getp('linecolor')]
ax._vtk_legendbox.SetEntryColor(n, color)#ax.getp('fgcolor'))
def _set_legends(self):
ax = self._axis
n = ax._vtk_nolegends
if n > 0:
ax._vtk_legendbox.SetNumberOfEntries(n)
#ax._vtk_legendbox.ScalarVisibilityOff()
#ax._vtk_legendbox.BorderOff()
ax._vtk_legendbox.GetPositionCoordinate().SetValue(0.8, 0.2, 0)
ax._vtk_legendbox.GetPosition2Coordinate().SetValue(.2, n*.1, 0)
ax._vtk_legendbox.GetProperty().SetColor(ax.getp('fgcolor'))
ax._renderer.AddActor(ax._vtk_legendbox)
def _set_lights(self):
if not hasattr(self._axis, '_vtk_lights'):
self._axis._vtk_lights = []
else: # remove all lights (if any)
for l in self._axis._vtk_lights:
self._axis._renderer.RemoveLight(l)
self._axis._vtk_lights = []
for l in self._axis.getp('lights'):
light = vtk.vtkLight()
light.SetColor(l.getp('lightcolor'))
light.SetFocalPoint(l.getp('lighttarget'))
light.SetPosition(l.getp('lightpos'))
self._axis._renderer.AddLight(light)
self._axis._vtk_lights.append(light)
def _setup_axis(self):
ax = self._axis
xmin, xmax = ax.getp('xmin'), ax.getp('xmax')
if None in [xmin, xmax]:
xmin, xmax = ax.getp('xlim')
ymin, ymax = ax.getp('ymin'), ax.getp('ymax')
if None in [ymin, ymax]:
ymin, ymax = ax.getp('ylim')
zmin, zmax = ax.getp('zmin'), ax.getp('zmax')
if None in [zmin, zmax]:
zmin, zmax = ax.getp('zlim')
bnds = [xmin, xmax, ymin, ymax, zmin, zmax]
ax._vtk_bounds = bnds[:]
# scale axis:
dar = ax.getp('daspect')
bnds[0] = bnds[0]/dar[0]; bnds[1] = bnds[1]/dar[0]
bnds[2] = bnds[2]/dar[1]; bnds[3] = bnds[3]/dar[1]
bnds[4] = bnds[4]/dar[2]; bnds[5] = bnds[5]/dar[2]
ax._vtk_scaled_bounds = bnds[:]
## fig = self.gcf()
## # clean up:
## if hasattr(ax, '_renderer'):
## self._g.RemoveRenderer(ax._renderer)
## if ax._renderer in fig._renderers:
## fig._renderers.remove(ax._renderer)
## del ax._renderer
## ax._renderer = vtk.vtkRenderer()
## self._g.AddRenderer(ax._renderer)
## # add this new renderer to the current figures list of renderers
## # so we can remove it later (e.g. when using clf()):
## fig._renderers.append(ax._renderer)
if not hasattr(ax, '_renderer'):
ax._renderer = vtk.vtkRenderer()
self._g.AddRenderer(ax._renderer)
# add this new renderer to the current figures list of renderers
# so we can remove it later (e.g. when using clf()):
gcf()._renderers.append(ax._renderer)
if hasattr(ax, '_vtk_legendbox'):
ax._renderer.RemoveActor(ax._vtk_legendbox)
ax._vtk_legendbox = vtk.vtkLegendBoxActor()
ax._vtk_nolegends = 0
ax._vtk_legendbox.SetNumberOfEntries(0)
#ax._renderer.TwoSidedLightingOff()
ax._renderer.SetBackground(ax.getp('bgcolor'))
viewport = ax.getp('viewport')
if not viewport:
viewport = (0,0,1,1)
ax._renderer.SetViewport(viewport)
ax._renderer.RemoveAllViewProps() # clear current scene
#axshape = self.gcf().getp('axshape')
#ax._renderer.SetPixelAspect(axshape[1], axshape[0])
if not hasattr(ax, '_vtk_apd'):
ax._vtk_apd = vtk.vtkAppendPolyData()
else:
ax._vtk_apd.RemoveAllInputs()
def _replot(self):
self._axis = gca() # shortcut for fast access
fig = self.gcf()
if fig.getp('axshape') != fig._axshape:
# remove all current renderers:
for ren in fig._renderers:
self._g.RemoveRenderer(ren)
fig._renderers = []
fig._axshape = fig.getp('axshape')
#if self._master is not None:
# fig._root.withdraw()
if len(self._axis.getp('plotitems')) > 0:
self._setup_axis()
self._set_lights()
self._set_colormap()
self._set_caxis()
self._create_vtk_data()
self._create_labeled_axes()
self._set_view()
self._set_box_state()
self._set_colorbar()
self._set_title()
self._set_legends()
self._set_grid()
# render scene:
self._axis._renderer.Render()
if self.getp('show') and hasattr(fig, '_root'):
fig._root.deiconify() # raise window
fig._root.update()
# render complete scene:
self._g.Render()
def figure(self, *args, **kwargs):
"""Extension of BaseClass.figure"""
fig = BaseClass.figure(self, *args, **kwargs)
try:
fig._g
        except AttributeError:
try:
fig._g = self._create_Tk_gui()
except Tkinter.TclError:
# can't create gui; only offscreen rendering
fig._g = vtk.vtkRenderWindow()
try:
width, height = fig.getp('size')
except TypeError:
width, height = (640, 480)
if width is None or height is None:
width, height = (640, 480)
fig._g.SetSize(width, height)
fig._g.OffScreenRenderingOn()
fig._renderers = []
fig._axshape = fig.getp('axshape')
self._g = fig._g # link for faster access
#self._g.SetAAFrames(5)
return fig
def clf(self):
"""Clear current figure."""
fig = self.gcf()
for ren in fig._renderers:
self._g.RemoveRenderer(ren)
fig._renderers = []
if self._master is not None:
fig._root.withdraw() # hide window
del fig._g
BaseClass.clf(self)
def closefig(self, num):
"""Close figure window with number 'num'."""
if num in self._figs:
curfig = self._attrs['curfig']
self.figure(num) # set figure with 'num' as current figure
self.clf()
del self._figs[num]
self.figure(curfig) # put back the current figure
else:
print 'no such figure with number', num
def closefigs(self):
"""Close all figure windows."""
keys = self._figs.keys()
        for key in keys:
            self.closefig(key)
BaseClass.closefigs(self)
self.figure(self._attrs['curfig'])
def hardcopy(self, filename="",
quality=100,
progressive=False,
vector_file=True,
landscape=False,
raster3d=False,
compress=False,
**kwargs):
"""The figure can be stored in either a vector PostScript (PS/EPS) or
        PDF file using GL2PS or an image file (PNG/PNM/JPEG/TIFF/BMP) using
a corresponding vtkWriter instance. PostScript output can also be
generated using vtkPostScriptWriter if vector_file is set to False.
TeX output is also available, but only the text output will be saved
to the file.
If the given filename has no extension, then EPS output will be used.
If `filename` contains just the file extension, say ``.png``,
it is saved to ``tmp.png``.
Keyword arguments:
quality -- Sets the quality of the resulting image. Affects only
JPEG images. Given as an integer between 0 and 100
where 100 results in the best quality (but also
the largest file). The default is quality=100.
progressive -- Sets whether to use progressive JPEG generation or
not. Defaults to False.
        vector_file -- If True (default), the figure will be stored as a
                       vector file. This only applies if PS, EPS,
                       or PDF is chosen as the output file format.
        Additional keyword arguments (only in effect if the vector_file
        argument is set to True and the file format is either PS, EPS or PDF):
        landscape -- Sets whether to use landscape or portrait orientation.
                     A True value results in landscape orientation. Defaults
                     to False.
raster3d -- If True, this will write 3D props as raster images
while 2D props are rendered using vector graphic
primitives. Defaults to False.
compress -- Compression when generating PostScript or PDF output.
The default is False (no compression).
"""
if filename.startswith('.'):
filename = 'tmp' + filename
if not filename:
raise TypeError("hardcopy: No filename given, cannot save figure.")
self.setp(**kwargs)
basename, ext = os.path.splitext(filename)
if not ext: # no extension given, use .eps
ext = '.eps'
filename += ext
if not self.getp('show'): # don't display to screen
self._g.OffScreenRenderingOn()
replot = kwargs.get('replot', True)
if replot:
self._replot()
color = self.getp('color')
vector_file_formats = {'.ps': 0, '.eps': 1, '.pdf': 2, '.tex': 3}
if vector_file and ext.lower() in vector_file_formats:
exp = vtk.vtkGL2PSExporter()
exp.SetRenderWindow(self._g)
exp.SetFilePrefix(basename)
exp.SetFileFormat(vector_file_formats[ext.lower()])
exp.SetCompress(bool(compress))
exp.SetLandscape(bool(landscape))
exp.SetSortToBSP()
#exp.SetSortToSimple() # less expensive sort algorithm
exp.DrawBackgroundOn()
exp.SetWrite3DPropsAsRasterImage(bool(raster3d))
exp.Write()
else:
image_writers = {
'.tif': vtk.vtkTIFFWriter(),
'.bmp': vtk.vtkBMPWriter(),
'.pnm': vtk.vtkPNMWriter(),
'.png': vtk.vtkPNGWriter(),
'.jpg': vtk.vtkJPEGWriter(),
'.ps': vtk.vtkPostScriptWriter(),
                '.eps': vtk.vtkPostScriptWriter(), # no native EPS writer; plain PS output
}
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(self._g)
try:
writer = image_writers[ext.lower()]
except KeyError:
raise TypeError(
"hardcopy: File format '%s' is currently not supported."\
% ext)
try:
writer.SetQuality(int(quality))
writer.SetProgressive(bool(progressive))
except ValueError:
raise ValueError(\
"hardcopy: Integer required for the 'quality' argument.")
except AttributeError:
pass # only vtkJPEGWriter has quality and progressive attrs.
writer.SetFileName(filename)
writer.SetInput(w2if.GetOutput())
writer.Write()
self._g.OffScreenRenderingOff()
hardcopy.__doc__ = BaseClass.hardcopy.__doc__ + hardcopy.__doc__
def brighten(self, *args):
"""Brighten or darken color map."""
nargs = len(args)
if nargs == 2: # brighten(map,beta)
if not isinstance(args[0], vtk.vtkLookupTable):
raise ValueError("brighten: map must be %s, not %s" % \
(type(vtk.vtkLookupTable), type(args[0])))
lut, beta = args
elif nargs == 1: # brighten(beta)
if not hasattr(self._axis, '_vtk_colormap'):
print "brighten: no colormap set."
return
lut = self._axis._vtk_colormap
beta = args[0]
if not isinstance(beta, (float,int)) or (beta <= -1 or beta >= 1):
raise ValueError("brighten: beta must be between -1 and 1")
        # all is OK, change colormap: following MATLAB's brighten convention
        # (a sketch), gamma-correct the value range (gamma < 1 brightens,
        # gamma > 1 darkens) and leave the hue range unchanged.
        val = lut.GetValueRange()
        gamma = 1.0 - beta if beta > 0 else 1.0/(1.0 + beta)
        lut.SetValueRange(val[0]**gamma, val[1]**gamma)
        lut.Build()
        return lut
# colormaps
def hsv(self, m=256):
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.0, 1.0)
lut.SetSaturationRange(1.0, 1.0)
lut.SetValueRange(1.0, 1.0)
lut.SetNumberOfColors(m)
lut.Build()
return lut
def gray(self, m=256):
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.0, 0.0)
lut.SetSaturationRange(0.0, 0.0)
lut.SetValueRange(0.0, 1.0)
lut.SetNumberOfColors(m)
lut.Build()
return lut
    def hot(self, m=256):
        # Black-body style map: ramp red, then green, then blue up to white.
        # The table size is hardcoded to 256 entries (inc is tuned for that
        # size), so the 'm' argument is effectively ignored here.
        lut = vtk.vtkLookupTable()
        inc = 0.01175
        lut.SetNumberOfColors(256)
i = 0
r = 0.0; g = 0.0; b = 0.0
while r <= 1.:
lut.SetTableValue(i, r, g, b, 1)
r += inc; i += 1
r = 1.
while g <= 1.:
lut.SetTableValue(i, r, g, b, 1)
g += inc; i += 1
g = 1.
while b <= 1:
if i == 256: break
lut.SetTableValue(i, r, g, b, 1)
b += inc; i += 1
lut.Build()
return lut
def flag(self, m=64):
"""Alternating red, white, blue, and black color map.
- flag(m)
'm' must be a multiple of 4
"""
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(m)
# the last parameter alpha is set to 1 by default
# in method declaration
for i in range(0,m,4):
lut.SetTableValue(i,1,0,0,1) # red
lut.SetTableValue(1+i,1,1,1,1) # white
lut.SetTableValue(2+i,0,0,1,1) # blue
lut.SetTableValue(3+i,0,0,0,1) # black
lut.Build()
return lut
def jet(self, m=256):
# blue, cyan, green, yellow, red, black
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(m)
lut.SetHueRange(0.667,0.0)
lut.Build()
return lut
def blue_to_yellow(self, m=200):
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(m)
for i in range(m):
frac = i / float(m / 2.0 - 1.0)
if (frac <= 1):
r = frac
g = r
b = 1
else:
r = 1
g = r
b = 2 - frac
# SetTableValue(indx, red, green, blue, alpha)
lut.SetTableValue(i, r, g, b, 1)
lut.Build()
return lut
def spring(self, m=256):
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(m)
lut.SetHueRange(0.0, 0.17)
lut.SetSaturationRange(0.5, 1.0)
lut.SetValueRange(1.0, 1.0)
lut.Build()
return lut
def summer(self, m=256):
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(m)
lut.SetHueRange(0.47, 0.17)
lut.SetSaturationRange(1.0, 0.6)
lut.SetValueRange(0.5, 1.0)
lut.Build()
return lut
def winter(self, m=256):
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(m)
lut.SetHueRange(0.8, 0.42)
lut.SetSaturationRange(1.0, 1.0)
lut.SetValueRange(0.6, 1.0)
lut.Build()
return lut
def autumn(self, m=256):
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(m)
lut.SetHueRange(0.0, 0.15)
lut.SetSaturationRange(1.0, 1.0)
lut.SetValueRange(1.0, 1.0)
lut.Build()
return lut
plt = VtkBackend() # Create backend instance
use(plt, globals()) # Export public namespace of plt to globals()
backend = os.path.splitext(os.path.basename(__file__))[0][:-1]
|
11549875
|
import numpy as np
dim = 1
p = 1.0
n_train = 256
n_eval = 128
n_test = 128
low = 0.5
high = 1.0
data_train = np.random.uniform(low=low, high=high, size=(n_train, dim, dim))
mask_train = np.random.binomial(n=1, p=p, size=(n_train, dim, dim))
data_train = np.reshape(data_train * mask_train, (n_train, dim * dim))
data_eval = np.random.uniform(low=low, high=high, size=(n_eval, dim, dim))
mask_eval = np.random.binomial(n=1, p=p, size=(n_eval, dim, dim))
data_eval = np.reshape(data_eval * mask_eval, (n_eval, dim * dim))
data_test = np.random.uniform(low=low, high=high, size=(n_test, dim, dim))
mask_test = np.random.binomial(n=1, p=p, size=(n_test, dim, dim))
data_test = np.reshape(data_test * mask_test, (n_test, dim * dim))
np.savetxt('dim{}_p{}_low{}_high{}_train.txt'.format(dim, p, low, high),
data_train)
np.savetxt('dim{}_p{}_low{}_high{}_eval.txt'.format(dim, p, low, high),
data_eval)
np.savetxt('dim{}_p{}_low{}_high{}_test.txt'.format(dim, p, low, high),
data_test)
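# The saved splits can be read back with np.loadtxt (a sketch; ndmin=2 keeps
# the (n_samples, dim*dim) shape even when dim == 1):
#   train = np.loadtxt('dim{}_p{}_low{}_high{}_train.txt'.format(dim, p, low, high), ndmin=2)
#   assert train.shape == (n_train, dim * dim)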
|
11549877
|
from arekit.common.data import const
from arekit.common.data.row_ids.base import BaseIDProvider
from arekit.common.data.storages.base import BaseRowsStorage
class BaseSampleStorageView(object):
"""
Pandas-based input samples proovider
"""
def __init__(self, storage, row_ids_provider):
assert(isinstance(row_ids_provider, BaseIDProvider))
assert(isinstance(storage, BaseRowsStorage))
self.__row_ids_provider = row_ids_provider
self._storage = storage
# TODO. #240 This is just a wrapper over storage.
def iter_rows(self, handle_rows):
assert(callable(handle_rows) or handle_rows is None)
for row_index, row in self._storage:
if handle_rows is None:
yield row_index, row
else:
yield handle_rows(row)
    def iter_rows_linked_by_text_opinions(self):
        # Group consecutive rows that refer to the same (doc_id, opinion_id) pair.
        undefined = -1
        linked = []
        current_doc_id = undefined
        current_opinion_id = undefined
        for row_index, sample_id in enumerate(self._storage.iter_column_values(const.ID)):
            sample_id = str(sample_id)
            doc_id = self._storage.get_cell(row_index=row_index, column_name=const.DOC_ID)
            opinion_id = self.__row_ids_provider.parse_opinion_in_sample_id(sample_id)
            if current_doc_id != undefined and current_opinion_id != undefined:
                if doc_id != current_doc_id or opinion_id != current_opinion_id:
                    yield linked
                    linked = []
            # Track the group key of the row being appended, so the next
            # iteration compares against the latest group, not the first one.
            current_doc_id = doc_id
            current_opinion_id = opinion_id
            linked.append(self._storage.get_row(row_index))
        if len(linked) > 0:
            yield linked
|
11549917
|
print("enter size of array")
s = int(input())
i = 0
b = []
print("enter array elements")
while i < s:
b.append(int(input()))
i = i+1
print(b)
def getsecondhighest(b):
    # Track the two largest distinct values seen so far; -inf handles
    # all-negative inputs that a 0 initializer would break.
    hi = mid = float('-inf')
    for x in b:
        if x > hi:
            mid = hi
            hi = x
        elif hi > x > mid:
            mid = x
    return mid
print("second highest is : ", getsecondhighest(b))
|
11549968
|
import numpy as np
import networkx as nx
import cPickle as cp
import random
import ctypes
import os
import sys
import time
from tqdm import tqdm
sys.path.append( '%s/tsp2d_lib' % os.path.dirname(os.path.realpath(__file__)) )
from tsp2d_lib import Tsp2dLib
def find_model_file(opt):
max_n = int(opt['max_n'])
min_n = int(opt['min_n'])
log_file = '%s/log-%d-%d.txt' % (opt['save_dir'], min_n, max_n)
best_r = 10000000
best_it = -1
with open(log_file, 'r') as f:
for line in f:
if 'average' in line:
line = line.split(' ')
it = int(line[1].strip())
r = float(line[-1].strip())
if r < best_r:
best_r = r
best_it = it
assert best_it >= 0
print 'using iter=', best_it, 'with r=', best_r
return '%s/%s_iter_%d.model' % (opt['save_dir'], opt['sample_name'], best_it)
def GetGraph(fname, need_norm):
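    # Parse a TSPLIB file into a networkx graph with 'pos' node attributes;
    # if need_norm, coordinates are scaled by the largest absolute value seen.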
norm = 1.0
with open(fname, 'r') as f_tsp:
coors = {}
in_sec = False
n_nodes = -1
for l in f_tsp:
if 'DIMENSION' in l:
n_nodes = int(l.strip().split(' ')[-1].strip())
if in_sec:
idx, x, y = [w.strip() for w in l.strip().split(' ') if len(w.strip())]
idx = int(idx)
if np.fabs(float(x)) > norm:
norm = np.fabs(float(x))
if np.fabs(float(y)) > norm:
norm = np.fabs(float(y))
coors[idx - 1] = [float(x), float(y)]
assert len(coors) == idx
if len(coors) == n_nodes:
break
elif 'NODE_COORD_SECTION' in l:
in_sec = True
if need_norm:
for i in coors:
coors[i][0] /= norm
coors[i][1] /= norm
assert len(coors) == n_nodes
g = nx.Graph()
g.add_nodes_from(range(n_nodes))
nx.set_node_attributes(g, 'pos', coors)
return g
def dist(coors, i, j):
dx = float(coors[i][0] - coors[j][0])
dy = float(coors[i][1] - coors[j][1])
dd = np.sqrt(dx * dx + dy * dy)
return np.round(dd)
def get_val(sol, g):
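    # Tour length of solution 'sol' (sol[0] holds the node count) using
    # rounded Euclidean distances, as in TSPLIB's EUC_2D convention.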
t = []
for i in range(sol[0]):
t.append(sol[i + 1])
if len(t) != nx.number_of_nodes(g):
print len(t), nx.number_of_nodes(g)
assert len(t) == nx.number_of_nodes(g)
val = 0.0
coors = nx.get_node_attributes(g, 'pos')
for i in range(nx.number_of_nodes(g)):
if i == nx.number_of_nodes(g) - 1:
next = 0
else:
next = i + 1
val += dist(coors, t[i], t[next])
return val
if __name__ == '__main__':
api = Tsp2dLib(sys.argv)
opt = {}
for i in range(1, len(sys.argv), 2):
opt[sys.argv[i][1:]] = sys.argv[i + 1]
fname = '%s/%s/%s' % (opt['data_root'], opt['folder'], opt['sample_name'])
model_file = find_model_file(opt)
assert model_file is not None
print 'loading', model_file
sys.stdout.flush()
api.LoadModel(model_file)
g_norm = GetGraph(fname, True)
api.InsertGraph(g_norm, is_test=True)
g_raw = GetGraph(fname, need_norm=False)
test_name = opt['sample_name']
result_file = '%s/test-%s-gnn-%s-%s.csv' % (opt['save_dir'], test_name, opt['min_n'], opt['max_n'])
with open(result_file, 'w') as f_out:
print 'testing'
sys.stdout.flush()
t1 = time.time()
_, sol = api.GetSol(0, nx.number_of_nodes(g_norm))
t2 = time.time()
val = get_val(sol, g_raw)
f_out.write('%d,' % val)
f_out.write('%d' % sol[0])
for i in range(sol[0]):
f_out.write(' %d' % sol[i + 1])
f_out.write(',%.6f\n' % (t2 - t1))
print 'average tour length: ', val
|
11550040
|
from __future__ import annotations
from librespot.core import ApResolver
from librespot.metadata import AlbumId, ArtistId, EpisodeId, ShowId, TrackId
from librespot.proto import Connect_pb2 as Connect, Metadata_pb2 as Metadata
from librespot.structure import Closeable
import logging
import requests
import typing
if typing.TYPE_CHECKING:
from librespot.core import Session
|
11550041
|
from collections import defaultdict
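# Read n names (one per line), record the 1-based positions of each, then
# answer m lookup queries; unknown names print -1.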
n, m = map(int, input().split())
A = defaultdict(list)
for i in range(1, n + 1):
A[input()].append(i)
for _ in range(m):
key = input()
if key in A:
print(*A[key], sep=" ")
else:
print(-1)
|
11550050
|
from selenium import webdriver
import os, re
from bs4 import BeautifulSoup
import time
os.chdir("../")
bili_dir = "python/bili_av_cid"
def scrap():
os.makedirs(bili_dir, exist_ok=True)
    driver = webdriver.Chrome()  # launch a Chrome browser window
driver.get("http://space.bilibili.com/243821484/channel/detail?cid=28254")
input("press enter if page loaded")
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
items = soup.find_all("li", {"class": ["small-item", "fakeDanmu-item"]})
for item in items:
try:
av = item["data-aid"]
except KeyError:
continue
title = item.find_next("a", {"class": "title"}).text
title = title.replace("/", "_")
if os.path.exists(os.path.join(bili_dir, title)):
continue
f_res = open(os.path.join(bili_dir, title), "w")
f_res.write("av" + av + "\n")
base_url = "https://www.bilibili.com/video/av%s/?p=" % av
i = 0
        while True:
            i += 1
            url = base_url + str(i)
driver.get(url)
time.sleep(2)
html = driver.page_source # get html
matched = re.search(r"com/upgcxcode/\d{1,4}/\d{1,4}/(\d{4,12})/", html)
if matched:
cid = matched.group(1)
f_res.write(cid + "\n")
print(cid)
else:
print("NOT FOUND", url)
break
f_res.close()
driver.close()
def switch():
files = os.listdir(bili_dir)
av_dict = {}
for file in files:
path = os.path.join(bili_dir, file)
with open(path, "r") as f:
av_cid = f.readlines()
av = av_cid[0].strip()
cids = [c.strip() for c in av_cid[1:]]
av_dict[av] = cids
for root, dirs, files in os.walk("_tutorials"):
if len(files) == 0:
continue
files.sort()
count = 0
for f in files:
if f.startswith("."):
continue
path = os.path.join(root, f)
print(path)
with open(path, "r") as f_data:
f_str = f_data.read()
av = re.search(r"bilibili_id:[ ]?(\d{6,8})", f_str)
if av:
av = av.group(1)
else:
continue
if path == "_tutorials/machine-learning/ML-practice/build-car-classifier-from-scratch1.md":
count = 0
try:
f_str = re.sub(r"bilibili_id:[ ]?\d{6,8}&page=\d{1,3}\n", "b_av: %s\nb_cid: %s\nb_page: %d\n" % (av, av_dict["av"+av][count], count+1), f_str)
except KeyError:
continue
count += 1
with open(path, "w") as f_data:
f_data.write(f_str)
# scrap()
switch()
|
11550062
|
from .agent import Agent
from .agents import AC, DQN, PG, TQL, RandomAgent
from .bandits import BanditExperiment, MultiArmedBandit, average_runs
from .environment import Environment
from .openai_gym import OpenAIGym
|
11550066
|
from typing import Dict, List
from .extract_semantic import extract_method_statements_semantic
from .create_extraction_opportunities import create_extraction_opportunities
from ._syntactic_filter import syntactic_filter
from ._semantic_filter import semantic_filter
from ._common_types import Statement, StatementSemantic, ExtractionOpportunity
from ._common_cli import common_cli
from veniq.ast_framework import AST
from veniq.ast_framework.block_statement_graph import build_block_statement_graph
def filter_extraction_opportunities(
extraction_opportunities: List[ExtractionOpportunity],
statements_semantic: Dict[Statement, StatementSemantic],
method_ast: AST,
) -> List[ExtractionOpportunity]:
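    """Keep only the opportunities that pass both the syntactic and the semantic filter."""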
block_statement_graph = build_block_statement_graph(method_ast)
extraction_opportunities_filtered = filter(
lambda extraction_opportunity: syntactic_filter(extraction_opportunity, block_statement_graph)
and semantic_filter(extraction_opportunity, statements_semantic, block_statement_graph),
extraction_opportunities,
)
return list(extraction_opportunities_filtered)
def _print_extraction_opportunities(method_ast: AST, filepath: str, class_name: str, method_name: str):
statements_semantic = extract_method_statements_semantic(method_ast)
extraction_opportunities = create_extraction_opportunities(statements_semantic)
filtered_extraction_opportunities = filter_extraction_opportunities(
extraction_opportunities, statements_semantic, method_ast
)
print(
f"{len(filtered_extraction_opportunities)} opportunities found in method {method_name} "
f"in class {class_name} in file {filepath}:"
)
for index, extraction_opportunity in enumerate(filtered_extraction_opportunities):
first_statement = extraction_opportunity[0]
last_statement = extraction_opportunity[-1]
print(
f"{index}th extraction opportunity:\n"
f"\tFirst statement: {first_statement.node_type} on line {first_statement.line}\n"
f"\tLast statement: {last_statement.node_type} on line {last_statement.line}\n"
)
if __name__ == "__main__":
common_cli(
_print_extraction_opportunities,
"Creates extraction opportunities and filter them based on syntactic and semantic conditions",
)
|
11550096
|
import json
import logging
import sys
import tempfile
from unittest import mock
import pytest
import structlog
from meltano.core.logging.output_logger import Out, OutputLogger
from structlog.testing import LogCapture
def assert_lines(output, *lines):
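    """Assert that each expected entry appears somewhere in the captured output."""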
for line in lines:
assert line in output
class TestOutputLogger:
@pytest.fixture
def log(self, tmp_path):
return tempfile.NamedTemporaryFile(mode="w+", dir=tmp_path)
@pytest.fixture
def subject(self, log):
return OutputLogger(log.name)
@pytest.fixture(name="log_output")
def fixture_log_output(self):
return LogCapture()
@pytest.fixture(autouse=True)
def fixture_configure_structlog(self, log_output):
structlog.configure(
processors=[log_output],
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
)
@pytest.fixture(name="redirect_handler")
def redirect_handler(self, subject: OutputLogger) -> logging.Handler:
formatter = structlog.stdlib.ProcessorFormatter(
processor=structlog.processors.JSONRenderer(), # use a json renderer so output is easier to verify
)
handler = logging.FileHandler(subject.file)
handler.setFormatter(formatter)
return handler
@pytest.mark.asyncio
async def test_stdio_capture(self, log, subject, log_output):
stdout_out = subject.out("stdout")
stderr_out = subject.out("stderr")
async with stdout_out.redirect_stdout():
sys.stdout.write("STD")
sys.stdout.write("OUT\n")
print("STDOUT 2")
assert_lines(
log_output.entries,
{
"name": "stdout",
"event": "STDOUT",
"log_level": "info",
},
{
"name": "stdout",
"event": "STDOUT 2",
"log_level": "info",
},
)
async with stderr_out.redirect_stderr():
sys.stderr.write("STD")
sys.stderr.write("ERR\n")
print("STDERR 2", file=sys.stderr)
assert_lines(
log_output.entries,
{
"name": "stderr",
"event": "STDERR",
"log_level": "info",
},
{
"name": "stderr",
"event": "STDERR 2",
"log_level": "info",
},
)
@pytest.mark.asyncio
async def test_out_writers(self, log, subject, log_output):
writer_out = subject.out("writer")
line_writer_out = subject.out("lwriter")
basic_out = subject.out("basic")
async with writer_out.writer() as writer:
writer.write("WRI")
writer.write("TER\n")
writer.write("WRITER 2\n")
with line_writer_out.line_writer() as line_writer:
line_writer.write("LINE\n")
line_writer.write("LINE 2\n")
basic_out.writeline("LINE\n")
basic_out.writeline("LINE 2\n")
assert_lines(
log_output.entries,
{
"name": "writer",
"event": "WRITER",
"log_level": "info",
},
{
"name": "writer",
"event": "WRITER 2",
"log_level": "info",
},
{
"name": "lwriter",
"event": "LINE",
"log_level": "info",
},
{
"name": "lwriter",
"event": "LINE 2",
"log_level": "info",
},
{"name": "basic", "event": "LINE", "log_level": "info"},
{
"name": "basic",
"event": "LINE 2",
"log_level": "info",
},
)
@pytest.mark.asyncio
async def test_set_custom_logger(self, log, subject, log_output):
logger = structlog.getLogger()
out = subject.out("basic", logger.bind(is_test=True))
out.writeline("LINE\n")
assert_lines(
log_output.entries,
{
"name": "basic",
"event": "LINE",
"log_level": "info",
"is_test": True,
},
)
@pytest.mark.asyncio
async def test_logging_redirect(self, log, subject, log_output, redirect_handler):
logging_out = subject.out("logging")
with mock.patch.object(Out, "redirect_log_handler", redirect_handler):
with logging_out.redirect_logging():
logging.info("info")
logging.warning("warning")
logging.error("error")
with open(subject.file) as logf:
log_file_contents = [json.loads(line) for line in logf.readlines()]
assert_lines(
log_file_contents,
{"event": "info"},
{"event": "warning"},
{"event": "error"},
)
def test_logging_exception(self, log, subject, redirect_handler):
logging_out = subject.out("logging")
        # unhandled exceptions should be logged and then re-raised
exception = Exception("exception")
with pytest.raises(Exception) as exc:
with mock.patch.object(Out, "redirect_log_handler", redirect_handler):
with logging_out.redirect_logging():
raise exception
# make sure it let the exception through
assert exc.value is exception
log_content = json.loads(log.read())
# make sure the exception is logged
assert log_content.get("event") == "exception"
assert log_content.get("exc_info")
|
11550127
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from models.bert import modeling
from models.bert import modeling_slice
import tensorflow as tf
from tensorflow import logging
import sys
slim = tf.contrib.slim
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
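# Worked example (illustrative, not part of the original module): with
# batch_size=2 and seq_length=3, flat_offsets is [[0], [3]], so
# positions=[[0, 2], [1, 1]] flattens to indices [0, 2, 4, 4] and tf.gather
# picks rows 0 and 2 of sequence 0 plus row 1 of sequence 1 (twice) from the
# [6, width] matrix.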
def bert_arg_scope(
weight_decay=0.00004,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
activation_fn=tf.nn.relu,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Returns the scope with the default parameters.
Args:
weight_decay: the weight decay for weights variables.
batch_norm_decay: decay for the moving average of batch_norm momentums.
batch_norm_epsilon: small float added to variance to avoid dividing by zero.
activation_fn: Activation function for conv2d.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
Returns:
    an arg_scope with the parameters.
"""
# Set weight_decay for weights in conv2d and fully_connected layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_regularizer=slim.l2_regularizer(weight_decay)):
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'updates_collections': batch_norm_updates_collections,
'fused': None, # Use fused batch norm if possible.
}
# Set activation_fn and parameters for batch_norm.
with slim.arg_scope([slim.conv2d], activation_fn=activation_fn,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as scope:
return scope
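# Usage sketch (hedged; `inputs` is a placeholder name): the returned scope is
# meant to be re-entered by the caller, e.g.
#   with slim.arg_scope(bert_arg_scope(weight_decay=1e-4)):
#       net = slim.fully_connected(inputs, 768)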
class BertFinetune(object):
"""
    Finetune method based on BERT.
"""
def __init__(self, bert_config_file, max_seq_length, is_training,
input_ids, input_mask, segment_ids, labels, use_one_hot_embeddings,
model_type='classification', kwargs=None):
bert_config = modeling.BertConfig.from_json_file(bert_config_file)
if max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(max_seq_length, bert_config.max_position_embeddings))
self.model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
self.bert_config = bert_config
self.kwargs = kwargs
self.labels = labels
self.input_ids = input_ids
if model_type == 'classification':
self.build_output_layer_classification()
elif model_type == 'regression':
self.build_output_layer_regression()
elif model_type == 'mrc':
self.build_output_layer_squad()
elif model_type == 'pretrain':
self.build_pretrain()
else:
raise ValueError("model_type should be one of ['classification', "
"'regression', pretrain', 'mrc'].")
self.saver = tf.train.Saver(
var_list=tf.global_variables(),
max_to_keep=2)
def restore(self, saver_directory, sess):
checkpoint = tf.train.latest_checkpoint(saver_directory)
if not checkpoint:
logging.info("Couldn't find trained model at %s." % saver_directory)
else:
logging.info('restore from {}'.format(checkpoint))
self.saver.restore(sess, checkpoint)
def save(self, saver_directory, sess, step=None):
logging.info("Save to %s." % saver_directory)
if step is not None:
self.saver.save(sess, saver_directory, global_step=step)
else:
self.saver.save(sess, saver_directory)
def build_pretrain(self):
(masked_lm_loss,
masked_lm_example_loss, masked_lm_log_probs) = self.get_masked_lm_output(
self.bert_config,
self.model.get_sequence_output(),
self.model.get_embedding_table(),
self.kwargs['masked_lm_positions'],
self.kwargs['masked_lm_ids'],
self.kwargs['masked_lm_weights'])
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = self.get_next_sentence_output(
self.bert_config,
self.model.get_pooled_output(),
self.kwargs['next_sentence_labels'])
self.loss = masked_lm_loss + next_sentence_loss
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(self.kwargs['masked_lm_ids'], [-1])
masked_lm_weights = tf.reshape(self.kwargs['masked_lm_weights'], [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(self.kwargs['next_sentence_labels'], [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss)
self.eval_metric = {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
def get_masked_lm_output(self, bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
# log_probs = tf.nn.log_softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
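    # Loss arithmetic sketch (illustrative, not from the original): with
    # label_weights=[1, 1, 0] and per_example_loss=[2.0, 4.0, 9.0], the padded
    # position is masked out and loss = (2.0 + 4.0) / (2.0 + 1e-5) ~= 3.0.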
def get_next_sentence_output(self, bert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
# log_probs = tf.nn.log_softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def build_output_layer_regression(self):
with tf.variable_scope("src-output-layer"):
self.src_estimation = tf.contrib.layers.fully_connected(
inputs=self.model.get_pooled_output(),
num_outputs=1,
activation_fn=None, #tf.nn.sigmoid
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=tf.contrib.layers.l2_regularizer(scale=1e-3),
biases_initializer=tf.constant_initializer(1e-04),
scope="FC"
)
self.src_prediction = self.src_estimation
self.src_pred_cost = tf.add(
tf.reduce_mean(tf.pow(self.src_prediction - self.labels, 2)),
tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)),
name="src_cost")
self.loss = self.src_pred_cost
self.logits = self.src_estimation
self.predictions = self.src_prediction
self.accuracy = tf.metrics.accuracy(self.labels, self.predictions)
print('loss', self.loss)
print('logits', self.logits)
print('predictions', self.predictions)
print('accuracy', self.accuracy)
def build_output_layer_classification(self):
with tf.variable_scope("src-output-layer"):
self.src_estimation = tf.contrib.layers.fully_connected(
inputs=self.model.get_pooled_output(),
num_outputs=2,
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=tf.contrib.layers.l2_regularizer(scale=1e-3),
biases_initializer=tf.constant_initializer(1e-04),
scope="FC"
)
self.src_prediction = tf.contrib.layers.softmax(self.src_estimation)[:, 1]
self.src_pred_cost = tf.add(
tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.src_estimation, labels=self.labels)),
tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)),
name="src_cost")
self.loss = self.src_pred_cost
self.logits = self.src_estimation
self.predictions = self.src_prediction
self.accuracy = tf.metrics.accuracy(self.labels, self.predictions)
print('logits', self.logits)
print('predictions', self.predictions)
print('accuracy', self.accuracy)
def build_output_layer_squad(self, is_training=False):
final_hidden = self.model.get_sequence_output()
final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
batch_size = final_hidden_shape[0]
seq_length = final_hidden_shape[1]
hidden_size = final_hidden_shape[2]
output_weights = tf.get_variable(
"cls/squad/output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden,
[batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
logits = tf.transpose(logits, [2, 0, 1])
unstacked_logits = tf.unstack(logits, axis=0)
(start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
# compute loss
seq_length = modeling.get_shape_list(self.input_ids)[1]
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
def def_loss():
start_positions = self.kwargs["start_positions"]
end_positions = self.kwargs["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
loss = (start_loss + end_loss) / 2.0
return loss
self.loss = def_loss()
def build_output_layer(self, is_training):
output_layer = self.model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [2], initializer=tf.zeros_initializer())
print('output_layer', output_layer.shape)
print('output_weights', output_weights.shape)
print('output_bias', output_bias.shape)
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
self.logits = logits
log_probs = tf.nn.log_softmax(self.logits)
print('logits', logits.shape)
one_hot_labels = tf.one_hot(self.labels, depth=2,
dtype=tf.float32)
self.per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
self.loss = tf.reduce_mean(self.per_example_loss)
self.predictions = tf.argmax(self.logits, axis=-1, output_type=tf.int32)
self.accuracy = tf.metrics.accuracy(self.labels, self.predictions)
class BertFinetuneSlice(object):
"""
    Finetune method based on BERT, with the encoder stages sliced across multiple devices.
"""
def __init__(self, bert_config_file, max_seq_length, is_training,
input_ids, input_mask, segment_ids, labels, use_one_hot_embeddings,
model_type='classification', slice_devices="/device:GPU:0",
dep_outputs=None, kwargs=None):
bert_config = modeling_slice.BertConfig.from_json_file(bert_config_file)
if max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(max_seq_length, bert_config.max_position_embeddings))
self.model = modeling_slice.BertModelSlice(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
if not isinstance(slice_devices, list):
logging.info("SLICE DEVICES: ", slice_devices)
self.devices = slice_devices.split(",")
else:
self.devices = slice_devices
self.stages = self.model.stages
ndev = len(self.devices)
nstage = len(self.stages)
def calc_device(i):
# Bert-24
if nstage == 27:
# 11:13
return 0 if i < 13 else 1
# Bert-48
elif nstage == 51:
# 23:25
return 0 if i < 25 else 1
else:
print("Unrecognized nstage, only bert-24 and bert-48 are supported.")
                sys.exit(1)
# idx = int((i+2) / ((nstage+1) / ndev + 1))
# return idx
self.stage_outputs = []
prev_output = input_ids
prev_device_idx = 0
        for i in range(nstage):
device_idx = calc_device(i)
if (i == 0 or device_idx != prev_device_idx) and \
(dep_outputs is not None and dep_outputs[device_idx] is not None):
#print ("***DEPS***", dep_outputs[device_idx])
dep = dep_outputs[device_idx] if isinstance(dep_outputs[device_idx], list) else [dep_outputs[device_idx]]
with tf.control_dependencies(dep), tf.device(self.devices[device_idx]):
output = self.stages[i](prev_output)
if device_idx != prev_device_idx:
self.stage_outputs.append(prev_output)
prev_device_idx = device_idx
prev_output = output
continue
if device_idx != prev_device_idx:
self.stage_outputs.append(prev_output)
prev_device_idx = device_idx
#with tf.control_dependencies([prev_output]), tf.device(self.devices[device_idx]):
with tf.device(self.devices[device_idx]):
output = self.stages[i](prev_output)
prev_output = output
self.bert_config = bert_config
self.kwargs = kwargs
self.labels = labels
self.input_ids = input_ids
if model_type == 'classification':
self.build_output_layer_classification()
elif model_type == 'regression':
self.build_output_layer_regression()
elif model_type == 'mrc':
with tf.device(self.devices[device_idx]):
self.build_output_layer_squad()
self.stage_outputs.append(self.loss)
elif model_type == 'pretrain':
self.build_pretrain()
else:
raise ValueError("model_type should be one of ['classification', "
"'regression', pretrain', 'mrc'].")
self.saver = tf.train.Saver(
var_list=tf.global_variables(),
max_to_keep=2)
def restore(self, saver_directory, sess):
checkpoint = tf.train.latest_checkpoint(saver_directory)
if not checkpoint:
logging.info("Couldn't find trained model at %s." % saver_directory)
else:
logging.info('restore from {}'.format(checkpoint))
self.saver.restore(sess, checkpoint)
def save(self, saver_directory, sess, step=None):
logging.info("Save to %s." % saver_directory)
if step is not None:
self.saver.save(sess, saver_directory, global_step=step)
else:
self.saver.save(sess, saver_directory)
def build_pretrain(self):
(masked_lm_loss,
masked_lm_example_loss, masked_lm_log_probs) = self.get_masked_lm_output(
self.bert_config,
self.model.get_sequence_output(),
self.model.get_embedding_table(),
self.kwargs['masked_lm_positions'],
self.kwargs['masked_lm_ids'],
self.kwargs['masked_lm_weights'])
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = self.get_next_sentence_output(
self.bert_config,
self.model.get_pooled_output(),
self.kwargs['next_sentence_labels'])
self.loss = masked_lm_loss + next_sentence_loss
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(self.kwargs['masked_lm_ids'], [-1])
masked_lm_weights = tf.reshape(self.kwargs['masked_lm_weights'], [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(self.kwargs['next_sentence_labels'], [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss)
self.eval_metric = {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
def get_masked_lm_output(self, bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling_slice.get_activation(bert_config.hidden_act),
kernel_initializer=modeling_slice.create_initializer(
bert_config.initializer_range))
input_tensor = modeling_slice.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
# log_probs = tf.nn.log_softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
def get_next_sentence_output(self, bert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling_slice.create_initializer(bert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
# log_probs = tf.nn.log_softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def build_output_layer_regression(self):
with tf.variable_scope("src-output-layer"):
self.src_estimation = tf.contrib.layers.fully_connected(
inputs=self.model.get_pooled_output(),
num_outputs=1,
activation_fn=None, #tf.nn.sigmoid
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=tf.contrib.layers.l2_regularizer(scale=1e-3),
biases_initializer=tf.constant_initializer(1e-04),
scope="FC"
)
self.src_prediction = self.src_estimation
self.src_pred_cost = tf.add(
tf.reduce_mean(tf.pow(self.src_prediction - self.labels, 2)),
tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)),
name="src_cost")
self.loss = self.src_pred_cost
self.logits = self.src_estimation
self.predictions = self.src_prediction
self.accuracy = tf.metrics.accuracy(self.labels, self.predictions)
print('loss', self.loss)
print('logits', self.logits)
print('predictions', self.predictions)
print('accuracy', self.accuracy)
def build_output_layer_classification(self):
with tf.variable_scope("src-output-layer"):
self.src_estimation = tf.contrib.layers.fully_connected(
inputs=self.model.get_pooled_output(),
num_outputs=2,
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=tf.contrib.layers.l2_regularizer(scale=1e-3),
biases_initializer=tf.constant_initializer(1e-04),
scope="FC"
)
self.src_prediction = tf.contrib.layers.softmax(self.src_estimation)[:, 1]
self.src_pred_cost = tf.add(
tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.src_estimation, labels=self.labels)),
tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)),
name="src_cost")
self.loss = self.src_pred_cost
self.logits = self.src_estimation
self.predictions = self.src_prediction
self.accuracy = tf.metrics.accuracy(self.labels, self.predictions)
print('logits', self.logits)
print('predictions', self.predictions)
print('accuracy', self.accuracy)
def build_output_layer_squad(self, is_training=False):
final_hidden = self.model.get_sequence_output()
final_hidden_shape = modeling_slice.get_shape_list(final_hidden, expected_rank=3)
batch_size = final_hidden_shape[0]
seq_length = final_hidden_shape[1]
hidden_size = final_hidden_shape[2]
with tf.variable_scope("cls/squad", reuse=tf.AUTO_REUSE):
output_weights = tf.get_variable(
"output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [2], initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden,
[batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
logits = tf.transpose(logits, [2, 0, 1])
unstacked_logits = tf.unstack(logits, axis=0)
(start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
# compute loss
seq_length = modeling_slice.get_shape_list(self.input_ids)[1]
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
def def_loss():
start_positions = self.kwargs["start_positions"]
end_positions = self.kwargs["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
loss = (start_loss + end_loss) / 2.0
return loss
self.loss = def_loss()
def build_output_layer(self, is_training):
output_layer = self.model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [2], initializer=tf.zeros_initializer())
print('output_layer', output_layer.shape)
print('output_weights', output_weights.shape)
print('output_bias', output_bias.shape)
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
self.logits = logits
log_probs = tf.nn.log_softmax(self.logits)
print('logits', logits.shape)
one_hot_labels = tf.one_hot(self.labels, depth=2,
dtype=tf.float32)
self.per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
self.loss = tf.reduce_mean(self.per_example_loss)
self.predictions = tf.argmax(self.logits, axis=-1, output_type=tf.int32)
self.accuracy = tf.metrics.accuracy(self.labels, self.predictions)
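# Device-placement sketch (derived from calc_device above): with two devices
# and a 24-layer model (nstage == 27), stages 0-12 run on self.devices[0] and
# stages 13-26 on self.devices[1]; stage_outputs records the tensors handed
# across the device boundary.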
|
11550160
|
from pydip.map.predefined.vanilla_dip import generate_map, generate_supply_center_map
def test_territory_adjacency_counts():
game_map = generate_map()
expected_counts = {
'North Atlantic Ocean' : 5,
'Mid-Atlantic Ocean' : 10,
'Irish Sea' : 5,
'English Channel' : 8,
'Brest' : 3,
'Brest Coast' : 4,
'Gascony' : 5,
'Gascony Coast' : 3,
'Spain' : 3,
'Spain North Coast' : 3,
'Spain South Coast' : 5,
'Portugal' : 1,
'Portugal Coast' : 3,
'Western Mediterranean Sea' : 6,
'North Africa' : 1,
'North Africa Coast' : 3,
'Tunis' : 1,
'Tunis Coast' : 4,
'Ionian Sea' : 9,
'Tyrrhenian Sea' : 7,
'Gulf of Lyon' : 6,
'Marseilles' : 4,
'Marseilles Coast' : 3,
'Burgundy' : 7,
'Paris' : 4,
'Picardy' : 4,
'Picardy Coast': 3,
'Wales' : 3,
'Wales Coast' : 4,
'London' : 2,
'London Coast' : 4,
'Yorkshire' : 4,
'Yorkshire Coast' : 3,
'Liverpool' : 4,
'Liverpool Coast' : 4,
'Clyde' : 2,
'Clyde Coast' : 4,
'Edinburgh' : 3,
'Edinburgh Coast' : 4,
'Norwegian Sea' : 6,
'North Sea' : 11,
'Skagerrak' : 4,
'Helgoland Bight' : 4,
'Denmark' : 2,
'Denmark Coast' : 6,
'Holland' : 3,
'Holland Coast' : 4,
'Belgium' : 4,
'Belgium Coast' : 4,
'Ruhr' : 5,
'Munich' : 7,
'Piedmont' : 4,
'Piedmont Coast' : 3,
'Tuscany' : 3,
'Tuscany Coast' : 4,
'Naples' : 2,
'Naples Coast' : 4,
'Rome' : 4,
'Rome Coast' : 3,
'Apulia' : 3,
'Apulia Coast' : 4,
'Venice' : 6,
'Venice Coast' : 3,
'Tyrolia' : 6,
'Kiel' : 5,
'Kiel Coast' : 5,
'Berlin' : 4,
'Berlin Coast' : 3,
'Prussia' : 4,
'Prussia Coast' : 3,
'Silesia' : 6,
'Bohemia' : 5,
'Vienna' : 5,
'Trieste' : 6,
'Trieste Coast' : 3,
'Albania' : 3,
'Albania Coast' : 4,
'Greece' : 3,
'Greece Coast' : 4,
'Serbia' : 6,
'Budapest' : 5,
'Galicia' : 7,
'Warsaw' : 6,
'Livonia' : 4,
'Livonia Coast' : 4,
'Moscow' : 5,
'St. Petersburg' : 4,
'St. Petersburg North Coast' : 2,
'St. Petersburg South Coast' : 3,
'Ukraine' : 5,
'Sevastopol' : 4,
'Sevastopol Coast' : 3,
'Rumania' : 6,
'Rumania Coast' : 3,
'Bulgaria' : 4,
'Bulgaria North Coast' : 3,
'Bulgaria South Coast' : 3,
'Constantinople' : 3,
'Constantinople Coast' : 6,
'Ankara' : 3,
'Ankara Coast' : 3,
'Smyrna' : 4,
'Smyrna Coast' : 4,
'Syria' : 2,
'Syria Coast' : 2,
'Armenia' : 4,
'Armenia Coast' : 3,
'Adriatic Sea' : 5,
'Aegean Sea' : 6,
'Eastern Mediterranean Sea' : 4,
'Black Sea' : 6,
'Gulf of Bothnia' : 5,
'Baltic Sea' : 7,
'Barents Sea' : 3,
'Finland' : 3,
'Finland Coast' : 3,
'Sweden' : 3,
'Sweden Coast' : 6,
'Norway' : 3,
'Norway Coast' : 6,
}
assert expected_counts.keys() == game_map.name_map.keys()
for name, count in expected_counts.items():
assert len(game_map.adjacency[name]) == count
def test_supply_center_counts():
game_map = generate_supply_center_map()
assert len(game_map.supply_centers) == 34
|
11550210
|
import os
import argparse
import warnings
warnings.simplefilter('ignore')
from solver import Solver
from data_loader import get_loader
from torch.backends import cudnn
def str2bool(v):
    return v.lower() in ('true',)
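# Example (illustrative): str2bool('True') -> True, str2bool('false') -> False.
# Without the trailing comma, `in ('true')` is a substring test, so e.g.
# str2bool('r') would wrongly return True.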
def main(config):
# For fast training.
cudnn.benchmark = True
# Create directories if not exist.
os.makedirs(config.log_dir, exist_ok=True)
os.makedirs(config.model_save_dir, exist_ok=True)
os.makedirs(config.sample_dir, exist_ok=True)
data_loader = get_loader(config.crop_size, config.image_size, config.batch_size,
config.dataset, config.mode, config.num_workers, config.line_type)
solver = Solver(data_loader, config)
if config.mode == 'train':
solver.train()
# elif config.mode == 'test':
# solver.test()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model configuration.
parser.add_argument('--crop_size', type=int, default=256, help='crop size for the CelebA dataset')
parser.add_argument('--image_size', type=int, default=276, help='image resolution')
parser.add_argument('--g_conv_dim', type=int, default=16, help='number of conv filters in the first layer of G')
parser.add_argument('--d_conv_dim', type=int, default=64, help='number of conv filters in the first layer of D')
parser.add_argument('--d_channel', type=int, default=448)
parser.add_argument('--channel_1x1', type=int, default=256)
parser.add_argument('--d_repeat_num', type=int, default=6, help='number of strided conv layers in D')
parser.add_argument('--lambda_rec', type=float, default=30, help='weight for reconstruction loss')
parser.add_argument('--lambda_gp', type=float, default=10, help='weight for gradient penalty')
parser.add_argument('--lambda_perc', type=float, default=0.01)
parser.add_argument('--lambda_style', type=float, default=50)
parser.add_argument('--lambda_tr', type=float, default=1)
# Training configuration.
parser.add_argument('--dataset', type=str, default='line_art') # , choices=['line_art, tag2pix']
parser.add_argument('--line_type', type=str, default='xdog') # , choices=['xdog, keras']
parser.add_argument('--batch_size', type=int, default=16, help='mini-batch size')
    parser.add_argument('--num_epoch', type=int, default=200, help='number of total epochs for training')
    parser.add_argument('--num_epoch_decay', type=int, default=100, help='number of epochs after which lr decay starts')
parser.add_argument('--g_lr', type=float, default=0.0002, help='learning rate for G') # Note that original paper is set to 0.0001.
parser.add_argument('--d_lr', type=float, default=0.0002, help='learning rate for D')
parser.add_argument('--n_critic', type=int, default=1, help='number of D updates per each G update')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for Adam optimizer')
parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam optimizer')
# Test configuration.
parser.add_argument('--test_epoch', type=int, default=200000, help='test model from this step')
# Miscellaneous.
parser.add_argument('--num_workers', type=int, default=8)
parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
# Directories.
parser.add_argument('--result_dir', type=str, default='results')
parser.add_argument('--exp_name', type=str, default='baseline')
# Step size.
parser.add_argument('--log_step', type=int, default=200)
parser.add_argument('--sample_epoch', type=int, default=1)
parser.add_argument('--model_save_step', type=int, default=40)
config = parser.parse_args()
config.log_dir = os.path.join(config.result_dir, config.exp_name, 'log')
    config.sample_dir = os.path.join(config.result_dir, config.exp_name, 'sample')
config.model_save_dir = os.path.join(config.result_dir, config.exp_name, 'model')
print(config)
main(config)
|
11550238
|
from datetime import datetime, time, date
from dateutil.rrule import weekday, rrule
from django.db import models
from django.db.models import Q
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from ...constants import frequency, status
from ...utils.slug_utils import generate_unique_slug
from ..abstract_content_model import AbstractContentModel, ContentQuerySet
from ..media.media_file import MediaFile
from ..pois.poi import POI
from .event_translation import EventTranslation
from .recurrence_rule import RecurrenceRule
class EventQuerySet(ContentQuerySet):
"""
Custom QuerySet to facilitate the filtering by date while taking recurring events into account.
"""
    def filter_upcoming(self, from_date=None):
"""
Filter all events that take place after the given date. This is, per definition, if at least one of the
following conditions is true:
* The end date of the event is the given date or later
* The event is indefinitely recurring
* The event is recurring and the recurrence end date is the given date or later
:param from_date: The date which should be used for filtering, defaults to the current date
:type from_date: datetime.date
:return: The Queryset of events after the given date
:rtype: ~integreat_cms.cms.models.events.event.EventQuerySet
"""
        if from_date is None:
            # Evaluate "today" at call time, not once at import time
            from_date = date.today()
        return self.filter(
Q(end_date__gte=from_date)
| Q(
recurrence_rule__isnull=False,
recurrence_rule__recurrence_end_date__isnull=True,
)
| Q(
recurrence_rule__isnull=False,
recurrence_rule__recurrence_end_date__gte=from_date,
)
)
    def filter_completed(self, to_date=None):
"""
Filter all events that are not ongoing and don't have any occurrences in the future. This is, per definition, if
at least one of the following conditions is true:
* The event is non-recurring and the end date of the event is before the given date
* The event is recurring and the recurrence end date is before the given date
:param to_date: The date which should be used for filtering, defaults to the current date
:type to_date: datetime.date
:return: The Queryset of events before the given date
:rtype: ~integreat_cms.cms.models.events.event.EventQuerySet
"""
        if to_date is None:
            # Evaluate "today" at call time, not once at import time
            to_date = date.today()
        return self.filter(
Q(recurrence_rule__isnull=True, end_date__lt=to_date)
| Q(
recurrence_rule__isnull=False,
recurrence_rule__recurrence_end_date__lt=to_date,
)
)
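# Usage sketch (illustrative; `objects` is the manager defined on Event below):
#   upcoming = Event.objects.filter_upcoming()                 # from today on
#   finished = Event.objects.filter_completed(date(2021, 1, 1))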
class Event(AbstractContentModel):
"""
Data model representing an event.
Can be directly imported from :mod:`~integreat_cms.cms.models`.
"""
location = models.ForeignKey(
POI,
null=True,
blank=True,
on_delete=models.PROTECT,
verbose_name=_("location"),
)
start_date = models.DateField(verbose_name=_("start date"))
start_time = models.TimeField(blank=True, verbose_name=_("start time"))
end_date = models.DateField(verbose_name=_("end date"))
end_time = models.TimeField(blank=True, verbose_name=_("end time"))
#: If the event is recurring, the recurrence rule contains all necessary information on the frequency, interval etc.
#: which is needed to calculate the single instances of a recurring event
recurrence_rule = models.OneToOneField(
RecurrenceRule,
null=True,
on_delete=models.SET_NULL,
related_name="event",
verbose_name=_("recurrence rule"),
)
icon = models.ForeignKey(
MediaFile,
verbose_name=_("icon"),
on_delete=models.SET_NULL,
blank=True,
null=True,
)
archived = models.BooleanField(default=False, verbose_name=_("archived"))
#: The default manager
objects = EventQuerySet.as_manager()
@property
def fallback_translations_enabled(self):
"""
Whether translations should be returned in the default language if they do not exist
:return: Whether fallback translations are enabled
:rtype: bool
"""
return self.region.fallback_translations_enabled
@staticmethod
def get_translation_model():
"""
Returns the translation model of this content model
:return: The class of translations
:rtype: type
"""
return EventTranslation
@cached_property
def is_recurring(self):
"""
This property checks if the event has a recurrence rule and thereby determines, whether the event is recurring.
:return: Whether the event is recurring or not
:rtype: bool
"""
return bool(self.recurrence_rule)
@cached_property
def is_all_day(self):
"""
This property checks whether an event takes place the whole day by checking if start time is minimal and end
time is maximal.
:return: Whether event takes place all day
:rtype: bool
"""
return self.start_time == time.min and self.end_time == time.max.replace(
second=0, microsecond=0
)
@cached_property
def has_location(self):
"""
This property checks whether the event has a physical location (:class:`~integreat_cms.cms.models.pois.poi.POI`).
:return: Whether event has a physical location
:rtype: bool
"""
return bool(self.location)
def get_occurrences(self, start, end):
"""
Get occurrences of the event that overlap with ``[start, end]``.
Expects ``start < end``.
:param start: the begin of the requested interval.
:type start: ~datetime.datetime
:param end: the end of the requested interval.
:type end: ~datetime.datetime
:return: start datetimes of occurrences of the event that are in the given timeframe
:rtype: list [ ~datetime.datetime ]
"""
event_start = datetime.combine(
self.start_date, self.start_time if self.start_time else time.min
)
event_end = datetime.combine(
self.end_date, self.end_time if self.end_time else time.max
)
event_span = event_end - event_start
recurrence = self.recurrence_rule
if recurrence is not None:
until = min(
end,
datetime.combine(
recurrence.recurrence_end_date
if recurrence.recurrence_end_date
else date.max,
time.max,
),
)
if recurrence.frequency in (frequency.DAILY, frequency.YEARLY):
occurrences = rrule(
recurrence.frequency,
dtstart=event_start,
interval=recurrence.interval,
until=until,
)
elif recurrence.frequency == frequency.WEEKLY:
occurrences = rrule(
recurrence.frequency,
dtstart=event_start,
interval=recurrence.interval,
byweekday=recurrence.weekdays_for_weekly,
until=until,
)
else:
occurrences = rrule(
recurrence.frequency,
dtstart=event_start,
interval=recurrence.interval,
byweekday=weekday(
recurrence.weekday_for_monthly, recurrence.week_for_monthly
),
until=until,
)
return [
x
for x in occurrences
if start <= x <= end or start <= x + event_span <= end
]
return (
[event_start]
if start <= event_start <= end or start <= event_end <= end
else []
)
def copy(self, user):
"""
This method creates a copy of this event and all of its translations.
This method saves the new event.
:param user: The user who initiated this copy
:type user: ~django.contrib.auth.models.User
:return: A copy of this event
:rtype: ~integreat_cms.cms.models.events.event.Event
"""
# save all translations on the original object, so that they can be copied later
translations = list(self.translations.all())
# Clear the own recurrence rule.
# If the own recurrence rule would not be cleared, django would throw an
# error that the recurrence rule is not unique (because it would belong to both
# the cloned and the new object)
recurrence_rule = self.recurrence_rule
if recurrence_rule:
# copy the recurrence rule, if it exists
recurrence_rule.pk = None
recurrence_rule.save()
self.recurrence_rule = recurrence_rule
# create the copied event
self.pk = None
self.save()
copy_translation = _("copy")
# Create new translations for this event
for translation in translations:
translation.pk = None
translation.event = self
translation.status = status.DRAFT
translation.title = f"{translation.title} ({copy_translation})"
translation.slug = generate_unique_slug(
**{
"slug": translation.slug,
"manager": type(translation).objects,
"object_instance": translation,
"foreign_model": "event",
"instance": self,
"region": self.region,
"language": translation.language,
}
)
translation.currently_in_translation = False
translation.creator = user
translation.save()
return self
class Meta:
#: The verbose name of the model
verbose_name = _("event")
#: The plural verbose name of the model
verbose_name_plural = _("events")
#: The name that will be used by default for the relation from a related object back to this one
default_related_name = "events"
#: The fields which are used to sort the returned objects of a QuerySet
ordering = ["start_date", "start_time"]
#: The default permissions for this model
default_permissions = ("change", "delete", "view")
#: The custom permissions for this model
permissions = (("publish_event", "Can publish events"),)
|
11550258
|
import argparse
import psutil
import signal
import subprocess
import threading
import time
import uuid
import tempfile
import shutil
import pytest
import os
from rebus.agent import Agent, AgentRegistry
from rebus.bus import BusRegistry, DEFAULT_DOMAIN
import rebus.agents
import rebus.buses
rebus.agents.import_all()
rebus.buses.import_all()
# This file implements integration testing - injects a file to the bus, checks
# that it has properly been received by agents & storage, that no agent has
# crashed.
# Warning: py.test hides exceptions that happen during the test; run REbus
# without py.test when tests misbehave
# TODO tests (cf bta/test/test_miners): all agents are registered / instantiate
# without crashing
# TODO add argument parsing tests - check that help is displayed
@pytest.fixture(scope='function', params=['diskstorage', 'ramstorage'])
def storage(request):
"""
Returns a string that describes the storage type, and a list containing
arguments for the storage backend.
Perform setup & teardown for storage.
"""
# py.test-provided fixture "tmpdir" does not guarantee an empty temp
# directory, which get re-used when test is run again - rolling our own...
args = []
if request.param == 'diskstorage':
tmpdir = tempfile.mkdtemp('rebus-test-%s' % request.param)
args = ['diskstorage', '--path', tmpdir]
def fin():
shutil.rmtree(tmpdir)
request.addfinalizer(fin)
return (request.param, args)
@pytest.fixture(scope='function', params=['localbus', 'dbus', 'rabbit'])
def bus(request, storage):
"""
Returns fixture parameters and a function that returns a bus instance.
"""
storagetype, storageparams = storage
if request.param == 'dbus':
check_master_not_running()
# launch rebus master
process = subprocess.Popen(['rebus_master', 'dbus'] + storageparams,
stderr=subprocess.PIPE)
# wait for master bus to be ready - TODO look into & fix race
time.sleep(0.5)
output = ""
# output = process.stderr.read(1)
def fin():
process.send_signal(signal.SIGINT)
process.wait()
assert process.returncode == 0, output + process.stderr.read()
request.addfinalizer(fin)
def return_bus():
busclass = rebus.bus.BusRegistry.get(request.param)
bus_parser = argparse.ArgumentParser()
busclass.add_arguments(bus_parser)
bus_options = bus_parser.parse_args([])
return busclass(bus_options)
elif request.param == 'rabbit':
check_master_not_running()
# launch rebus master
args = "rebus_master -v rabbit".split(' ')
process = subprocess.Popen(args + storageparams,
stderr=subprocess.PIPE)
# wait for master bus to be ready - TODO look into & fix race
# TODO check queues are empty or empty them
# until then: run the following commands to empty queues
# rabbitmqctl stop_app; rabbitmqctl reset; rabbitmqctl start_app
time.sleep(1)
def fin():
os.kill(process.pid, signal.SIGINT)
process.wait()
assert process.returncode == 0, process.stderr.read()
request.addfinalizer(fin)
def return_bus():
busclass = rebus.bus.BusRegistry.get(request.param)
bus_parser = argparse.ArgumentParser()
busclass.add_arguments(bus_parser)
bus_options = bus_parser.parse_args([])
return busclass(bus_options)
elif request.param == 'localbus':
# always return the same bus instance
if storagetype == 'diskstorage':
pytest.skip("diskstorage is not supported by localbus")
bus_options = argparse.Namespace()
instance = BusRegistry.get(request.param)(bus_options)
def return_bus():
return instance
return (request.param, storagetype, return_bus)
def check_master_not_running():
# 'rebus_master' is too long - gets truncated
running = any(['rebus_master' in p.name() for p in psutil.process_iter()])
assert running is False, "rebus_master_dbus is already running"
def parse_arguments(agent_class, args):
"""
Returns a namespace containing parsed arguments for the requested agent.
:param args: list of arguments
"""
parser = argparse.ArgumentParser()
agent_class.add_agent_arguments(parser)
options, _ = parser.parse_known_args(args)
return options
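# Example (illustrative): parse_arguments(AgentRegistry.get('inject'),
# ['/bin/ls']) yields an argparse.Namespace with the inject agent's options,
# mirroring how the agent_inject fixture below builds its agent.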
@pytest.fixture(scope='function')
def agent_test(bus):
"""
Returns an instance of a test agent, registered to the bus and running in
another thread.
"""
bustype, storagetype, returnbus = bus
@Agent.register
class TestAgent(Agent):
_name_ = "testagent_%s_%s" % (bustype, storagetype)
_desc_ = "Accepts any input. Records received selectors, descriptors"
received_selectors = []
processed_descriptors = []
def selector_filter(self, selector):
self.received_selectors.append(selector)
return True
def process(self, desc, sender_id):
self.processed_descriptors.append((desc, sender_id))
namespace = parse_arguments(TestAgent, [])
agent = TestAgent(bus=returnbus(), domain='default', options=namespace)
return agent
@pytest.fixture(scope='function')
def agent_inject(bus, request):
bustype, storagetype, returnbus = bus
if bustype == 'localbus':
bus_instance = returnbus()
agent_class = AgentRegistry.get('inject')
namespace = parse_arguments(agent_class, ['/bin/ls'])
agent = agent_class(bus=bus_instance, domain='default',
options=namespace)
return agent
elif bustype in ('rabbit', 'dbus'):
# Running two DBUS agents in the same process does not work yet -
# dbus signal handler related problem
returncode = subprocess.call(('rebus_agent', '--bus', bustype,
'inject', '/bin/ls'))
assert returncode == 0
return
@pytest.fixture(scope='function')
def agent_set(bus):
"""
Run predefined sets of agents on the bus. Check that they did not crash at
the end.
"""
# TODO
pass
@pytest.mark.parametrize('busname', ['dbus', 'rabbit'])
def test_master(busname):
"""
Run, then stop rebus_master_dbus
"""
check_master_not_running()
process = subprocess.Popen(['rebus_master', busname],
stderr=subprocess.PIPE, bufsize=0)
# wait for master bus to be ready
# TODO look into race condition. Another SIGINT handler?
time.sleep(2)
output = process.stderr.read(1)
process.send_signal(signal.SIGINT)
process.wait()
assert process.returncode == 0, output + process.stderr.read()
def test_inject(agent_set, agent_test, agent_inject):
"""
* Inject a file to the bus
* Check that is can be fetched from the bus interface
* Check that it has been received by the test agent
* Make sure no agent has thrown any exception
"""
bus_instance = agent_test.bus
t = threading.Thread(target=bus_instance.run_agents)
t.daemon = True
t.start()
# TODO cleanly make sure all agents from agentset have finished processing
time.sleep(1)
injected_value = open('/bin/ls', 'rb').read()
# Fetch using the bus interface, check value
# Find by selector regexp
selectors = bus_instance.find(agent_test.id, DEFAULT_DOMAIN,
'/binary/elf', 10)
assert len(selectors) > 0
# Get descriptor
descriptor = bus_instance.get(agent_test.id, DEFAULT_DOMAIN,
selectors[0])
assert descriptor.value == injected_value
assert descriptor.domain == 'default'
assert descriptor.agent == 'inject'
assert descriptor.label == 'ls'
assert descriptor.precursors == []
assert descriptor.selector.startswith('/binary/elf/%')
assert uuid.UUID(descriptor.uuid) is not None
assert descriptor.version == 0
# Get descriptor by version
descriptor_version = bus_instance.get(agent_test.id, DEFAULT_DOMAIN,
selectors[0].split('%')[0]+'~-1')
# force fetching descriptor value
assert descriptor_version.value == descriptor.value
assert descriptor_version == descriptor
assert descriptor == descriptor_version
# Find by value regexp
descriptors_byvalue = bus_instance.find_by_value(agent_test.id,
DEFAULT_DOMAIN, '/binary',
injected_value[0:4])
# Check UUID exists & can be found
assert descriptors_byvalue[0].value == descriptor.value
uuids = bus_instance.list_uuids(agent_test.id, DEFAULT_DOMAIN)
assert descriptor.uuid in uuids
descriptors_uuid = bus_instance.find_by_uuid(agent_test.id, DEFAULT_DOMAIN,
descriptor.uuid)
# Find by selector
bysel = bus_instance.find_by_selector(agent_test.id, DEFAULT_DOMAIN,
'/binary')
assert len(bysel) == 1
assert bysel[0].value == descriptor.value
# force fetching descriptor value
assert descriptors_uuid[0].value == descriptor.value
assert descriptors_uuid[0] == descriptor
# Check that it has been received by TestAgent
received = agent_test.received_selectors
processed = agent_test.processed_descriptors
assert received == selectors
# force fetching descriptor value
assert processed[0][0].value == descriptor.value
assert processed[0][0] == descriptor
|
11550270
|
import torch
from torch.autograd import Variable, Function
class ShakeDrop(Function):
@staticmethod
def forward(ctx, x, alpha, beta, death_rate, is_train):
gate = (torch.rand(1) > death_rate).numpy()
ctx.gate = gate
ctx.save_for_backward(x, alpha, beta)
if is_train:
if not gate:
y = alpha * x
else:
y = x
else:
y = x.mul(1 - (death_rate * 1.0))
return y
@staticmethod
def backward(ctx, grad_output):
x, alpha, beta = ctx.saved_variables
        grad_x = grad_alpha = grad_beta = None
if ctx.needs_input_grad[0]:
if not ctx.gate:
grad_x = grad_output * beta
else:
grad_x = grad_output
return grad_x, grad_alpha, grad_beta, None, None
shake_drop = ShakeDrop.apply
def generate_alpha_beta_single(tensor_size, shake_config, is_cuda):
forward_shake, backward_shake, shake_image = shake_config
if forward_shake and not shake_image:
alpha = torch.rand(tensor_size).mul(2).add(-1)
elif forward_shake and shake_image:
alpha = torch.rand(tensor_size[0]).view(tensor_size[0], 1, 1, 1)
alpha.mul_(2).add_(-1) # alpha from -1 to 1
else:
alpha = torch.FloatTensor([0.5])
if backward_shake and not shake_image:
beta = torch.rand(tensor_size)
elif backward_shake and shake_image:
beta = torch.rand(tensor_size[0]).view(tensor_size[0], 1, 1, 1)
else:
beta = torch.FloatTensor([0.5])
if is_cuda:
alpha = alpha.cuda()
beta = beta.cuda()
return Variable(alpha), Variable(beta)
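# Minimal usage sketch (an assumption, not part of the original module):
# exercises shake_drop end-to-end on an old, Variable-era PyTorch.
if __name__ == "__main__":
    x = Variable(torch.randn(8, 16, 32, 32), requires_grad=True)
    # shake_config = (forward_shake, backward_shake, shake_image)
    alpha, beta = generate_alpha_beta_single(x.size(), (True, True, True), False)
    y = shake_drop(x, alpha, beta, 0.5, True)  # death_rate=0.5, training mode
    y.sum().backward()
    print(x.grad.size())  # gradient has the same shape as x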
|
11550328
|
import os
import os.path
from skidl import *
def convert_libs(from_dir, to_dir):
lib_files = [l for l in os.listdir(from_dir) if l.endswith(lib_suffixes[KICAD])]
for lib_file in lib_files:
print(lib_file)
basename = os.path.splitext(lib_file)[0]
lib = SchLib(os.path.join(from_dir, lib_file), tool=KICAD)
lib.export(libname=basename, file=os.path.join(to_dir, basename + lib_suffixes[SKIDL]))
if __name__ == '__main__':
import skidl.libs
for lib_dir in lib_search_paths[KICAD]:
convert_libs(lib_dir, skidl.libs.__path__[0])
|
11550336
|
import time
from compas.datastructures import Mesh
import compas_fab
from compas_fab.backends import RosClient
from compas_fab.robots import CollisionMesh
from compas_fab.robots import PlanningScene
with RosClient() as client:
robot = client.load_robot()
scene = PlanningScene(robot)
assert robot.name == 'ur5'
# create collision object
mesh = Mesh.from_stl(compas_fab.get('planning_scene/cone.stl'))
cm = CollisionMesh(mesh, 'tip')
# attach it to the end-effector
group = robot.main_group_name
scene.attach_collision_mesh_to_robot_end_effector(cm, group=group)
# sleep a bit before removing the tip
time.sleep(1)
scene.reset()
|
11550368
|
from .dropout import BernoulliDropout, ConcreteDropout, GaussianDropout
from .padding import ZeroPadding3DChannels
__all__ = [
"BernoulliDropout",
"ConcreteDropout",
"GaussianDropout",
"ZeroPadding3DChannels",
]
|
11550407
|
import tvm
import tvm.relay as relay
import tvm.relay.testing as testing
from graphviz import Digraph
# from ..workloads.onnx_workloads import get_network_from_onnx
from workloads.torch_workloads import get_network_from_torch
import argparse
from tvm.relay.transform.utility.visualize import visualize_network
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Default type is string for argparse
parser.add_argument("-n", "--network", help="name of a neural network")
args = parser.parse_args()
is_missing_arg = not args.network
# is_missing_arg |= not args.target
if is_missing_arg:
parser.error('Make sure you input all arguments')
# mod = get_resnet_8()
# mod, _, _, _ = get_network_from_onnx(args.network, batch_size=1)
mod, _, _, _ = get_network_from_torch(args.network, batch_size=1)
visualize_network(mod["main"], args.network)
|
11550419
|
from machine import Pin, UART
import time
import sys
class Serial:
""" 使用ESP32的串口连接外部设备
"""
def __init__(self, id=2, baudrate=2000000, rx_pin=14, tx_pin=12, timeout=2000, timeout_char=10):
import select
self.uart = UART(id, baudrate, rx=rx_pin, tx=tx_pin,
timeout=timeout, rxbuf=4096)
self.poll = select.poll()
self.poll.register(self.uart, select.POLLIN)
def close(self):
self.uart.deinit()
def read(self, size=1):
data = self.uart.read(size)
if data == None:
data = b""
return data
# data = b""
# while len(data) < size:
# r = self.uart.read(size - len(data))
# if r:
# data += r
# return data
def write(self, data):
return self.uart.write(data)
def inWaiting(self):
res = self.poll.poll(0)
if res:
return 1
return 0
class RemoteException(Exception):
pass
def stdout_write_bytes(b):
b = b.replace(b"\x04", b"")
sys.stdout.write(b)
class REPL:
""" 使用串口调用远程micropython设备的REPL接口
Returns
-------
[type]
[]
Raises
------
RemoteException
[description]
"""
def __init__(self, serial):
self.serial = serial
self.debug = False
def close(self):
self.serial.close()
def read_until(self, min_num_bytes, ending, timeout=20, data_consumer=None):
# if data_consumer is used then data is not accumulated and the ending must be 1 byte long
assert data_consumer is None or len(ending) == 1
data = self.serial.read(min_num_bytes)
if data_consumer:
data_consumer(data)
timeout_count = 0
while True:
if data.endswith(ending):
break
elif self.serial.inWaiting() > 0:
new_data = self.serial.read(1)
if data_consumer:
data_consumer(new_data)
data = new_data
else:
data = data + new_data
timeout_count = 0
else:
timeout_count += 1
if timeout is not None and timeout_count >= 100 * timeout:
break
time.sleep(0.01)
return data
    def enter_raw_repl(self, timeout=20, soft_reset=False):
# ctrl-C twice: interrupt any running program
self.serial.write(b"\r\n\x03\x03")
# flush input (without relying on serial.flushInput())
n = self.serial.inWaiting()
        if n == 0:  # no response: the device may already be in raw REPL, send ctrl-B to exit it
self.serial.write(b"\x02")
while n > 0:
self.serial.read(n)
n = self.serial.inWaiting()
self.serial.write(b"\r\n\x01") # ctrl-A: enter raw REPL
data = self.read_until(1, b"raw REPL; CTRL-B to exit\r\n>", timeout)
if not data.endswith(b"raw REPL; CTRL-B to exit\r\n>"):
# print('ctrl-A:')
# print(data)
raise RemoteException("could not enter raw repl")
        if soft_reset:
self.serial.write(b"\x04") # ctrl-D: soft reset
data = self.read_until(1, b"soft reboot\r\n", timeout)
if not data.endswith(b"soft reboot\r\n"):
raise RemoteException("could not enter raw repl")
# By splitting this into 2 reads, it allows boot.py to print stuff,
# which will show up after the soft reboot and before the raw REPL.
data = self.read_until(1, b"raw REPL; CTRL-B to exit\r\n", timeout)
if not data.endswith(b"raw REPL; CTRL-B to exit\r\n"):
# print('ctrl-D:')
# print(data)
raise RemoteException("could not enter raw repl")
        # execute one REPL operation to guard against errors in subsequent commands
self.serial.write(b'import os')
self.serial.write(b'\x04')
self.follow(10)
def exit_raw_repl(self):
self.serial.write(b"\r\x02") # ctrl-B: enter friendly REPL
def follow(self, timeout, data_consumer=None):
# wait for normal output
data = self.read_until(1, b"\x04", timeout=timeout,
data_consumer=data_consumer)
if not data.endswith(b"\x04"):
raise RemoteException("timeout waiting for first EOF reception")
data = data[:-1]
# wait for error output
data_err = self.read_until(1, b"\x04", timeout=timeout)
if not data_err.endswith(b"\x04"):
raise RemoteException("timeout waiting for second EOF reception")
data_err = data_err[:-1]
# return normal and error output
return data, data_err
def exec_raw_no_follow(self, command):
if isinstance(command, bytes):
command_bytes = command
else:
command_bytes = bytes(command, "utf8")
if self.debug:
            print("K210 CMD: %r" % command_bytes)  # debug print
# check we have a prompt
data = self.read_until(1, b">")
if not data.endswith(b">"):
raise RemoteException("could not enter raw repl")
# write command
for i in range(0, len(command_bytes), 256):
self.serial.write(
command_bytes[i: min(i + 256, len(command_bytes))])
time.sleep(0.01)
self.serial.write(b"\x04")
# check if we could exec command
data = self.serial.read(2)
if data != b"OK":
raise RemoteException(
"could not exec command (response: %r)" % data)
def exec_raw(self, command, timeout=20, data_consumer=None):
self.exec_raw_no_follow(command)
return self.follow(timeout, data_consumer)
def eval(self, expression):
ret = self.exec_("print({0})".format(expression))
ret = ret.strip()
return ret
def exec_(self, command, data_consumer=None):
# print(command)
ret, ret_err = self.exec_raw(command, data_consumer=data_consumer)
if ret_err:
raise RemoteException("exception", ret, ret_err)
return ret
def exec_file(self, filename):
with open(filename, "rb") as f:
pyfile = f.read()
return self.exec_(pyfile)
# def get_time(self):
# t = str(self.eval("pyb.RTC().datetime()"), encoding="utf8")[1:-1].split(", ")
# return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])
def fs_ls(self, src):
        cmd = (
            "import uos\r\nfor f in uos.ilistdir(%s):\r\n"
            " print('{:12} {}{}'.format(f[3] if len(f)>3 else 0,f[0],'/' if f[1]&0x4000 else ''))"
            % (("'%s'" % src) if src else "")
        )
# print(cmd) #debug
self.exec_(cmd, data_consumer=stdout_write_bytes)
def fs_cat(self, src, chunk_size=256):
cmd = (
"with open('%s') as f:\n while 1:\n"
" b=f.read(%u)\n if not b:break\n print(b,end='')" % (
src, chunk_size)
)
self.exec_(cmd, data_consumer=stdout_write_bytes)
def fs_get(self, src, dest, chunk_size=1024):
self.exec_("f=open('%s','rb')\nr=f.read" % src)
with open(dest, "wb") as f:
while True:
data = bytearray()
self.exec_("print(r(%u))" % chunk_size,data_consumer=lambda d: data.extend(d))
assert data[-3:] == b"\r\n\x04"
data = eval(str(data[:-3], "ascii"))
if data == bytearray(b""):
break
f.write(data)
self.exec_("f.close()")
def fs_put(self, src, dest, chunk_size=1024):
self.exec_("f=open('%s','wb')\nw=f.write" % dest)
with open(src, "rb") as f:
while True:
data = f.read(chunk_size)
if not data:
break
if sys.version_info < (3,):
self.exec_("w(b" + repr(data) + ")")
else:
self.exec_("w(" + repr(data) + ")")
self.exec_("f.close()")
def fs_mkdir(self, dir):
self.exec_("import uos\nuos.mkdir('%s')" % dir)
def fs_rmdir(self, dir):
self.exec_("import uos\nuos.rmdir('%s')" % dir)
def fs_rm(self, src):
self.exec_("import uos\nuos.remove('%s')" % src)
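
# A minimal usage sketch (not part of the original tool): given an instance of
# the serial wrapper defined above, enter the remote raw REPL, run one command,
# and clean up. The command string is a placeholder.
def _demo_remote_exec(serial):
    repl = REPL(serial)
    try:
        repl.enter_raw_repl()
        out = repl.exec_("print(1 + 1)")  # remote stdout is returned as bytes
        stdout_write_bytes(out)
        repl.exit_raw_repl()
    finally:
        repl.close()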
|
11550441
|
import os
from datetime import datetime
from time import sleep
from django.urls import reverse
from factories import UserFactory
from selenium.webdriver.common.keys import Keys
from competitions.models import Competition
from tasks.models import Task
from ..utils import SeleniumTestCase
class TestCompetitions(SeleniumTestCase):
def setUp(self):
super().setUp()
        self.user = UserFactory(password='test')
self.login(self.user.username, 'test')
def current_server_time_exists(self):
# Get server time element
element = self.find('#server_time')
text = element.get_attribute('innerText')
# Check that the text is a valid datetime by loading it with strptime.
# This will raise a ValueError if the format is incorrect.
assert datetime.strptime(text, '%B %d, %Y, %I:%M %p %Z')
def _upload_competition(self, competition_zip_path):
"""Creates a competition and waits for success message.
:param competition_zip_path: Relative to test_files/ dir
"""
self.get(reverse('competitions:upload'))
self.find('input[ref="file_input"]').send_keys(os.path.join(self.test_files_dir, competition_zip_path))
self.circleci_screenshot(name='uploading_comp.png')
assert self.element_is_visible('div .ui.success.message')
comp = self.user.competitions.first()
comp_url = reverse("competitions:detail", kwargs={"pk": comp.id})
self.find(f'a[href="{comp_url}"]').click()
self.assert_current_url(comp_url)
self.current_server_time_exists()
def test_upload_v15_competition(self):
self._upload_competition('competition_15.zip')
def test_upload_v18_competition(self):
self._upload_competition('competition_18.zip')
def test_upload_v2_competition(self):
self._upload_competition('competition.zip')
def test_manual_competition_creation(self):
# Dataset Creation
self.find('i[selenium="tasks"]').click()
self.find('div[data-tab="datasets"]').click()
self.find('i[selenium="add-dataset"]').click()
self.find('input-text[selenium="scoring-name"] input').send_keys('sCoRiNg NaMe')
self.find('input-text[selenium="scoring-desc"] input').send_keys('sCoRiNg DeScRiPtItIoN')
self.execute_script('$("select[selenium=\'type\']").dropdown("set selected", "scoring_program")')
self.find('input-file[selenium="file"] input').send_keys(os.path.join(self.test_files_dir, 'scoring_program.zip'))
self.find('i[selenium="upload"]').click()
sleep(2)
# Task Creation
self.find('div[data-tab="tasks"]').click()
self.find('div[selenium="create-task"]').click()
self.find('input[selenium="name2"]').send_keys('nAmE')
self.find('textarea[selenium="task-desc"]').send_keys('textbox')
self.find('div[data-tab="data"]').click()
self.find('input[id="scoring_program"]').send_keys('sco')
sleep(.5)
self.execute_script('$("div[selenium=\'scoring-program\'] a")[0].click()')
self.find('div[selenium="save-task"]').click()
# Details Tab
competition_title = "selenium_test_comp"
self.get(reverse('competitions:create'))
self.find('input[ref="title"]').send_keys(competition_title)
self.find('input[ref="logo"]').send_keys(os.path.join(self.test_files_dir, 'test_logo.png'))
self.find('input[ref="docker_image"]').send_keys('docker_image')
# Participation Tab
self.find('a[data-tab="participation"]').click()
self.execute_script('$("textarea[ref=\'terms\']")[0].EASY_MDE.value("pArTiCiPaTe")')
sleep(2)
self.find('input[selenium="auto-approve"]').click()
# Pages Tab
self.find('a[data-tab="pages"]').click()
self.find('i[class="add icon"]').click()
self.find('input[selenium="title"]').send_keys('Title')
self.execute_script('$("textarea[ref=\'content\']")[0].EASY_MDE.value("Testing123")')
self.find('div[selenium="save1"]').click()
sleep(1)
# Phases Tab
self.find('a[data-tab="phases"]').click()
self.find('i[selenium="add-phase"]').click()
sleep(1)
self.find('form[selenium="phase-form"] input[name="name"]').send_keys('Name')
sleep(.1)
self.find('input[name="start"]').click()
self.find('input[name="start"]').send_keys(2)
self.find('input[name="start"]').send_keys(Keys.ENTER)
self.find('input[name="end"]').send_keys(3)
self.find('input[name="end"]').send_keys(Keys.ENTER)
self.find('label[for="tasks"]').click()
sleep(.1)
self.find("form[selenium='phase-form'] input.search").send_keys("Wheat")
sleep(.1)
tasks = Task.objects.all()
import random
random_task = random.choice(tasks)
task = random_task.key
self.find(f"form[selenium='phase-form'] .menu .item[data-value='{task}']").click()
self.execute_script('$("textarea[ref=\'description\']")[0].EASY_MDE.value("Testing123")')
self.find('form[selenium="phase-form"] input[name="name"]').send_keys('Name')
sleep(1)
self.find('div[selenium="save2"]').click()
sleep(1)
# Leaderboard Tab
leaderboard_title = 'tItLe'
self.find('a[data-tab="leaderboard"]').click()
self.find('i[selenium="add-leaderboard"]').click()
self.find('input[selenium="title1"]').send_keys(leaderboard_title)
self.find('input[selenium="key"]').send_keys('kEy')
self.find('div[selenium="add-column"]').click()
sleep(1)
self.find('input[selenium="column-key"]').send_keys('cOlUmN kEy')
self.find('input[selenium="hidden"]').click()
self.find('div[selenium="save3"]').click()
sleep(2)
assert not Competition.objects.filter(title=competition_title).exists()
self.find('button[selenium="save4"]').click()
sleep(1)
assert Competition.objects.filter(title=competition_title).exists()
|
11550447
|
import sys
import gzip
import numpy as np
import scipy.stats as ss
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
fha = gzip.open(sys.argv[1], "rt") if sys.argv[1].endswith(".gz") else open(sys.argv[1])
fhb = gzip.open(sys.argv[2], "rt") if sys.argv[2].endswith(".gz") else open(sys.argv[2])
LCR = len(sys.argv) > 3 and sys.argv[3] == "LCR"
def gen(fh):
for line in fh:
toks = line.rstrip().split("\t")
toks[1], toks[2] = int(toks[1]), int(toks[2])
toks[3] = float(toks[3])
yield toks
xs, ys = [], []
lcr = []
ras = []
rbs = []
for i, (a, b) in enumerate(zip(gen(fha), gen(fhb))):
if a[1] != b[1]:
raise Exception("expected same positions for both files")
xs.append(a[3])
ys.append(b[3])
if LCR:
assert b[4] == a[4]
lcr.append(b[4] != '0')
ras.append(a)
rbs.append(b)
#if not lcr[-1]:
# print(abs(xs[-1] - ys[-1]), lcr[-1])
plt.rc('ytick', labelsize=16)
plt.rc('xtick', labelsize=16)
fig, axes = plt.subplots(1, figsize=(18, 14))
axes = (axes,)
ras, rbs = np.array(ras), np.array(rbs)
lcr = np.array(lcr)
ys = np.array(ys)
ys /= np.median(ys)
xs = np.array(xs)
if LCR:
xs = xs[~lcr]
ys = ys[~lcr]
ras = ras[~lcr]
rbs = rbs[~lcr]
diff = xs - ys
print(diff[np.abs(diff) > 0.5])
for a, b, d, sc in zip(ras[np.abs(diff)>0.5].tolist(),
rbs[np.abs(diff)>0.5].tolist(), diff[np.abs(diff) > 0.5],
ys[np.abs(diff) > 0.5]):
    print(a, b, d, sc)
out = sum(abs(d) > 0.5 for d in diff)
print("out:", out, "total:", len(diff), ("%.2f" % (100.0*out/len(diff))))
print("max diff:", np.abs(diff).max())
print("corr:", np.corrcoef(xs, ys)[0, 1])
from scipy.stats import spearmanr
print("spearman corr:", spearmanr(xs, ys)[0])
print(sum(abs(d) < 0.25 for d in diff) / float(len(diff)))
print(sum(abs(d) < 0.1 for d in diff) / float(len(diff)))
sdiff = diff[np.abs(diff) < 0.5]
axes[0].hist(sdiff, 40)
axes[0].set_xlim(-0.5, 0.5)
axes[0].set_xlabel("Difference in depth estimate (indexcov - samtools)",
fontsize=20)
axes[0].set_ylabel("Number of Tiles", fontsize=20)
out = (np.abs(diff) > 0.5).sum()
#ax = axes[0]
#for label in (ax.get_xticklabels() + ax.get_yticklabels()):
# label.set_fontsize(15)
d = "/uufs/chpc.utah.edu/common/home/u6000771/public_html/"
plt.savefig(d + "figure-1.eps")
plt.show()
|
11550464
|
import tensorflow as tf
hparams = tf.contrib.training.HParams(
num_mels=80,
frame_length_ms=50,
frame_shift_ms=12.5,
hop_length=int(16000 * 0.0125), # samples.
win_length=int(16000 * 0.05), # samples.
max_db=100,
ref_db=20,
preemphasis=0.97,
max_abs_value=4.0,
symmetric_mel=True,
sr=16000,
n_fft=2048,
n_iter=60,
power=1.5,
max_generation_frames=1100,
max_eval_batches=20,
max_eval_sample_length=1000,
eval_sample_per_speaker=4,
vocab_size=6000,
embed_size=512,
encoder_hidden=512,
decoder_hidden=768,
n_encoder_layer=6,
n_decoder_layer=6,
n_attention_head=8,
transformer_dropout_rate=0.1,
decoder_dropout_rate=0.5,
prenet_hidden=256,
postnet_hidden=512,
n_postnet_layer=5,
data_format="nlti",
use_sos=True,
bucket_size=512,
shuffle_training_data=True,
batch_frame_limit=8000,
batch_frame_quad_limit=7000000,
balanced_training=True,
lg_prob_scale=0.2,
adapt_start_step=30000,
adapt_end_step=30000,
final_adapt_rate=0.25,
data_warmup_steps=30000,
target_length_lower_bound=240,
target_length_upper_bound=800,
reg_weight=5e-9,
multi_speaker=True,
max_num_speaker=1000,
speaker_embedding_size=128,
multi_lingual=True,
max_num_language=100,
language_net_hidden=128,
language_embedding_size=128,
warmup_steps=50000,
max_lr=1e-3,
min_lr=1e-5,
lr_decay_step=550000,
lr_decay_rate=1e-2,
adam_eps=5e-8,
external_embed_dim=1024,
use_external_embed=False,
)
|
11550466
|
import framework
from models.encoder_decoder import add_eos
from typing import Tuple, Dict, Any, Optional, Callable, List
import torch
import torch.nn.functional as F
class SequenceTestState:
def __init__(self, batch_dim: int = 1):
self.n_ok = 0
self.n_total = 0
self.batch_dim = batch_dim
self.time_dim = 1 - self.batch_dim
def is_index_tensor(self, net_out: torch.Tensor) -> bool:
return net_out.dtype in [torch.long, torch.int, torch.int8, torch.int16]
def convert_to_index(self, net_out: torch.Tensor):
return net_out if self.is_index_tensor(net_out) else net_out.argmax(-1)
def compare_direct(self, net_out: Tuple[torch.Tensor, Optional[torch.Tensor]], ref: torch.Tensor,
ref_len: torch.Tensor):
        scores, out_len = net_out
out = self.convert_to_index(scores)
        if out_len is not None:
# Dynamic-length output
if out.shape[0] > ref.shape[0]:
out = out[: ref.shape[0]]
elif out.shape[0] < ref.shape[0]:
ref = ref[: out.shape[0]]
unused = torch.arange(0, out.shape[0], dtype=torch.long, device=ref.device).unsqueeze(self.batch_dim) >= \
ref_len.unsqueeze(self.time_dim)
            ok_mask = ((out == ref) | unused).all(self.time_dim) & (out_len == ref_len)
else:
            # Allow fixed-length output
            assert out.shape == ref.shape
ok_mask = (out == ref).all(self.time_dim)
return ok_mask
def compare_output(self, net_out: Tuple[torch.Tensor, Optional[torch.Tensor]], data: Dict[str, torch.Tensor]):
return self.compare_direct(net_out, data["out"], data["out_len"])
def step(self, net_out: Tuple[torch.Tensor, Optional[torch.Tensor]], data: Dict[str, torch.Tensor]):
ok_mask = self.compare_output(net_out, data)
self.n_total += ok_mask.nelement()
self.n_ok += ok_mask.long().sum().item()
@property
def accuracy(self):
return self.n_ok / self.n_total
def plot(self) -> Dict[str, Any]:
return {"accuracy/total": self.accuracy}
class TextSequenceTestState(SequenceTestState):
def __init__(self, input_to_text: Callable[[torch.Tensor], torch.Tensor],
output_to_text: Callable[[torch.Tensor], torch.Tensor], batch_dim: int = 1,
max_bad_samples: int = 100, min_prefix_match_len: int = 1, eos_id: int = -1):
super().__init__(batch_dim)
self.bad_sequences = []
self.max_bad_samples = max_bad_samples
self.in_to_text = input_to_text
self.out_to_text = output_to_text
self.n_prefix_ok = 0
self.n_oracle_ok = 0
self.oracle_available = False
self.min_prefix_match_len = min_prefix_match_len
self.eos_id = eos_id
self.losses = []
self.oks = []
def set_eos_to_neginf(self, scores: torch.Tensor) -> torch.Tensor:
id = self.eos_id if self.eos_id >= 0 else (scores.shape[-1] + self.eos_id)
return scores.index_fill(-1, torch.tensor([id], device=scores.device), float("-inf"))
def loss(self, net_out: torch.Tensor, data: Dict[str, torch.Tensor]) -> torch.Tensor:
mask = torch.arange(net_out.shape[1-self.batch_dim], device=net_out.device).unsqueeze(1) <= \
data["out_len"].unsqueeze(0)
ref = add_eos(data["out"], data["out_len"], net_out.shape[-1] - 1)
l = F.cross_entropy(net_out.flatten(end_dim=-2), ref.long().flatten(), reduction='none')
l = l.reshape_as(ref) * mask
nonbatchdims = tuple(i for i in range(l.ndim) if i!=self.batch_dim)
l = l.sum(dim=nonbatchdims) / mask.sum(dim=nonbatchdims).float()
return l
def sample_to_text(self, net_out: Tuple[torch.Tensor, Optional[torch.Tensor]], data: Dict[str, torch.Tensor],
i: int) -> Tuple[str, str, str]:
scores, out_len = net_out
out = self.convert_to_index(scores)
t_ref = self.out_to_text(data["out"].select(self.batch_dim, i)[: int(data["out_len"][i].item())].
cpu().numpy().tolist())
out_end = None if out_len is None else out_len[i].item()
t_out = self.out_to_text(out.select(self.batch_dim, i)[:out_end].cpu().numpy().tolist())
t_in = self.in_to_text(data["in"].select(self.batch_dim, i)[: int(data["in_len"][i].item())].cpu().numpy().
tolist())
return t_in, t_ref, t_out
def step(self, net_out: Tuple[torch.Tensor, Optional[torch.Tensor]], data: Dict[str, torch.Tensor]):
ok_mask = self.compare_output(net_out, data)
scores, _ = net_out
if not self.is_index_tensor(scores):
self.oracle_available = True
out_noeos = self.set_eos_to_neginf(scores).argmax(-1)
oracle_ok = self.compare_direct((out_noeos, data["out_len"].clamp_(max=out_noeos.shape[1-self.batch_dim])),
data["out"], data["out_len"])
self.n_oracle_ok += oracle_ok.long().sum().item()
self.losses.append(self.loss(net_out[0], data).cpu())
prefix_len = data["out_len"] if net_out[1] is None else torch.minimum(data["out_len"], net_out[1])
prefix_len = torch.minimum(prefix_len.clamp(min=self.min_prefix_match_len), data["out_len"])
prefix_ok_mask = self.compare_direct((net_out[0], prefix_len), data["out"], prefix_len)
if len(self.bad_sequences) < self.max_bad_samples:
t = torch.nonzero(~ok_mask).squeeze(-1)[:self.max_bad_samples - len(self.bad_sequences)]
for i in t:
t_in, t_ref, t_out = self.sample_to_text(net_out, data, i)
s = [t_in, t_ref, t_out, str(prefix_ok_mask[i].item())]
if self.oracle_available:
s.append(str(oracle_ok[i].item()))
self.bad_sequences.append(s)
self.oks.append(ok_mask.cpu())
self.n_total += ok_mask.nelement()
self.n_ok += ok_mask.long().sum().item()
self.n_prefix_ok += prefix_ok_mask.long().sum().item()
def get_sample_info(self) -> Tuple[List[float], List[bool]]:
return torch.cat(self.losses, 0).numpy().tolist(), torch.cat(self.oks, 0).numpy().tolist()
def plot(self) -> Dict[str, Any]:
res = super().plot()
res["mistake_examples"] = framework.visualize.plot.TextTable(["Input", "Reference", "Output", "Prefix match"] +\
(["Oracle match"] if self.oracle_available else []),
self.bad_sequences)
res["accuracy/prefix"] = self.n_prefix_ok / self.n_total
if self.oracle_available:
res["accuracy/oracle"] = self.n_oracle_ok / self.n_total
if self.losses:
res["loss_histogram"] = framework.visualize.plot.Histogram(torch.cat(self.losses, 0))
return res
class TypedTextSequenceTestState(TextSequenceTestState):
def __init__(self, input_to_text: Callable[[torch.Tensor], torch.Tensor],
output_to_text: Callable[[torch.Tensor], torch.Tensor], type_names: List[str], batch_dim: int = 1,
max_bad_samples: int = 100):
super().__init__(input_to_text, output_to_text, batch_dim, max_bad_samples)
self.type_names = type_names
self.count_per_type = {}
def step(self, net_out: Tuple[torch.Tensor, Optional[torch.Tensor]], data: Dict[str, torch.Tensor]):
ok_mask = self.compare_output(net_out, data)
scores, out_len = net_out
out = self.convert_to_index(scores)
if len(self.bad_sequences) < self.max_bad_samples:
t = torch.nonzero(~ok_mask).squeeze(-1)[:self.max_bad_samples - len(self.bad_sequences)]
for i in t:
out_end = None if out_len is None else out_len[i].item()
self.bad_sequences.append((
self.in_to_text(data["in"].select(self.batch_dim, i)[: int(data["in_len"][i].item())].
cpu().numpy().tolist()),
self.out_to_text(data["out"].select(self.batch_dim, i)[: int(data["out_len"][i].item())].
cpu().numpy().tolist()),
self.type_names[int(data["type"][i].item())],
self.out_to_text(out.select(self.batch_dim, i)[:out_end].cpu().numpy().tolist())
))
for t in torch.unique(data["type"]).int().cpu().numpy().tolist():
mask = data["type"] == t
c = self.count_per_type.get(t)
if c is None:
self.count_per_type[t] = c = {"n_ok": 0, "n_total": 0}
c["n_total"] += mask.float().sum().item()
c["n_ok"] += ok_mask[mask].float().sum().item()
self.n_total += ok_mask.nelement()
self.n_ok += ok_mask.long().sum().item()
def plot(self) -> Dict[str, Any]:
res = super().plot()
res["mistake_examples"] = framework.visualize.plot.TextTable(["Input", "Reference", "Type", "Output"],
self.bad_sequences)
for t, data in self.count_per_type.items():
res[f"accuracy/{self.type_names[t]}"] = data["n_ok"] / data["n_total"]
return res
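
# A minimal, self-contained sketch of the accuracy bookkeeping above (shapes
# follow the [time, batch] convention implied by the default batch_dim=1): a
# perfect one-hot prediction yields accuracy 1.0.
def _demo_sequence_test_state():
    state = SequenceTestState()
    ref = torch.randint(0, 7, (5, 2))               # [time, batch]
    scores = F.one_hot(ref, num_classes=7).float()  # [time, batch, vocab]
    state.step((scores, None), {"out": ref, "out_len": torch.tensor([5, 5])})
    print(state.accuracy)  # 1.0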
|
11550472
|
with open("data/M_ff/train_his.csv") as f:
line = f.readline().strip()
headers = line.split(',')
feat_info_fn = "data/M_ff/his_feat_infos.txt"
from data_tool.feature import FeatureInfo
feat_infos = {name:FeatureInfo() for name in headers}
for k,v in feat_infos.items():
if k in ["pos_his","neg_his"]:
v.construct(k,1005)
else:
v.construct(k,2951)
with open(feat_info_fn,'w') as f:
for k,v in feat_infos.items():
f.write(v.to_str())
|
11550493
|
import json
import os
import unittest
from shutil import rmtree
import numpy as np
import torch
from metal.end_model import EndModel
class LogWriterTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Set seed
np.random.seed(1)
n = 2000
X = np.random.random((n, 2)) * 2 - 1
Y = (X[:, 0] > X[:, 1] + 0.25).astype(int) + 1
X = torch.tensor(X, dtype=torch.float)
Y = torch.tensor(Y, dtype=torch.long)
Xs = [X[:1000], X[1000:1500], X[1500:]]
Ys = [Y[:1000], Y[1000:1500], Y[1500:]]
cls.single_problem = (Xs, Ys)
cls.log_dir = "tests/logs/"
@classmethod
def tearDownClass(cls):
print("TODO: Confirm that this is deleting logs directory")
# Clean up
rmtree(cls.log_dir)
def test_logwriter(self):
"""Test the basic LogWriter class"""
writer_kwargs = {
"log_dir": self.log_dir,
"run_dir": "test_dir",
"run_name": "test",
}
em = EndModel(
seed=1,
input_batchnorm=False,
middle_batchnorm=False,
input_dropout=0.0,
middle_dropout=0.0,
layer_out_dims=[2, 10, 2],
verbose=False,
)
Xs, Ys = self.single_problem
em.train_model(
(Xs[0], Ys[0]),
valid_data=(Xs[1], Ys[1]),
n_epochs=7,
checkpoint=False,
writer="json",
**writer_kwargs,
)
# Load the log
with open(em.writer.log_path, "r") as f:
log_dict = json.load(f)
self.assertEqual(log_dict["config"]["train_config"]["n_epochs"], 7)
self.assertEqual(len(log_dict["run_log"]["train/loss"]), 7)
def test_tensorboard(self):
"""Test the TensorBoardWriter class"""
pass
# log_dir = os.path.join(self.log_dir, "tensorboard")
# writer_kwargs = {"log_dir": log_dir, "run_dir": "test_dir", "run_name": "test"}
# em = EndModel(
# seed=1,
# input_batchnorm=False,
# middle_batchnorm=False,
# input_dropout=0.0,
# middle_dropout=0.0,
# layer_out_dims=[2, 10, 2],
# verbose=False,
# )
# Xs, Ys = self.single_problem
# em.train_model(
# (Xs[0], Ys[0]),
# valid_data=(Xs[1], Ys[1]),
# n_epochs=2,
# checkpoint=False,
# writer="tensorboard",
# **writer_kwargs,
# )
# # Load the log
# with open(em.writer.log_path, "r") as f:
# pass
# # Confirm that the event file was written
# self.assertTrue(False)
|
11550506
|
from typing import Any
from hypothesis import assume, given
from pfun import Unary, compose, identity
from pfun.either import Either, Left, Right, either, filter_, for_each, gather
from pfun.hypothesis_strategies import anything, eithers, unaries
from tests.monad_test import MonadTest
from .utils import recursion_limit
class TestEither(MonadTest):
@given(eithers(anything()))
def test_right_identity_law(self, either: Either):
assert either.and_then(Right) == either
@given(anything(), unaries(eithers(anything())))
def test_left_identity_law(self, value, f: Unary[Any, Either]):
assert Right(value).and_then(f) == f(value)
@given(
eithers(anything()),
unaries(eithers(anything())),
unaries(eithers(anything()))
)
def test_associativity_law(
self, either: Either, f: Unary[Any, Either], g: Unary[Any, Either]
):
assert either.and_then(f).and_then(
g
) == either.and_then( # type: ignore
lambda x: f(x).and_then(g)
)
@given(anything())
def test_equality(self, value):
assert Left(value) == Left(value)
assert Right(value) == Right(value)
@given(anything(), anything())
def test_inequality(self, first, second):
assume(first != second)
assert Left(first) != Left(second)
assert Right(first) != Right(second)
assert Left(first) != Right(first)
@given(anything())
def test_identity_law(self, value):
assert Left(value).map(identity) == Left(value)
assert Right(value).map(identity) == Right(value)
@given(unaries(anything()), unaries(anything()), anything())
def test_composition_law(self, f: Unary, g: Unary, value):
h = compose(f, g)
assert Left(value).map(h) == Left(value).map(g).map(f)
assert Right(value).map(h) == Right(value).map(g).map(f)
@given(anything(), anything())
def test_or_else(self, value, default):
assert Right(value).or_else(default) == value
assert Left(value).or_else(default) == default
@given(anything())
def test_bool(self, value):
assert bool(Right(value))
assert not bool(Left(value))
def test_either_decorator(self):
result_int = either(int)
assert result_int('1') == Right(1)
def test_gather(self):
assert gather([Right(v) for v in range(3)]) == Right((0, 1, 2))
def test_stack_safety(self):
with recursion_limit(100):
gather([Right(v) for v in range(500)])
def test_filter(self):
assert filter_(lambda v: Right(v % 2 == 0), range(3)) == Right((0, 2))
def test_for_each(self):
assert for_each(Right, range(3)) == Right((0, 1, 2))
|
11550534
|
import os
import requests
class TelegramClient:
chat_id = os.getenv('TELEGRAM_CHAT_ID')
username = os.getenv('TELEGRAM_USERNAME')
password = os.getenv('TELEGRAM_PASSWORD')
url = f'https://api.telegram.org/{username}:{password}'
    def send_message(self, message):
        # pass the payload via `params` so requests URL-encodes the message text
        requests.post(f'{self.url}/sendMessage', params={'chat_id': self.chat_id, 'text': message})
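
# A minimal usage sketch (assumes TELEGRAM_CHAT_ID, TELEGRAM_USERNAME and
# TELEGRAM_PASSWORD are exported before this module is imported, since the
# class attributes read the environment at class-definition time):
if __name__ == '__main__':
    TelegramClient().send_message('build finished')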
|
11550537
|
from mmdet.utils import Registry
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
|
11550555
|
import maya.cmds as mc
#testFile = '/home/prthlein/private/code/prmaya/test/scripts/test_prDeformPaint.ma'
testFile = r'C:\Users\paz\Documents\git\prmaya\test\scripts\test_prDeformPaint.ma'
mc.file(testFile, open=True, force=True)
mc.file(rename=testFile.replace('.ma', 'TEMP.ma'))
mc.file(renameToSave=True)
import prDeformPaint;reload(prDeformPaint)
ui = prDeformPaint.Ui()
mc.select('orig')
ui.enterTool()
mc.select('half')
prDeformPaint.reinitializeMaya()
#prDeformPaint.initializeMaya('/home/prthlein/private/code/prmaya/prmaya/plugins/prMovePointsCmd.py',
# '/home/prthlein/private/code/prmaya/prmaya/scripts/prDeformPaintBrush.mel')
#prDeformPaint.reinitializeMaya()
|
11550564
|
from datetime import datetime
from pathlib import Path
from scalpel import Configuration, datetime_decoder
from scalpel.green import SeleniumSpider, SeleniumResponse, read_mp
def parse(spider: SeleniumSpider, response: SeleniumResponse) -> None:
for block in response.driver.find_elements_by_xpath('//div[@class="opblock-tag-section"]'):
block.click()
h4_text = block.find_element_by_xpath('./h4').text
title, description = h4_text.split('\n')
result = {
'title': title,
'description': description,
'operations': []
}
methods = (method.text for method in block.find_elements_by_xpath('.//span[@class="opblock-summary-method"]'))
paths = (path.text for path in block.find_elements_by_xpath('.//span[@class="opblock-summary-path"]/a/span'))
descriptions = (description.text for description in
block.find_elements_by_xpath('.//div[@class="opblock-summary-description"]'))
for method, path, description in zip(methods, paths, descriptions):
result['operations'].append({
'method': method,
'path': path,
'description': description
})
spider.save_item(result)
def date_processor(item: dict) -> dict:
item['date'] = datetime.now()
return item
if __name__ == '__main__':
backup = Path(__file__).parent / 'backup.mp'
config = Configuration(selenium_driver_log_file=None, backup_filename=f'{backup}', item_processors=[date_processor])
sel_spider = SeleniumSpider(urls=['http://httpbin.org/'], parse=parse, config=config)
sel_spider.run()
print(sel_spider.statistics())
# you can do whatever you want with the results
for quote_data in read_mp(filename=backup, decoder=datetime_decoder):
print('****', quote_data['title'], '****')
print(quote_data['description'])
print('== operations ==')
for operation in quote_data['operations']:
print('\tmethod:', operation['method'])
print('\tpath:', operation['path'])
print('\tdescription:', operation['description'], end='\n\n')
|
11550568
|
from django.http import HttpResponse
from ninja import NinjaAPI
api = NinjaAPI()
@api.get("")
def index(request):
return HttpResponse(status=200)
@api.get("user/{id}")
async def get_user(request, id: str):
return HttpResponse(id)
@api.post("user")
async def create_user(request):
return HttpResponse(status=200)
|
11550582
|
import pybullet as pb
from . import body
from . import link
def get_contact_points(body_or_link_a, body_or_link_b=None):
""" Returns the contact points computed during the most recent
call to stepSimulation. """
client_id = body_or_link_a.client_id
kwargs = {}
kwargs.update(_compute_args(body_or_link_a, 'bodyA', 'linkIndexA'))
if body_or_link_b is not None:
kwargs.update(_compute_args(body_or_link_b, 'bodyB', 'linkIndexB'))
pts = pb.getContactPoints(physicsClientId=client_id, **kwargs)
return [ContactPoint(p, body_or_link_a.client_id) for p in pts]
def get_closest_points(body_or_link_a, body_or_link_b=None, max_distance=10):
""" Compute the closest points, independent from stepSimulation.
If the distance between objects exceeds this maximum distance,
no points may be returned. """
client_id = body_or_link_a.client_id
kwargs = {}
kwargs.update(_compute_args(body_or_link_a, 'bodyA', 'linkIndexA'))
if body_or_link_b is not None:
kwargs.update(_compute_args(body_or_link_b, 'bodyB', 'linkIndexB'))
pts = pb.getClosestPoints(distance=max_distance, physicsClientId=client_id, **kwargs)
return [Distance(p, client_id) for p in pts]
def get_overlapping_objects(body_or_link):
""" Return all the unique ids of objects that have axis aligned
bounding box overlap with a axis aligned bounding box of
given body/link. """
client_id = body_or_link.client_id
kwargs = _compute_args(body_or_link, 'bodyUniqueId', 'linkIndex')
aa, bb = pb.getAABB(physicsClientId=client_id, **kwargs)
    obs = pb.getOverlappingObjects(aa, bb, physicsClientId=client_id)
if obs is None:
return []
    return [link.Link(o[0], o[1], client_id) for o in obs]
def get_collisions(body_or_link):
""" Return all objects that intersect a given body/link. """
    # getOverlappingObjects doesn't work reliably every time
# overlap = get_overlapping_objects(link.Link(1,1))
overlap = range(body.Body.num_bodies(body_or_link.client_id))
return sum([get_closest_points(body_or_link, b, 0.0) for b in overlap
if b != body_or_link.body_id], [])
def _compute_args(body_or_link, body_arg, link_arg):
if isinstance(body_or_link, link.Link):
return {body_arg: body_or_link.body_id,
link_arg: body_or_link.link_index}
elif isinstance(body_or_link, body.Body):
return {body_arg: body_or_link.body_id}
else:
return {body_arg: body_or_link}
class Distance(object):
def __init__(self, data, client_id):
self._data = data
self.client_id = client_id
@property
def body_a(self):
""" Body A. """
return body.Body(self._data[1], self.client_id)
@property
def body_b(self):
""" Body B. """
return body.Body(self._data[2], self.client_id)
@property
def link_a(self):
""" Link of body A. """
return link.Link(self._data[1], self._data[3], self.client_id)
@property
def link_b(self):
""" Link of body B. """
return link.Link(self._data[2], self._data[4], self.client_id)
@property
def position_on_a(self):
""" Contact position on A, in Cartesian world coordinates (vec3). """
return self._data[5]
@property
def position_on_b(self):
""" Contact position on B, in Cartesian world coordinates (vec3). """
return self._data[6]
@property
def distance(self):
""" Distance, positive for separation, negative for penetration (float). """
return self._data[8]
def __repr__(self):
return 'Distance {}-{}: {:.3}'.format(
self.link_a, self.link_b, self.distance)
class ContactPoint(Distance):
def __init__(self, data, client_id):
super(ContactPoint, self).__init__(data, client_id)
@property
def contact_normal_on_b(self):
""" Contact normal on B, pointing towards A (vec3). """
return self._data[7]
@property
def normal_force(self):
""" Normal force applied during the last 'stepSimulation' (float). """
return self._data[9]
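
# A minimal usage sketch (hedged: the `body`/`link` wrapper modules are only
# partially shown here, so this uses raw pybullet ids and exercises just the
# ContactPoint accessors defined above):
def _demo_contact_points():
    client = pb.connect(pb.DIRECT)
    box = pb.createCollisionShape(pb.GEOM_BOX, halfExtents=[1, 1, 1], physicsClientId=client)
    a = pb.createMultiBody(baseCollisionShapeIndex=box, basePosition=[0, 0, 0], physicsClientId=client)
    b = pb.createMultiBody(baseMass=1, baseCollisionShapeIndex=box,
                           basePosition=[0, 0, 1.5], physicsClientId=client)
    pb.stepSimulation(physicsClientId=client)
    for p in pb.getContactPoints(bodyA=a, bodyB=b, physicsClientId=client):
        cp = ContactPoint(p, client)
        print(cp.distance, cp.normal_force)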
|
11550584
|
import unittest
from simdna.simdnautil import fileProcessing as fp
import simdna
from simdna import synthetic as sn
import numpy as np
from simdna import random
from collections import defaultdict
class TestBasics(unittest.TestCase):
def test_density_motif_embedding(self):
random.seed(1234)
np.random.seed(1234)
min_counts = 2
max_counts = 5
pseudocount_prob = 0.001
pwm_name = "CTCF_known1"
num_sequences = 5000
loaded_motifs = sn.LoadedEncodeMotifs(simdna.ENCODE_MOTIFS_PATH,
pseudocountProb=pseudocount_prob)
substring_generator = sn.PwmSamplerFromLoadedMotifs(
loaded_motifs, pwm_name)
position_generator = sn.UniformPositionGenerator()
quantity_generator = sn.UniformIntegerGenerator(min_counts, max_counts)
embedders = [
sn.RepeatedEmbedder(
sn.SubstringEmbedder(
sn.ReverseComplementWrapper(
substring_generator), position_generator),
quantity_generator)]
embed_in_background = sn.EmbedInABackground(
sn.ZeroOrderBackgroundGenerator(
500, discreteDistribution={'A':0.3,'C':0.2,
'G':0.2,'T':0.3}),
embedders)
generated_sequences = list(sn.GenerateSequenceNTimes(
embed_in_background, num_sequences).generateSequences())
assert len(generated_sequences) == num_sequences
actual_pwm = np.array([[0.095290, 0.318729, 0.083242, 0.502738],
[0.182913, 0.158817, 0.453450, 0.204819],
[0.307777, 0.053669, 0.491785, 0.146769],
[0.061336, 0.876232, 0.023001, 0.039430],
[0.008762, 0.989047, 0.000000, 0.002191],
[0.814896, 0.014239, 0.071194, 0.099671],
[0.043812, 0.578313, 0.365827, 0.012048],
[0.117325, 0.474781, 0.052632, 0.355263],
[0.933114, 0.012061, 0.035088, 0.019737],
[0.005488, 0.000000, 0.991218, 0.003293],
[0.365532, 0.003293, 0.621295, 0.009879],
[0.059276, 0.013172, 0.553238, 0.374314],
[0.013187, 0.000000, 0.978022, 0.008791],
[0.061538, 0.008791, 0.851648, 0.078022],
[0.114411, 0.806381, 0.005501, 0.073707],
[0.409241, 0.014301, 0.557756, 0.018702],
[0.090308, 0.530837, 0.338106, 0.040749],
[0.128855, 0.354626, 0.080396, 0.436123],
[0.442731, 0.199339, 0.292952, 0.064978]])
actual_pwm = actual_pwm*(1-pseudocount_prob) + pseudocount_prob/4
np.testing.assert_almost_equal(np.sum(actual_pwm,axis=-1),1.0,6)
np.testing.assert_almost_equal(
actual_pwm,
np.array(loaded_motifs.getPwm(pwm_name).getRows()))
letter_to_index = {'A':0, 'C':1, 'G':2, 'T':3}
reconstructed_pwm_fwd = np.zeros_like(actual_pwm)
reconstructed_pwm_rev = np.zeros_like(actual_pwm)
quantity_distribution = defaultdict(lambda: 0)
total_fwd_embeddings = 0.0
total_rev_embeddings = 0.0
for seq in generated_sequences:
embeddings = seq.embeddings
quantity_distribution[len(embeddings)] += 1
for embedding in embeddings:
assert (embedding.what.string
==seq.seq[embedding.startPos:
embedding.startPos+len(embedding.what.string)])
if ('revComp' in embedding.what.getDescription()):
total_rev_embeddings += 1
else:
total_fwd_embeddings += 1
for char_idx, char in enumerate(embedding.what.string):
if ('revComp' in embedding.what.getDescription()):
arr = reconstructed_pwm_rev
else:
arr = reconstructed_pwm_fwd
arr[char_idx][letter_to_index[char]] += 1
total_embeddings = total_fwd_embeddings + total_rev_embeddings
np.testing.assert_almost_equal(
total_fwd_embeddings/total_embeddings, 0.5, 2)
#normalize each column of reconstructed_pwm
reconstructed_pwm_fwd = reconstructed_pwm_fwd/total_fwd_embeddings
reconstructed_pwm_rev = reconstructed_pwm_rev/total_rev_embeddings
np.testing.assert_almost_equal(actual_pwm, reconstructed_pwm_fwd, 2)
np.testing.assert_almost_equal(actual_pwm,
reconstructed_pwm_rev[::-1,::-1], 2)
#test the quantities of motifs were sampled uniformly
for quantity in range(min_counts, max_counts+1):
np.testing.assert_almost_equal(
quantity_distribution[quantity]/float(num_sequences),
1.0/(max_counts-min_counts+1),2)
|
11550626
|
import numpy as np
from ..util.backend_functions import backend as bd
from .diffractive_element import DOE
class BinaryFZP(DOE):
def __init__(self, f, λ, radius = None, aberration = None):
"""
Creates a Phase Binary Fresnel Zone Plate with a focal length equal to f for a wavelength λ
"""
global bd
from ..util.backend_functions import backend as bd
self.f = f
self.FZP_λ = λ
self.radius = radius
def get_transmittance(self, xx, yy, λ):
t = 1
        if self.radius is not None:
t = bd.where((xx**2 + yy**2) < self.radius**2, t, bd.zeros_like(xx))
r_2 = xx**2 + yy**2
        phase_shift = bd.pi * (bd.sign(((2*bd.pi/self.FZP_λ * (bd.sqrt(self.f**2 + r_2) - self.f))) % (2*bd.pi) - bd.pi))/2.
t = t*bd.exp(1j*phase_shift)
return t
class FZP(DOE):
def __init__(self, f, λ, radius = None, aberration = None):
"""
Creates a Phase Blazed (Ideal) Fresnel Zone Plate with a focal length equal to f for a wavelength λ
"""
global bd
from ..util.backend_functions import backend as bd
self.f = f
self.FZP_λ = λ
self.radius = radius
def get_transmittance(self, xx, yy, λ):
t = 1
        if self.radius is not None:
t = bd.where((xx**2 + yy**2) < self.radius**2, t, bd.zeros_like(xx))
r_2 = xx**2 + yy**2
phase_shift = -(2*bd.pi/λ * (bd.sqrt(self.f**2 + r_2) - self.f))
t = t*bd.exp(1j*phase_shift)
return t
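
# A minimal usage sketch (hedged: assumes the backend `bd` resolves to numpy,
# its usual default in this package): sample the blazed FZP transmittance on a
# small grid and inspect the result.
def _demo_fzp():
    x = np.linspace(-1e-3, 1e-3, 256)
    xx, yy = np.meshgrid(x, x)
    fzp = FZP(f=0.1, λ=633e-9, radius=1e-3)
    t = fzp.get_transmittance(xx, yy, λ=633e-9)
    print(t.shape, np.abs(t).max())  # complex transmittance, |t| <= 1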
|
11550656
|
import pytest
from moto.dynamodb2.models import Table
@pytest.fixture
def table():
return Table(
"Forums",
schema=[
{"KeyType": "HASH", "AttributeName": "forum_name"},
{"KeyType": "RANGE", "AttributeName": "subject"},
],
attr=[
{"AttributeType": "S", "AttributeName": "forum_name"},
{"AttributeType": "S", "AttributeName": "subject"},
],
)
|
11550661
|
from typing import Optional, Dict
from math import floor
from time import time
from ..deviation import getTimer
class Recorder(object):
    def __init__(self, interval: Optional[int] = 1, compensate: Optional[bool] = True,
                 timestamp: Optional[float] = None, **kwargs):
        assert interval > 0
        # time() as a default argument would be evaluated once, at import time;
        # resolve "now" at call time instead.
        if timestamp is None:
            timestamp = time()
def dont_compensate(timestamp):
return timestamp
self.compensate = getTimer().compensate if compensate is True else dont_compensate
self.interval = interval
self.reference_slot = self._calc_slot(timestamp)
self.basket = {}
for key in kwargs:
self.basket[key] = kwargs[key]
    def record(self, timestamp: Optional[float] = None, **kwargs) -> Dict[str, int]:
        if timestamp is None:
            timestamp = time()
        current_slot = self._calc_slot(timestamp)
out = None
if int(current_slot) != self.reference_slot:
# If nothing was recorded - there's nothing to return!
# print(self.basket)
if len(self.basket) > 0:
self.basket['timestamp'] = int(self.reference_slot * self.interval)
out = self.basket
self.reference_slot = current_slot
self.basket = {}
if current_slot == self.reference_slot:
for key in kwargs:
if key in self.basket:
self.basket[key] += kwargs[key]
else:
self.basket[key] = kwargs[key]
return out
def get_interval(self) -> int:
return self.interval
def get_slot_start(self) -> int:
return int(self.reference_slot * self.interval)
def get(self, key: str) -> int:
        if key == 'timestamp':
return self.reference_slot * self.interval
return self.basket[key]
def _calc_slot(self, timestamp: float) -> int:
return int(floor(self.compensate(timestamp) / self.interval))
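
# A minimal usage sketch: aggregate counters into one-second slots and emit a
# bucket when the slot rolls over (compensate=False avoids the timer dependency
# for this demo).
def _demo_recorder():
    rec = Recorder(interval=1, compensate=False, timestamp=100.0)
    rec.record(timestamp=100.2, hits=1)
    rec.record(timestamp=100.7, hits=2)
    out = rec.record(timestamp=101.1, hits=1)  # new slot -> previous bucket returned
    print(out)  # {'hits': 3, 'timestamp': 100}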
|
11550681
|
import time, pytest, inspect
import yaml
from utils import *
def test_brave_with_no_config_file(run_brave):
run_brave()
check_brave_is_running()
def test_brave_with_missing_config_file(run_brave):
run_brave('not-a-real-config-file')
check_return_value(1)
def test_brave_with_invalid_input_type(run_brave, create_config_file):
config = {'inputs': [{'type': 'not-a-valid-type'}]}
config_file = create_config_file(config)
run_brave(config_file.name)
check_return_value(1)
def test_brave_with_full_config_file(run_brave, create_config_file):
output_image_location = create_output_image_location()
output_video_location = create_output_video_location()
file_asset = test_directory() + '/assets/5_second_video.mp4'
config = {
'inputs': [
{'type': 'test_video'},
{'type': 'test_audio', 'freq': 200 } ,
{'type': 'test_audio', 'freq': 600 } ,
{'type': 'uri', 'uri': 'file://' + file_asset }
],
'outputs': [
{'type': 'local', 'source': 'input4'},
{'type': 'tcp'},
{'type': 'file', 'source': 'input1', 'location': output_video_location},
{'type': 'image', 'source': 'input2', 'location': output_image_location}
]
}
config_file = create_config_file(config)
run_brave(config_file.name)
time.sleep(3)
check_brave_is_running()
response = api_get('/api/all')
assert response.status_code == 200
assert_everything_in_playing_state(response.json())
assert response.json()['inputs'][0]['type'] == 'test_video'
assert response.json()['inputs'][1]['type'] == 'test_audio'
assert response.json()['inputs'][2]['type'] == 'test_audio'
assert response.json()['inputs'][1]['freq'] == 200
assert response.json()['inputs'][2]['freq'] == 600
assert response.json()['outputs'][0]['type'] == 'local'
assert response.json()['outputs'][1]['type'] == 'tcp'
assert response.json()['outputs'][2]['type'] == 'file'
assert response.json()['outputs'][3]['type'] == 'image'
assert response.json()['outputs'][2]['location'] == output_video_location
assert response.json()['outputs'][2]['source'] == 'input1'
assert response.json()['outputs'][3]['source'] == 'input2'
def test_non_string_keys(run_brave, create_config_file):
config = {
'inputs': [
{ 1: 'oh look 1 is not a string'}
]
}
config_file = create_config_file(config)
run_brave(config_file.name)
check_return_value(1)
def test_config_file_with_ids(run_brave, create_config_file):
config = {
'inputs': [
{'type': 'test_video'},
{'type': 'test_video', 'id': 10}
],
'outputs': [
{'type': 'image', 'id': 1},
{'type': 'image'},
{'type': 'image'}
],
'mixers': [
{},
{'id': 2}
],
'overlays': [
{'type': 'clock', 'id': 7}
],
}
config_file = create_config_file(config)
run_brave(config_file.name)
check_brave_is_running()
response = api_get('/api/all')
assert response.status_code == 200
assert len(response.json()['inputs']) == 2
assert len(response.json()['outputs']) == 3
assert len(response.json()['mixers']) == 2
assert len(response.json()['overlays']) == 1
assert response.json()['inputs'][0]['id'] == 1
assert response.json()['inputs'][1]['id'] == 10
assert response.json()['outputs'][0]['id'] == 1
assert response.json()['outputs'][1]['id'] == 2
assert response.json()['outputs'][2]['id'] == 3
assert response.json()['mixers'][0]['id'] == 1
assert response.json()['mixers'][1]['id'] == 2
assert response.json()['overlays'][0]['id'] == 7
|
11550694
|
import numpy as np
from sklearn.metrics import roc_curve, accuracy_score
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ROCMeter(object):
"""Compute TPR with fixed FPR"""
def __init__(self):
self.reset()
def reset(self):
self.target = np.ones(0)
self.output = np.ones(0)
def update(self, target, output):
# If we use cross-entropy
if len(output.shape) > 1 and output.shape[1] > 1:
output = output[:,1]
elif len(output.shape) > 1 and output.shape[1] == 1:
output = output[:,0]
self.target = np.hstack([self.target, target])
self.output = np.hstack([self.output, output])
def get_tpr(self, fixed_fpr):
fpr, tpr, thr = roc_curve(self.target, self.output)
tpr_filtered = tpr[fpr <= fixed_fpr]
if len(tpr_filtered) == 0:
return 0.0
return tpr_filtered[-1]
def get_accuracy(self, thr=0.5):
acc = accuracy_score(self.target,
self.output >= thr)
return acc
def get_top_hard_examples(self, top_n=10):
diff_arr = np.abs(self.target - self.output)
hard_indexes = np.argsort(diff_arr)[::-1]
hard_indexes = hard_indexes[:top_n]
return hard_indexes, self.target[hard_indexes], self.output[hard_indexes]
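
# A minimal usage sketch: accumulate labels/scores batch by batch, then read
# TPR at a fixed FPR and plain thresholded accuracy.
def _demo_rocmeter():
    meter = ROCMeter()
    meter.update(np.array([0, 0, 1, 1]), np.array([0.1, 0.4, 0.35, 0.8]))
    print(meter.get_tpr(fixed_fpr=0.5))  # TPR at FPR <= 0.5
    print(meter.get_accuracy(thr=0.5))   # 0/1 accuracy at threshold 0.5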
|
11550705
|
import datetime
import logging
import random
import sys
import time
class WaitForConditionUtil(object):
@staticmethod
def _sleep_seconds_generator(polling_interval_seconds):
"""
Function yields seconds in range [polling_interval_seconds / 2, polling_interval_seconds] and
the very first value will be 0.
"""
yield 0
# making sure it is float
polling_interval_seconds = polling_interval_seconds * 1.0
while True:
yield random.uniform(polling_interval_seconds / 2, polling_interval_seconds)
@staticmethod
def wait_for_condition(condition_predicate, timeout_seconds, period_seconds, timeout_err):
end_datetime = datetime.datetime.utcnow() + datetime.timedelta(seconds=timeout_seconds)
sleep_seconds_generator = WaitForConditionUtil._sleep_seconds_generator(period_seconds)
cur_exc_info = None
while datetime.datetime.utcnow() <= end_datetime:
time.sleep(next(sleep_seconds_generator))
cur_exc_info = None
try:
condition_flag, output = condition_predicate()
if condition_flag:
return output
except Exception as exc:
logging.error(str(exc))
cur_exc_info = sys.exc_info()
# If there is an active exception, then we will report that instead of timeout.
if cur_exc_info:
            raise cur_exc_info[1].with_traceback(cur_exc_info[2])
logging.error(str(timeout_err))
raise timeout_err
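
# A minimal usage sketch: poll a predicate that returns (done, value) until it
# succeeds or the deadline passes.
def _demo_wait():
    state = {'calls': 0}
    def predicate():
        state['calls'] += 1
        return state['calls'] >= 3, state['calls']
    result = WaitForConditionUtil.wait_for_condition(
        predicate, timeout_seconds=10, period_seconds=0.1,
        timeout_err=TimeoutError('condition never became true'))
    print(result)  # 3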
|
11550723
|
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.distributions import MultivariateNormal as MVN
class ReweightL2(_Loss):
def __init__(self, train_dist, reweight='inverse'):
super(ReweightL2, self).__init__()
self.reweight = reweight
self.train_dist = train_dist
def forward(self, pred, target):
reweight = self.reweight
prob = self.train_dist.log_prob(target).exp().squeeze(-1)
if reweight == 'inverse':
inv_prob = prob.pow(-1)
elif reweight == 'sqrt_inv':
inv_prob = prob.pow(-0.5)
else:
raise NotImplementedError
inv_prob = inv_prob / inv_prob.sum()
loss = F.mse_loss(pred, target, reduction='none').sum(-1) * inv_prob
loss = loss.sum()
return loss
class GAILossMD(_Loss):
"""
Multi-Dimension version GAI, compatible with 1-D GAI
"""
def __init__(self, init_noise_sigma, gmm):
super(GAILossMD, self).__init__()
self.gmm = gmm
self.gmm = {k: torch.tensor(self.gmm[k]) for k in self.gmm}
self.noise_sigma = torch.nn.Parameter(torch.tensor(init_noise_sigma))
def forward(self, pred, target):
noise_var = self.noise_sigma ** 2
loss = gai_loss_md(pred, target, self.gmm, noise_var)
return loss
def gai_loss_md(pred, target, gmm, noise_var):
I = torch.eye(pred.shape[-1])
mse_term = -MVN(pred, noise_var*I).log_prob(target)
balancing_term = MVN(gmm['means'], gmm['variances']+noise_var*I).log_prob(pred.unsqueeze(1)) + gmm['weights'].log()
balancing_term = torch.logsumexp(balancing_term, dim=1)
loss = mse_term + balancing_term
loss = loss * (2 * noise_var).detach()
return loss.mean()
class BMCLossMD(_Loss):
"""
Multi-Dimension version BMC, compatible with 1-D BMC
"""
def __init__(self, init_noise_sigma):
super(BMCLossMD, self).__init__()
self.noise_sigma = torch.nn.Parameter(torch.tensor(init_noise_sigma))
def forward(self, pred, target):
noise_var = self.noise_sigma ** 2
loss = bmc_loss_md(pred, target, noise_var)
return loss
def bmc_loss_md(pred, target, noise_var):
I = torch.eye(pred.shape[-1])
logits = MVN(pred.unsqueeze(1), noise_var*I).log_prob(target.unsqueeze(0))
loss = F.cross_entropy(logits, torch.arange(pred.shape[0]))
loss = loss * (2 * noise_var).detach()
return loss
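
# A minimal usage sketch: BMCLossMD on a small regression batch; note that
# noise_sigma is a learnable parameter and receives gradients alongside the
# model's predictions.
def _demo_bmc():
    torch.manual_seed(0)
    pred = torch.randn(8, 3, requires_grad=True)
    target = torch.randn(8, 3)
    criterion = BMCLossMD(init_noise_sigma=1.0)
    loss = criterion(pred, target)
    loss.backward()
    print(loss.item(), criterion.noise_sigma.grad)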
|
11550742
|
from astropy.wcs.utils import wcs_to_celestial_frame
from astropy.coordinates import (ICRS, FK5, FK4, Galactic,
HeliocentricTrueEcliptic,
BarycentricTrueEcliptic)
from .decorators import auto_refresh, fixdocstring
__all__ = ['AxisLabels']
class AxisLabels(object):
def __init__(self, parent):
self._ax = parent.ax
self._wcs = parent.ax.wcs
self.x = parent.x
self.y = parent.y
self._ax.coords[self.x].set_axislabel_visibility_rule('always')
self._ax.coords[self.y].set_axislabel_visibility_rule('always')
xcoord_type = self._ax.coords[self.x].coord_type
ycoord_type = self._ax.coords[self.y].coord_type
if xcoord_type == 'longitude' and ycoord_type == 'latitude':
celestial = True
inverted = False
elif xcoord_type == 'latitude' and ycoord_type == 'longitude':
celestial = True
inverted = True
else:
celestial = inverted = False
if celestial:
frame = wcs_to_celestial_frame(self._wcs)
else:
frame = None
if isinstance(frame, ICRS):
xtext = 'RA (ICRS)'
ytext = 'Dec (ICRS)'
elif isinstance(frame, FK5):
equinox = "{:g}".format(FK5.equinox.jyear)
xtext = 'RA (J{0})'.format(equinox)
ytext = 'Dec (J{0})'.format(equinox)
elif isinstance(frame, FK4):
equinox = "{:g}".format(FK4.equinox.byear)
xtext = 'RA (B{0})'.format(equinox)
ytext = 'Dec (B{0})'.format(equinox)
elif isinstance(frame, Galactic):
xtext = 'Galactic Longitude'
ytext = 'Galactic Latitude'
elif isinstance(frame, (HeliocentricTrueEcliptic, BarycentricTrueEcliptic)):
# NOTE: once we support only Astropy 2.0+, we can use BaseEclipticFrame
xtext = 'Ecliptic Longitude'
ytext = 'Ecliptic Latitude'
else:
cunit_x = self._wcs.wcs.cunit[self.x]
cunit_y = self._wcs.wcs.cunit[self.y]
cname_x = self._wcs.wcs.cname[self.x]
cname_y = self._wcs.wcs.cname[self.y]
ctype_x = self._wcs.wcs.ctype[self.x]
ctype_y = self._wcs.wcs.ctype[self.y]
xunit = " (%s)" % cunit_x if cunit_x not in ["", None] else ""
yunit = " (%s)" % cunit_y if cunit_y not in ["", None] else ""
if len(cname_x) > 0:
xtext = cname_x + xunit
else:
if len(ctype_x) == 8 and ctype_x[4] == '-':
xtext = ctype_x[:4].replace('-', '') + xunit
else:
xtext = ctype_x + xunit
if len(cname_y) > 0:
ytext = cname_y + yunit
else:
if len(ctype_y) == 8 and ctype_y[4] == '-':
ytext = ctype_y[:4].replace('-', '') + yunit
else:
ytext = ctype_y + yunit
if inverted:
xtext, ytext = ytext, xtext
self.set_xtext(xtext)
self.set_ytext(ytext)
self.set_xposition('bottom')
self.set_yposition('left')
@auto_refresh
def set_xtext(self, label):
"""
Set the x-axis label text.
"""
self._x_text = label
self._ax.coords[self.x].set_axislabel(label)
@auto_refresh
def set_ytext(self, label):
"""
Set the y-axis label text.
"""
self._y_text = label
self._ax.coords[self.y].set_axislabel(label)
@auto_refresh
def set_xpad(self, pad):
"""
Set the x-axis label displacement in terms of the axis label font size.
"""
self._ax.coords[self.x].axislabels.set_minpad(pad)
@auto_refresh
def set_ypad(self, pad):
"""
Set the y-axis label displacement in terms of the axis label font size.
"""
self._ax.coords[self.y].axislabels.set_minpad(pad)
@auto_refresh
@fixdocstring
def set_font(self, **kwargs):
"""
Set the font of the axis labels.
Parameters
----------
common: family, style, variant, stretch, weight, size, fontproperties
Notes
-----
Default values are set by matplotlib or previously set values if
set_font has already been called. Global default values can be set by
editing the matplotlibrc file.
"""
self._ax.coords[self.x].axislabels.set(**kwargs)
self._ax.coords[self.y].axislabels.set(**kwargs)
@auto_refresh
def show(self):
"""
Show the x- and y-axis labels.
"""
self.show_x()
self.show_y()
@auto_refresh
def hide(self):
"""
Hide the x- and y-axis labels.
"""
self.hide_x()
self.hide_y()
@auto_refresh
def show_x(self):
"""
Show the x-axis label.
"""
if self._xposition == 'bottom':
self._ax.coords[self.x].set_axislabel_position('b')
else:
self._ax.coords[self.x].set_axislabel_position('t')
@auto_refresh
def hide_x(self):
"""
Hide the x-axis label.
"""
self._ax.coords[self.x].set_axislabel_position('')
@auto_refresh
def show_y(self):
"""
Show the y-axis label.
"""
if self._yposition == 'left':
self._ax.coords[self.y].set_axislabel_position('l')
else:
self._ax.coords[self.y].set_axislabel_position('r')
@auto_refresh
def hide_y(self):
"""
Hide the y-axis label.
"""
self._ax.coords[self.y].set_axislabel_position('')
@auto_refresh
def set_xposition(self, position):
"""
Set the position of the x-axis label ('top' or 'bottom')
"""
if position == 'bottom':
self._ax.coords[self.x].set_axislabel_position('b')
elif position == 'top':
self._ax.coords[self.x].set_axislabel_position('t')
else:
raise ValueError("position should be one of 'top' or 'bottom'")
self._xposition = position
@auto_refresh
def set_yposition(self, position):
"""
Set the position of the y-axis label ('left' or 'right')
"""
if position == 'left':
self._ax.coords[self.y].set_axislabel_position('l')
elif position == 'right':
self._ax.coords[self.y].set_axislabel_position('r')
else:
raise ValueError("position should be one of 'left' or 'right'")
self._yposition = position
|
11550747
|
from django.contrib import messages
from django.db import DataError
from dfirtrack_main.importer.file.csv_attributes_check import check_and_create_ip
from dfirtrack_main.logger.default_logger import warning_logger
from dfirtrack_main.models import (
Case,
Company,
Dnsname,
Domain,
Location,
Os,
Reason,
Recommendation,
Serviceprovider,
Systemtype,
Tag,
Tagcolor,
)
def create_lock_tags(model):
# get tagcolor
tagcolor_white = Tagcolor.objects.get(tagcolor_name='white')
""" lock systemstatus """
# get existing lock systemstatus tag
try:
tag_lock_systemstatus = Tag.objects.get(
tag_name=model.csv_tag_lock_systemstatus,
)
# get or create lock systemstatus tag
except Tag.DoesNotExist:
tag_lock_systemstatus, created = Tag.objects.get_or_create(
tag_name=model.csv_tag_lock_systemstatus,
tagcolor=tagcolor_white,
)
# call logger
if created:
tag_lock_systemstatus.logger(
model.csv_import_username.username,
' SYSTEM_IMPORTER_FILE_CSV_TAG_CREATED',
)
""" lock analysisstatus """
# get existing lock analysisstatus tag
try:
tag_lock_analysisstatus = Tag.objects.get(
tag_name=model.csv_tag_lock_analysisstatus,
)
# get or create lock analysisstatus tag
except Tag.DoesNotExist:
tag_lock_analysisstatus, created = Tag.objects.get_or_create(
tag_name=model.csv_tag_lock_analysisstatus,
tagcolor=tagcolor_white,
)
# call logger
if created:
tag_lock_analysisstatus.logger(
model.csv_import_username.username,
' SYSTEM_IMPORTER_FILE_CSV_TAG_CREATED',
)
def add_fk_attributes(system, system_created, model, row, row_counter, request=None):
"""add foreign key relationships to system"""
""" set username for logger """
# if function was called from 'system_instant' and 'system_upload'
if request:
logger_username = str(request.user)
# if function was called from 'system_cron'
else:
logger_username = model.csv_import_username.username
""" systemstatus (tagfree is set with tags in 'csv_attributes_add.add_many2many_attributes()') """
# set systemstatus for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_systemstatus):
# set default systemstatus for new system
if system_created:
# set systemstatus for new system
system.systemstatus = model.csv_default_systemstatus
# change systemstatus for existing system if not locked
else:
# get lockstatus
tag_lock_systemstatus = Tag.objects.get(
tag_name=model.csv_tag_lock_systemstatus
)
# check for lockstatus in all tags of system
if tag_lock_systemstatus not in system.tag.all():
# change to default systemstatus for existing system
system.systemstatus = model.csv_default_systemstatus
""" analysisstatus (tagfree is set with tags in 'csv_attributes_add.add_many2many_attributes()') """
# set analysisstatus for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_analysisstatus):
# set default analysisstatus for new system
if system_created:
# set analysisstatus for new system
system.analysisstatus = model.csv_default_analysisstatus
# change analysisstatus for existing system if not locked
else:
# get lockstatus
tag_lock_analysisstatus = Tag.objects.get(
tag_name=model.csv_tag_lock_analysisstatus
)
# check for lockstatus in all tags of system
if tag_lock_analysisstatus not in system.tag.all():
# change to default analysisstatus for existing system
system.analysisstatus = model.csv_default_analysisstatus
""" dnsname """
# set dnsname for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_dnsname):
# get dnsname from CSV
if model.csv_choice_dnsname:
# check for index error
try:
# get dnsname from CSV column
dnsname_name = row[model.csv_column_dnsname - 1]
# check for empty string
if dnsname_name:
# value is valid
try:
# get or create dnsname
dnsname, created = Dnsname.objects.get_or_create(
dnsname_name=dnsname_name
)
# call logger if created
if created:
dnsname.logger(
logger_username,
' SYSTEM_IMPORTER_FILE_CSV_DNSNAME_CREATED',
)
# value is not valid
except DataError:
# if function was called from 'system_instant' and 'system_upload'
if request:
# call message
messages.warning(
request,
f'Value for DNS name in row {row_counter} was not a valid value.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_DNSNAME_COLUMN row_{row_counter}:invalid_dnsname',
)
# set empty value
dnsname = None
# string was empty
else:
# set empty value (field is empty)
dnsname = None
# index out of range
except IndexError:
# if function was called from 'system_instant' and 'system_upload'
if request:
# call message
messages.warning(
request,
f'Index for DNS name in row {row_counter} was out of range.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_DNSNAME_COLUMN row_{row_counter}:out_of_range',
)
# set empty value
dnsname = None
# get dnsname from DB
elif model.csv_default_dnsname:
dnsname = model.csv_default_dnsname
# set empty value (removes for existing system if neither CSV nor DB is chosen, does nothing for new system)
else:
dnsname = None
# set dnsname for system
system.dnsname = dnsname
""" domain """
# set domain for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_domain):
# get domain from CSV
if model.csv_choice_domain:
# check for index error
try:
# get domain from CSV column
domain_name = row[model.csv_column_domain - 1]
# check for empty string and compare to system name (when queried with local account, hostname is returned under some circumstances depending on tool)
if domain_name and domain_name != system.system_name:
# value is valid
try:
# get or create domain
domain, created = Domain.objects.get_or_create(
domain_name=domain_name
)
# call logger if created
if created:
domain.logger(
logger_username,
' SYSTEM_IMPORTER_FILE_CSV_DOMAIN_CREATED',
)
# value is not valid
except DataError:
# if function was called from 'system_instant' and 'system_upload'
if request:
# call message
messages.warning(
request,
f'Value for domain in row {row_counter} was not a valid value.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_DOMAIN_COLUMN row_{row_counter}:invalid_domain',
)
# set empty value
domain = None
# string was empty or same as system_name
else:
# set empty value (field is empty)
domain = None
# index out of range
except IndexError:
                # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Index for domain in row {row_counter} was out of range.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_DOMAIN_COLUMN row_{row_counter}:out_of_range',
)
# set empty value
domain = None
# get domain from DB
elif model.csv_default_domain:
domain = model.csv_default_domain
# set empty value (removes for existing system if neither CSV nor DB is chosen, does nothing for new system)
else:
domain = None
# set domain for system
system.domain = domain
""" location """
# set location for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_location):
# get location from CSV
if model.csv_choice_location:
# check for index error
try:
# get location from CSV column
location_name = row[model.csv_column_location - 1]
# check for empty string
if location_name:
# value is valid
try:
# get or create location
location, created = Location.objects.get_or_create(
location_name=location_name
)
# call logger if created
if created:
location.logger(
logger_username,
' SYSTEM_IMPORTER_FILE_CSV_LOCATION_CREATED',
)
# value is not valid
except DataError:
                        # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Value for location in row {row_counter} was not a valid value.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_LOCATION_COLUMN row_{row_counter}:invalid_location',
)
# set empty value
location = None
# string was empty
else:
# set empty value (field is empty)
location = None
# index out of range
except IndexError:
                # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Index for location in row {row_counter} was out of range.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_LOCATION_COLUMN row_{row_counter}:out_of_range',
)
# set empty value
location = None
# get location from DB
elif model.csv_default_location:
location = model.csv_default_location
# set empty value (removes for existing system if neither CSV nor DB is chosen, does nothing for new system)
else:
location = None
# set location for system
system.location = location
""" os """
# set os for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_os):
# get os from CSV
if model.csv_choice_os:
# check for index error
try:
# get os from CSV column
os_name = row[model.csv_column_os - 1]
# check for empty string
if os_name:
# value is valid
try:
# get or create os
os, created = Os.objects.get_or_create(os_name=os_name)
# call logger if created
if created:
os.logger(
logger_username, ' SYSTEM_IMPORTER_FILE_CSV_OS_CREATED'
)
# value is not valid
except DataError:
                        # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Value for OS in row {row_counter} was not a valid value.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_OS_COLUMN row_{row_counter}:invalid_os',
)
# set empty value
os = None
# string was empty
else:
# set empty value (field is empty)
os = None
# index out of range
except IndexError:
                # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request, f'Index for OS in row {row_counter} was out of range.'
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_OS_COLUMN row_{row_counter}:out_of_range',
)
# set empty value
os = None
# get os from DB
elif model.csv_default_os:
os = model.csv_default_os
# set empty value (removes for existing system if neither CSV nor DB is chosen, does nothing for new system)
else:
os = None
# set os for system
system.os = os
""" reason """
# set reason for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_reason):
# get reason from CSV
if model.csv_choice_reason:
# check for index error
try:
# get reason from CSV column
reason_name = row[model.csv_column_reason - 1]
# check for empty string
if reason_name:
# value is valid
try:
# get or create reason
reason, created = Reason.objects.get_or_create(
reason_name=reason_name
)
# call logger if created
if created:
reason.logger(
logger_username,
' SYSTEM_IMPORTER_FILE_CSV_REASON_CREATED',
)
# value is not valid
except DataError:
                        # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Value for reason in row {row_counter} was not a valid value.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_REASON_COLUMN row_{row_counter}:invalid_reason',
)
# set empty value
reason = None
# string was empty
else:
# set empty value (field is empty)
reason = None
# index out of range
except IndexError:
                # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Index for reason in row {row_counter} was out of range.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_REASON_COLUMN row_{row_counter}:out_of_range',
)
# set empty value
reason = None
# get reason from DB
elif model.csv_default_reason:
reason = model.csv_default_reason
# set empty value (removes for existing system if neither CSV nor DB is chosen, does nothing for new system)
else:
reason = None
# set reason for system
system.reason = reason
""" recommendation """
# set recommendation for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_recommendation):
# get recommendation from CSV
if model.csv_choice_recommendation:
# check for index error
try:
# get recommendation from CSV column
recommendation_name = row[model.csv_column_recommendation - 1]
# check for empty string
if recommendation_name:
# value is valid
try:
# get or create recommendation
recommendation, created = Recommendation.objects.get_or_create(
recommendation_name=recommendation_name
)
# call logger if created
if created:
recommendation.logger(
logger_username,
' SYSTEM_IMPORTER_FILE_CSV_RECOMMENDATION_CREATED',
)
# value is not valid
except DataError:
                        # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Value for recommendation in row {row_counter} was not a valid value.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_RECOMMENDATION_COLUMN row_{row_counter}:invalid_recommendation',
)
# set empty value
recommendation = None
# string was empty
else:
# set empty value (field is empty)
recommendation = None
# index out of range
except IndexError:
                # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Index for recommendation in row {row_counter} was out of range.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_RECOMMENDATION_COLUMN row_{row_counter}:out_of_range',
)
# set empty value
recommendation = None
# get recommendation from DB
elif model.csv_default_recommendation:
recommendation = model.csv_default_recommendation
# set empty value (removes for existing system if neither CSV nor DB is chosen, does nothing for new system)
else:
recommendation = None
# set recommendation for system
system.recommendation = recommendation
""" serviceprovider """
# set serviceprovider for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_serviceprovider):
# get serviceprovider from CSV
if model.csv_choice_serviceprovider:
# check for index error
try:
# get serviceprovider from CSV column
serviceprovider_name = row[model.csv_column_serviceprovider - 1]
# check for empty string
if serviceprovider_name:
# value is valid
try:
# get or create serviceprovider
(
serviceprovider,
created,
) = Serviceprovider.objects.get_or_create(
serviceprovider_name=serviceprovider_name
)
# call logger if created
if created:
serviceprovider.logger(
logger_username,
' SYSTEM_IMPORTER_FILE_CSV_SERVICEPROVIDER_CREATED',
)
# value is not valid
except DataError:
                        # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Value for serviceprovider in row {row_counter} was not a valid value.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_SERVICEPROVIDER_COLUMN row_{row_counter}:invalid_serviceprovider',
)
# set empty value
serviceprovider = None
# string was empty
else:
# set empty value (field is empty)
serviceprovider = None
# index out of range
except IndexError:
                # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Index for serviceprovider in row {row_counter} was out of range.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_SERVICEPROVIDER_COLUMN row_{row_counter}:out_of_range',
)
# set empty value
serviceprovider = None
# get serviceprovider from DB
elif model.csv_default_serviceprovider:
serviceprovider = model.csv_default_serviceprovider
# set empty value (removes for existing system if neither CSV nor DB is chosen, does nothing for new system)
else:
serviceprovider = None
# set serviceprovider for system
system.serviceprovider = serviceprovider
""" systemtype """
# set systemtype for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_systemtype):
# get systemtype from CSV
if model.csv_choice_systemtype:
# check for index error
try:
# get systemtype from CSV column
systemtype_name = row[model.csv_column_systemtype - 1]
# check for empty string
if systemtype_name:
# value is valid
try:
# get or create systemtype
systemtype, created = Systemtype.objects.get_or_create(
systemtype_name=systemtype_name
)
# call logger if created
if created:
systemtype.logger(
logger_username,
' SYSTEM_IMPORTER_FILE_CSV_SYSTEMTYPE_CREATED',
)
# value is not valid
except DataError:
                        # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Value for systemtype in row {row_counter} was not a valid value.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_SYSTEMTYPE_COLUMN row_{row_counter}:invalid_systemtype',
)
# set empty value
systemtype = None
# string was empty
else:
# set empty value (field is empty)
systemtype = None
# index out of range
except IndexError:
                # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Index for systemtype in row {row_counter} was out of range.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_SYSTEMTYPE_COLUMN row_{row_counter}:out_of_range',
)
# set empty value
systemtype = None
# get systemtype from DB
elif model.csv_default_systemtype:
systemtype = model.csv_default_systemtype
# set empty value (removes for existing system if neither CSV nor DB is chosen, does nothing for new system)
else:
systemtype = None
# set systemtype for system
system.systemtype = systemtype
# return system with foreign key relations to 'csv_main.system_handler'
return system
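# The eight foreign key blocks above share one pattern: read a CSV cell, get
# or create the referenced object by name, and fall back to None on empty or
# invalid values. A generic helper like the sketch below could express that
# pattern once. It is illustrative only: the name is hypothetical, it is not
# part of the importer's actual API, and the warning messages / warning_logger
# calls of the blocks above are omitted for brevity. It assumes the module's
# existing imports (DataError) and the models' 'logger' convention.
def _resolve_fk_from_csv(model_class, name_field, value, logger_username, log_message):
    """illustrative sketch: resolve a foreign key object from a CSV cell value"""
    # empty string means "no value in this cell"
    if not value:
        return None
    # value is valid
    try:
        # get or create the referenced object by its name field
        obj, created = model_class.objects.get_or_create(**{name_field: value})
    # value is not valid
    except DataError:
        return None
    # call logger if created
    if created:
        obj.logger(logger_username, log_message)
    return obj
# e.g. system.dnsname = _resolve_fk_from_csv(
#     Dnsname, 'dnsname_name', dnsname_name, logger_username,
#     ' SYSTEM_IMPORTER_FILE_CSV_DNSNAME_CREATED',
# )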
def add_many2many_attributes(
system, system_created, model, row, row_counter, request=None
):
"""add many2many relationships to system"""
""" set username for logger and object """
    # if function was called from 'system_instant' or 'system_upload'
if request:
# get user for object
csv_import_user = request.user
# get user for logger
logger_username = str(request.user)
# if function was called from 'system_cron'
else:
# get user for object
csv_import_user = model.csv_import_username
# get user for logger
logger_username = model.csv_import_username.username
""" IP addresses """
# add IPs for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_ip):
# remove IPs if not new system
if not system_created:
# remove all IPs
system.ip.clear()
# get IPs from CSV
if model.csv_choice_ip:
# check for index error
try:
# get IP string
ip_string = row[model.csv_column_ip - 1]
# check for empty string
if ip_string:
# get IP delimiter from config
if model.csv_ip_delimiter == 'ip_comma':
ip_delimiter = ','
elif model.csv_ip_delimiter == 'ip_semicolon':
ip_delimiter = ';'
elif model.csv_ip_delimiter == 'ip_space':
ip_delimiter = ' '
# split IP string to list depending on delimiter
ip_list = ip_string.split(ip_delimiter)
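                    # e.g. '10.0.0.1;10.0.0.2' with 'ip_semicolon' yields ['10.0.0.1', '10.0.0.2']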
# iterate over list elements
for ip_ip in ip_list:
                        # if function was called from 'system_instant' or 'system_upload'
if request:
# check, get or create IP
ip = check_and_create_ip(ip_ip, model, row_counter, request)
# if function was called from 'system_cron'
else:
# check, get or create IP
ip = check_and_create_ip(ip_ip, model, row_counter)
# IP was returned from 'check_and_create_ip'
if ip:
# add ip to system
system.ip.add(ip)
# index out of range
except IndexError:
                # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request, f'Index for IP in row {row_counter} was out of range.'
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_IP_COLUMN row_{row_counter}:out_of_range',
)
""" case """
# set case for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_case):
# remove cases if not new system
if not system_created:
# remove all cases
system.case.clear()
# get case from CSV
if model.csv_choice_case:
# check for index error
try:
# get case from CSV column
case_name = row[model.csv_column_case - 1]
# check for empty string
if case_name:
# get existing case
try:
case = Case.objects.get(
case_name=case_name,
)
# create new case
except Case.DoesNotExist:
# value is valid
try:
case, created = Case.objects.get_or_create(
case_name=case_name,
case_is_incident=False,
case_created_by_user_id=csv_import_user,
)
# call logger if created
if created:
case.logger(
logger_username,
' SYSTEM_IMPORTER_FILE_CSV_CASE_CREATED',
)
# value is not valid
except DataError:
                            # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Value for case in row {row_counter} was not a valid value.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_CASE_COLUMN row_{row_counter}:invalid_case',
)
# set empty value
case = None
# only add case to system if one of the previous checks was successful
if case:
# set case for system
system.case.add(case)
# index out of range
except IndexError:
                # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Index for case in row {row_counter} was out of range.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_CASE_COLUMN row_{row_counter}:out_of_range',
)
# get case from DB
elif model.csv_default_case:
cases = model.csv_default_case
for case in cases.all():
# add case to system
system.case.add(case)
""" company """
# set company for new system or change if remove old is set
if system_created or (not system_created and model.csv_remove_company):
# remove companies if not new system
if not system_created:
# remove all companies
system.company.clear()
# get company from CSV
if model.csv_choice_company:
# check for index error
try:
# get company from CSV column
company_name = row[model.csv_column_company - 1]
# check for empty string
if company_name:
# value is valid
try:
# get or create company
company, created = Company.objects.get_or_create(
company_name=company_name
)
# call logger if created
if created:
company.logger(
logger_username,
' SYSTEM_IMPORTER_FILE_CSV_COMPANY_CREATED',
)
# value is not valid
except DataError:
                        # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Value for company in row {row_counter} was not a valid value.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_COMPANY_COLUMN row_{row_counter}:invalid_company',
)
# set empty value
company = None
# only add company to system if one of the previous checks was successful
if company:
# set company for system
system.company.add(company)
# index out of range
except IndexError:
                # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Index for company in row {row_counter} was out of range.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_COMPANY_COLUMN row_{row_counter}:out_of_range',
)
# get company from DB
elif model.csv_default_company:
            companies = model.csv_default_company
            for company in companies.all():
# add company to system
system.company.add(company)
""" tag """
# set tag for new system or change if remove old is set
if system_created or (
not system_created and model.csv_remove_tag != 'tag_remove_none'
):
"""prepare tag prefix"""
# get tag delimiter from config
if model.csv_tag_prefix_delimiter == 'tag_prefix_underscore':
tag_prefix_delimiter = '_'
elif model.csv_tag_prefix_delimiter == 'tag_prefix_hyphen':
tag_prefix_delimiter = '-'
elif model.csv_tag_prefix_delimiter == 'tag_prefix_period':
tag_prefix_delimiter = '.'
else:
tag_prefix_delimiter = None
# build tagprefix string from prefix and delimiter
        if model.csv_tag_prefix and tag_prefix_delimiter:
            tagprefix = model.csv_tag_prefix + tag_prefix_delimiter
        # defensive fallback so 'tagprefix' is always bound below (upstream config validation should normally ensure prefix and delimiter are set)
        else:
            tagprefix = ''
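        # e.g. csv_tag_prefix 'AUTO' with 'tag_prefix_underscore' yields tagprefix 'AUTO_'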
""" remove tags for existing systems (either all or just with prefix) """
# remove all tags
if not system_created and model.csv_remove_tag == 'tag_remove_all':
# remove all tags
system.tag.clear()
# remove tags with prefix (and keep other / manually set tags)
elif not system_created and model.csv_remove_tag == 'tag_remove_prefix':
# get all relevant tags for this system
prefixtags = system.tag.filter(tag_name__startswith=tagprefix)
# iterate over tags
for prefixtag in prefixtags:
# remove this tag relation from system
prefixtag.system_set.remove(system)
""" add tags from CSV or DB """
# get tags from CSV
if model.csv_choice_tag:
# check for index error
try:
# get tagstring from CSV column
tag_string = row[model.csv_column_tag - 1]
# check for empty string
if tag_string:
# get tag delimiter from config
if model.csv_tag_delimiter == 'tag_comma':
tag_delimiter = ','
elif model.csv_tag_delimiter == 'tag_semicolon':
tag_delimiter = ';'
elif model.csv_tag_delimiter == 'tag_space':
tag_delimiter = ' '
# split tag string to list depending on delimiter
tag_list = tag_string.split(tag_delimiter)
# get tagcolor
tagcolor_primary = Tagcolor.objects.get(tagcolor_name='primary')
# iterate over tags
for tag in tag_list:
# build tagname from prefix, prefix delimiter and name
tagname = tagprefix + tag
# get existing tag
try:
tag = Tag.objects.get(
tag_name=tagname,
)
# create new tag
except Tag.DoesNotExist:
# value is valid
try:
tag, created = Tag.objects.get_or_create(
tag_name=tagname,
tagcolor=tagcolor_primary,
)
# call logger if created
if created:
tag.logger(
logger_username,
' SYSTEM_IMPORTER_FILE_CSV_TAG_CREATED',
)
# value is not valid
except DataError:
                                # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request,
f'Value for tag in row {row_counter} was not a valid value.',
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_TAG_COLUMN row_{row_counter}:invalid_tag',
)
# set empty value
tag = None
# only add tag to system if one of the previous checks was successful
if tag:
# add tag to system
system.tag.add(tag)
# index out of range
except IndexError:
                # if function was called from 'system_instant' or 'system_upload'
if request:
# call message
messages.warning(
request, f'Index for tag in row {row_counter} was out of range.'
)
# call logger
warning_logger(
logger_username,
f' SYSTEM_IMPORTER_FILE_CSV_TAG_COLUMN row_{row_counter}:out_of_range',
)
# get tags from DB
elif model.csv_default_tag:
tags = model.csv_default_tag
for tag in tags.all():
# add tag to system
system.tag.add(tag)
""" change systemstatus / analysisstatus for systems w/o tags"""
# if tag from CSV are enabled
if model.csv_choice_tag:
# check for index error
try:
# get tagstring from CSV column
tag_string = row[model.csv_column_tag - 1]
# no tags for this system
if not tag_string:
"""systemstatus"""
# tagfree systemstatus is set
if model.csv_choice_tagfree_systemstatus:
                    # set tagfree systemstatus for new system or change tagfree systemstatus if remove old is set
if system_created or (
not system_created and model.csv_remove_systemstatus
):
# set tagfree systemstatus for new system
if system_created:
# set systemstatus for new system
system.systemstatus = (
model.csv_default_tagfree_systemstatus
)
# change systemstatus for existing system if not locked
else:
# get lockstatus
tag_lock_systemstatus = Tag.objects.get(
tag_name=model.csv_tag_lock_systemstatus
)
# check for lockstatus in all tags of system
if tag_lock_systemstatus not in system.tag.all():
# change to tagfree systemstatus for existing system
system.systemstatus = (
model.csv_default_tagfree_systemstatus
)
# save object
system.save()
""" analysisstatus """
# tagfree analysisstatus is set
if model.csv_choice_tagfree_analysisstatus:
# set tagfree status for new system or change to tagfree status if remove old is set
if system_created or (
not system_created and model.csv_remove_analysisstatus
):
# set tagfree analysisstatus for new system
if system_created:
# set analysisstatus for new system
system.analysisstatus = (
model.csv_default_tagfree_analysisstatus
)
# change analysisstatus for existing system if not locked
else:
# get lockstatus
tag_lock_analysisstatus = Tag.objects.get(
tag_name=model.csv_tag_lock_analysisstatus
)
# check for lockstatus in all tags of system
if tag_lock_analysisstatus not in system.tag.all():
# change to tagfree analysisstatus for existing system
system.analysisstatus = (
model.csv_default_tagfree_analysisstatus
)
# save object
system.save()
# index out of range
except IndexError:
# do not change systemstatus and / or analysisstatus
pass
# return system with many2many relations to 'csv_main.system_handler'
return system
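# A minimal sketch (comment form, illustrative only) of how a caller such as
# 'csv_main.system_handler' might wire these helpers together; the names
# 'csv_rows', 'get_or_create_system' and 'add_fk_attributes' are hypothetical
# stand-ins for the row bookkeeping and the foreign key helper above:
# for row_counter, row in enumerate(csv_rows, start=1):
#     system, system_created = get_or_create_system(row, model)
#     system = add_fk_attributes(system, system_created, model, row, row_counter, request)
#     system = add_many2many_attributes(system, system_created, model, row, row_counter, request)
#     system.save()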
11550813
import numbers
import re
import time
from datetime import datetime
from typing import Dict, List, Union
_unit_in_ms_without_week = {"s": 1000, "m": 60000, "h": 3600000, "d": 86400000}
_unit_in_ms = {**_unit_in_ms_without_week, "w": 604800000}
def datetime_to_ms(dt):
    """Converts a naive (UTC) datetime object to milliseconds since epoch."""
    epoch = datetime.utcfromtimestamp(0)
    return int((dt - epoch).total_seconds() * 1000.0)
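# e.g. datetime_to_ms(datetime(1970, 1, 1, 0, 0, 1)) == 1000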
def ms_to_datetime(ms: Union[int, float]) -> datetime:
"""Converts milliseconds since epoch to datetime object.
Args:
ms (Union[int, float]): Milliseconds since epoch
Returns:
datetime: Datetime object.
"""
if ms < 0:
raise ValueError("ms must be greater than or equal to zero.")
return datetime.utcfromtimestamp(ms / 1000)
def time_string_to_ms(pattern, string, unit_in_ms):
    """Matches `string` against `pattern` (with the unit alternatives filled in)
    and returns its value in milliseconds, or None if the string does not match."""
    pattern = pattern.format("|".join(unit_in_ms))
res = re.fullmatch(pattern, string)
if res:
magnitude = int(res.group(1))
unit = res.group(2)
return magnitude * unit_in_ms[unit]
return None
def granularity_to_ms(granularity: str) -> int:
    """Returns the millisecond representation of a granularity string."""
    ms = time_string_to_ms(r"(\d+)({})", granularity, _unit_in_ms_without_week)
if ms is None:
raise ValueError(
"Invalid granularity format: `{}`. Must be on format <integer>(s|m|h|d). E.g. '5m', '3h' or '1d'.".format(
granularity
)
)
return ms
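# e.g. granularity_to_ms("5m") == 300_000; weeks are rejected because
# _unit_in_ms_without_week is used, so granularity_to_ms("1w") raises ValueError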
def granularity_unit_to_ms(granularity: str) -> int:
    """Returns the ms representation of the unit of a granularity string, e.g. '15m' -> ms of '1m'."""
    granularity = re.sub(r"^\d+", "1", granularity)
    return granularity_to_ms(granularity)
def time_ago_to_ms(time_ago_string: str) -> int:
"""Returns millisecond representation of time-ago string"""
if time_ago_string == "now":
return 0
ms = time_string_to_ms(r"(\d+)({})-ago", time_ago_string, _unit_in_ms)
if ms is None:
raise ValueError(
"Invalid time-ago format: `{}`. Must be on format <integer>(s|m|h|d|w)-ago or 'now'. E.g. '3d-ago' or '1w-ago'.".format(
time_ago_string
)
)
return ms
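# e.g. time_ago_to_ms("3d-ago") == 259_200_000 and time_ago_to_ms("now") == 0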
def timestamp_to_ms(timestamp: Union[int, float, str, datetime]) -> int:
"""Returns the ms representation of some timestamp given by milliseconds, time-ago format or datetime object
Args:
timestamp (Union[int, float, str, datetime]): Convert this timestamp to ms.
Returns:
int: Milliseconds since epoch representation of timestamp
"""
if isinstance(timestamp, numbers.Number): # float, int, int64 etc
ms = int(timestamp)
elif isinstance(timestamp, str):
ms = int(round(time.time() * 1000)) - time_ago_to_ms(timestamp)
elif isinstance(timestamp, datetime):
ms = datetime_to_ms(timestamp)
else:
        raise TypeError(
            "Timestamp `{}` was of type {}, but must be int, float, str or datetime.".format(timestamp, type(timestamp))
        )
if ms < 0:
raise ValueError(
"Timestamps can't be negative - they must represent a time after 1.1.1970, but {} was provided".format(ms)
)
return ms
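# The three accepted input forms, for illustration:
#   timestamp_to_ms(1_514_764_800_000)        -> 1_514_764_800_000 (already ms)
#   timestamp_to_ms("2w-ago")                 -> current time minus 1_209_600_000 ms
#   timestamp_to_ms(datetime(2018, 1, 1))     -> 1_514_764_800_000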
def _convert_time_attributes_in_dict(item: Dict) -> Dict:
    """Best-effort: convert known millisecond timestamp attributes to formatted datetime strings."""
    TIME_ATTRIBUTES = [
"start_time",
"end_time",
"last_updated_time",
"created_time",
"timestamp",
"scheduled_execution_time",
"source_created_time",
"source_modified_time",
]
new_item = {}
for k, v in item.items():
if k in TIME_ATTRIBUTES:
            try:
                v = ms_to_datetime(v).strftime("%Y-%m-%d %H:%M:%S")
            # leave values that cannot be converted (e.g. negative or out of range) unchanged
            except (ValueError, OSError):
                pass
new_item[k] = v
return new_item
def convert_time_attributes_to_datetime(item: Union[Dict, List[Dict]]) -> Union[Dict, List[Dict]]:
if isinstance(item, dict):
return _convert_time_attributes_in_dict(item)
if isinstance(item, list):
new_items = []
for el in item:
new_items.append(_convert_time_attributes_in_dict(el))
return new_items
raise TypeError("item must be dict or list of dicts")