hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e64c5713fab03280850caf053584020ebdd8489a | 7,064 | py | Python | scripts/union_candidates.py | ChuanyiZ/Moss | 8a54556782efc534055fcbdad2b4bf23de2bb77e | [
"BSD-3-Clause"
] | 1 | 2021-06-15T01:19:53.000Z | 2021-06-15T01:19:53.000Z | scripts/union_candidates.py | ChuanyiZ/Moss | 8a54556782efc534055fcbdad2b4bf23de2bb77e | [
"BSD-3-Clause"
] | 1 | 2021-08-02T16:36:26.000Z | 2021-08-02T16:36:26.000Z | scripts/union_candidates.py | ChuanyiZ/Moss | 8a54556782efc534055fcbdad2b4bf23de2bb77e | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 chuanyi5 <chuanyi5@illinois.edu>
#
# Distributed under terms of the MIT license.
"""
Generate candidates VCF from Strelka2, Mutect2 tumor VCFs
"""
import allel
import argparse
import numpy as np
import sys
def strelka2(inputs) -> dict:
    """Collect the normal-sample genotype per (chrom, pos) from Strelka2 VCFs.

    Returns a nested dict: chrom -> pos -> {"gt": two-base genotype string,
    "num_pass": count of inputs where the site passed all filters}.
    The genotype is derived from the NT field (ref/het/hom); records with any
    other NT value are skipped.
    """
    # chrom -> pos -> {"gt": ..., "num_pass": ...}
    genotypes = dict()
    disagreements = 0
    for path in inputs:
        vcf = allel.read_vcf(path, fields='*')
        records = zip(vcf["variants/CHROM"], vcf["variants/POS"],
                      vcf["variants/REF"], vcf["variants/ALT"],
                      vcf["variants/NT"], vcf["variants/FILTER_PASS"])
        for chrom, pos, ref, alt, nt, passed in records:
            chrom = str(chrom)
            pos = int(pos)
            first_alt = alt[0]
            passes = int(passed)
            # Translate the NT code into a two-base normal genotype.
            if nt == 'ref':
                normal = ref + ref
            elif nt == 'het':
                normal = ref + first_alt
            elif nt == 'hom':
                normal = first_alt + first_alt
            else:
                continue
            per_chrom = genotypes.setdefault(chrom, {})
            entry = per_chrom.get(pos)
            if entry is None:
                per_chrom[pos] = {"gt": normal, "num_pass": passes}
            else:
                entry["num_pass"] += passes
                # A heterozygous call overrides a previously stored homozygous one.
                if entry["gt"][0] == entry["gt"][1] and normal[0] != normal[1]:
                    entry["gt"] = normal
                    disagreements += 1
    print(f"Disagreement on normal: {disagreements} times.")
    return genotypes
def mutect2(inputs, normal_name) -> dict:
    """Collect the normal-sample genotype per (chrom, pos) from Mutect2 tumor VCFs.

    Returns a nested dict: chrom -> pos -> {"gt": two-base genotype string,
    "num_pass": count of inputs where the site passed all filters}.
    Only SNP records are considered; sites flagged artifact_in_normal are skipped.
    """
    # chrom: pos: normal_genotype
    chrom_pos_gt = dict()
    cnt = 0          # times a het call overrode a previously stored hom call
    cnt_het_hom = 0  # sites where the normal sample is not hom-ref
    for ifile in inputs:
        # ["variants/CHROM", "variants/POS", "variants/REF", "variants/ALT", "calldata/GT"]
        in_vcf = allel.read_vcf(ifile, fields='*')
        # Column index of the normal sample within the VCF sample list.
        idx_normal = np.argwhere(in_vcf["samples"] == normal_name)[0][0]
        # Restrict every field array to SNP records via the boolean mask.
        zipped = zip(in_vcf["variants/CHROM"][in_vcf["variants/is_snp"]],
                     in_vcf["variants/POS"][in_vcf["variants/is_snp"]],
                     in_vcf["variants/REF"][in_vcf["variants/is_snp"]],
                     in_vcf["variants/ALT"][in_vcf["variants/is_snp"]],
                     in_vcf["calldata/GT"][in_vcf["variants/is_snp"]],
                     in_vcf["variants/FILTER_PASS"][in_vcf["variants/is_snp"]],
                     in_vcf["variants/FILTER_artifact_in_normal"][in_vcf["variants/is_snp"]])
        for chrom, pos, ref, alt, gt, is_pass, is_artifact in zipped:
            if is_artifact:
                # Skip sites Mutect2 flagged as an artifact in the normal sample.
                continue
            chrom = str(chrom)
            pos = int(pos)
            num_pass = int(is_pass)
            alt = alt[0]  # first ALT allele only
            ref_alt = ref + alt
            # Map the normal sample's GT allele indices (0=ref, 1=alt) to bases.
            normal = ref_alt[gt[idx_normal][0]] + ref_alt[gt[idx_normal][1]]
            if gt[idx_normal][0] != 0 or gt[idx_normal][1] != 0:
                cnt_het_hom += 1
            if chrom in chrom_pos_gt:
                if pos in chrom_pos_gt[chrom]:
                    chrom_pos_gt[chrom][pos]["num_pass"] += num_pass
                    # A heterozygous call overrides a stored homozygous one.
                    if chrom_pos_gt[chrom][pos]["gt"][0] == chrom_pos_gt[chrom][pos]["gt"][1] and normal[0] != normal[1]:
                        cnt += 1
                        chrom_pos_gt[chrom][pos]["gt"] = normal
                else:
                    chrom_pos_gt[chrom][pos] = {"gt": normal, "num_pass": num_pass}
            else:
                chrom_pos_gt[chrom] = {pos: {"gt": normal, "num_pass": num_pass}}
    print(f"Disagreement on normal: {cnt} times.")
    print(f"Not ref: {cnt_het_hom} times.")
    return chrom_pos_gt
def write_vcf(dict_chrom_pos_gt: dict, output, input_files, is_split: bool):
    """Write candidate sites with a homozygous normal genotype to VCF.

    Args:
        dict_chrom_pos_gt: chrom -> pos -> {"gt": str, "num_pass": int}.
        output: output VCF path; in split mode the chromosome name is inserted
            before the extension (e.g. "out.vcf" -> "out.chr1.vcf").
        input_files: unused; kept for interface compatibility.
        is_split: write one VCF per chromosome instead of a single file.

    Only positions whose stored genotype is homozygous (gt[0] == gt[1]) are
    emitted; heterozygous sites are skipped.
    """
    header = (
        "##fileformat=VCFv4.1\n"
        '##FILTER=<ID=PASS,Description="All filters passed">\n'
        '##INFO=<ID=NUMPASS,Number=1,Type=Integer,Description="Number of samples that pass the base caller">\n'
        '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
        "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tNORMAL\n"
    )
    if is_split:
        for chrom, pos_info in dict_chrom_pos_gt.items():
            # Ensure the per-chromosome file name carries a "chr" prefix.
            prefix = 'chr' if not chrom.startswith("chr") else ''
            out_filename = output[:output.rfind('.')] + f".{prefix}{chrom}.vcf"
            with open(out_filename, "w") as ofile:
                ofile.write(header)
                _write_chrom_records(ofile, chrom, pos_info)
    else:
        with open(output, "w") as ofile:
            ofile.write(header)
            for chrom, pos_info in dict_chrom_pos_gt.items():
                _write_chrom_records(ofile, chrom, pos_info)


def _write_chrom_records(ofile, chrom, pos_info):
    """Write the homozygous records of one chromosome, flushing every 1000 lines."""
    chunk = []
    for pos, info in pos_info.items():
        gt = info["gt"]
        num_pass = info["num_pass"]
        if gt[0] == gt[1]:
            chunk.append(
                f"{chrom}\t{pos}\t.\t{gt[0]}\t.\t.\t.\tNUMPASS={num_pass}\tGT\t0|0\n")
            if len(chunk) == 1000:
                ofile.writelines(chunk)
                chunk = []
    ofile.writelines(chunk)
def main(args):
    """Build candidate dict from the chosen caller's VCFs and write the output.

    Fixes two defects of the original: an unknown --tool value previously fell
    through both branches and crashed with NameError on chrom_pos_gt, and a
    missing --normal-name exited silently with no message.
    """
    # Merge a file listing of inputs (one path per line) into args.input.
    if args.input_files is not None:
        with open(args.input_files) as ifile:
            if args.input is None:
                args.input = []
            for line in ifile:
                args.input.append(line.strip())
    tool = args.tool[0].lower()
    if tool == 'm':
        if args.normal_name is None:
            sys.exit("--normal-name is required when --tool is Mutect")
        chrom_pos_gt = mutect2(args.input, args.normal_name)
    elif tool == 's':
        chrom_pos_gt = strelka2(args.input)
    else:
        sys.exit(f"Unknown tool {args.tool!r}; expected [M|m|Mutect] or [S|s|Strelka]")
    write_vcf(chrom_pos_gt, args.output, args.input_files, args.split)
if __name__ == "__main__":
    # Command-line interface; see module docstring for the overall purpose.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", help="input tumor result VCF file", action="append")
    parser.add_argument("--normal-name", help="name of the normal sample in the VCF file, only used for Mutect")
    parser.add_argument("-f", "--input-files", help="input tumor result VCF file list")
    parser.add_argument("-t", "--tool", help="[M|m|Mutect] or [S|s|Strelka]", required=True)
    parser.add_argument("-o", "--output", help="output candidates VCF file")
    parser.add_argument("--split", help="split output VCF by chromosomes", action="store_true")
    # With no CLI arguments at all, show the help text instead of running.
    args = parser.parse_args(None if sys.argv[1:] else ['-h'])
    main(args)
| 40.83237 | 206 | 0.531285 |
76acb7ecdf23ded7666d3dce7c3cc3eb0ac952b5 | 7,753 | py | Python | classification.py | zharfi/Cidar | 626b37291786108d7eed21c4f700556116f15288 | [
"MIT"
] | null | null | null | classification.py | zharfi/Cidar | 626b37291786108d7eed21c4f700556116f15288 | [
"MIT"
] | null | null | null | classification.py | zharfi/Cidar | 626b37291786108d7eed21c4f700556116f15288 | [
"MIT"
] | null | null | null | import itertools
import numpy as np
import matplotlib.pyplot as plt
import pickle
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.decomposition import KernelPCA
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from feature_extraction import FeatureExtraction
from helper import Helper
# Classify is the class that carries out the classification workflow.
class Classify:
    # Paths of the machine-learning models that were trained beforehand.
    # Preprocessing abbreviations:
    # SCAL = Standard Scaler
    # LDA = Linear Discriminant Analysis
    # PCA = Principal Component Analysis
    # Classifier abbreviations:
    # DT = Decision Tree
    # KNN = K-Nearest Neighbour
    # NB = Naive Bayes
    # RF = Random Forest
    # SVM = Support Vector Machine
    model_fol = './Model'
    scal = './Model/scale.sav'
    proc_lda = './Model/lda.sav'
    proc_pca = './Model/pca.sav'
    model_dt = './Model/DT_noproc.sav'
    model_knn = './Model/kNN_noproc.sav'
    model_nb = './Model/NB_lda.sav' # originally './Model/NB_pca.sav'
    model_rf = './Model/RF_noproc.sav'
    model_svm = './Model/SVM_noproc.sav'
    model_svm_giemsa = './Model/SVM_noproc_g.sav'
    model_svm_wright = './Model/SVM_noproc_w.sav'

    def __init__(self):
        pass

    # Classify many images at once.
    # Only images that have already been cropped can be classified.
    def klasifikasiCitraBanyak(self, folder, method):
        """Extract features from every image in *folder* and classify them."""
        self.folder = folder
        helper = Helper()
        files = helper.listFiles(folder)
        scale, proc, klas = self.loadModel(method)
        fitur_banyak = []
        hasil = []
        for fil in files:
            fe = FeatureExtraction()
            fitur_banyak.append(fe.ekstraksifitur(fil))
        hasil = self.klaf(scale, fitur_banyak, proc, method, klas)
        return hasil

    # Classify white-blood-cell feature data stored in a text file.
    # The text file must be saved under the name "Hasil Ekstraksi.txt".
    # Format: decimal point (.), comma (,) separated values, no header.
    # Example:
    # 2034,20.4,133,1, ... , 15.45
    def klasifikasiTeks(self, folder, method):
        """Load pre-extracted features from text and classify them."""
        self.folder = folder
        berkas_teks = open(folder + "/Hasil Ekstraksi.txt", "r")
        fitur_banyak = []
        hasil = []
        if berkas_teks != None:
            fitur_banyak = np.loadtxt(berkas_teks,delimiter=',')
            scale, proc, klas = self.loadModel(method)
            hasil = self.klaf(scale, fitur_banyak, proc, method, klas)
        return hasil

    # Compute and display the confusion matrix.
    # Requires a "truth.txt" file that lists the true class of each image.
    # The classes basophil, eosinophil, lymphocyte, monocyte, neutrophil and
    # stab are numbered 0-5 in that order.
    # Giemsa-stained images have no basophil class, so their classes run 0-4.
    # Example:
    # 5,1,0,0,4
    #
    # meaning the first row is stab, the second eosinophil, the fifth neutrophil.
    def ambilConfusionMatrix(self, folder, prediksi):
        """Plot the confusion matrix of *prediksi* against truth.txt."""
        self.folder = folder
        truth_file = open(folder + "/truth.txt", "r")
        if truth_file != None:
            y_true = truth_file.read().split(",")
            y_true_val = list(map(int, y_true))
            conf = confusion_matrix(y_true_val, prediksi)
            plt.figure()
            classes = None
            apakahWright = False  # NOTE(review): hard-coded, so the 6-class branch below is never taken
            if apakahWright == True:
                self.plot_confusion_matrix(conf, classes=[0, 1, 2, 3, 4, 5], title='Confusion matrix, without normalization')
            else:
                self.plot_confusion_matrix(conf, classes=[0, 1, 2, 3, 4], title='Confusion matrix, without normalization')
            plt.show()

    # Helper used by ambilConfusionMatrix() to draw the matrix.
    def plot_confusion_matrix(self, cm, classes,
                              normalize=False,
                              title='Confusion matrix',
                              cmap=plt.cm.Blues):
        """
        This function prints and plots the confusion matrix.
        Normalization can be applied by setting `normalize=True`.
        """
        if normalize:
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
            print("Normalized confusion matrix")
        else:
            print('Confusion matrix, without normalization')
        print(cm)
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)
        fmt = '.2f' if normalize else 'd'
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            plt.text(j, i, format(cm[i, j], fmt),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')

    # Load the selected model that was saved beforehand.
    # All models must be stored in the ./Model folder.
    def loadModel(self, method):
        """Return (scaler, preprocessor-or-None, classifier) for *method*."""
        scale = pickle.load(open(self.scal, 'rb'))
        proc = None
        klas = None
        if method == 'Decision Tree':
            klas = pickle.load(open(self.model_dt, 'rb'))
        elif method == 'kNN':
            klas = pickle.load(open(self.model_knn, 'rb'))
        elif method == 'Neural Network':
            dimension = 29
            hidden_layers = [100, 100]
            feature_columns = [tf.contrib.layers.real_valued_column("", dimension=dimension)] # number of features
            klas = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                                  hidden_units=hidden_layers,
                                                  n_classes=5, # number of classes; blood has 5
                                                  model_dir= self.model_fol)
        elif method == 'Naive Bayes':
            proc = pickle.load(open(self.proc_lda, 'rb')) # originally self.proc_pca
            klas = pickle.load(open(self.model_nb, 'rb'))
        elif method == 'Random Forest':
            klas = pickle.load(open(self.model_rf, 'rb'))
        elif method == 'SVM Giemsa':
            klas = pickle.load(open(self.model_svm_giemsa, 'rb'))
        elif method == 'SVM Wright':
            klas = pickle.load(open(self.model_svm_wright, 'rb'))
        else:
            klas = pickle.load(open(self.model_svm, 'rb'))
        return scale, proc, klas

    # Main entry point of the Classify() class.
    # Runs the scale -> (optional preprocess) -> predict sequence.
    def klaf(self, scale, fitur, proc, method, klas):
        """Scale *fitur*, optionally apply *proc*, then predict with *klas*."""
        fitur_scaled = []
        fitur_fixed = []
        hasil = []
        if method == 'Neural Network':
            fitur_fixed = np.array(fitur, dtype=float)
            hasil = np.asarray(list(klas.predict(fitur_fixed, as_iterable = True)))
        else:
            if proc == None:
                fitur_scaled = scale.transform(fitur)
                hasil = klas.predict(fitur_scaled)
            else:
                fitur_scaled = scale.transform(fitur)
                fitur_fixed = proc.transform(fitur_scaled)
                hasil = klas.predict(fitur_fixed)
        return hasil
42cb286268dedc1711f4f2ca8c84342917f9dd3e | 10,831 | py | Python | tests/components/prometheus/test_init.py | bednar/core | b45a952d61d34e3a48a69d281a03234d7bfb418d | [
"Apache-2.0"
] | 6 | 2020-07-18T16:33:25.000Z | 2021-09-26T09:52:04.000Z | tests/components/prometheus/test_init.py | bednar/core | b45a952d61d34e3a48a69d281a03234d7bfb418d | [
"Apache-2.0"
] | 38 | 2020-07-23T07:14:08.000Z | 2022-03-31T06:01:46.000Z | tests/components/prometheus/test_init.py | bednar/core | b45a952d61d34e3a48a69d281a03234d7bfb418d | [
"Apache-2.0"
] | 3 | 2020-10-18T07:08:40.000Z | 2021-06-21T02:26:00.000Z | """The tests for the Prometheus exporter."""
from dataclasses import dataclass
import pytest
from homeassistant import setup
from homeassistant.components import climate, humidifier, sensor
from homeassistant.components.demo.sensor import DemoSensor
import homeassistant.components.prometheus as prometheus
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
DEGREE,
DEVICE_CLASS_POWER,
ENERGY_KILO_WATT_HOUR,
EVENT_STATE_CHANGED,
)
from homeassistant.core import split_entity_id
from homeassistant.setup import async_setup_component
import tests.async_mock as mock
# Dotted module path used as the mock.patch target in these tests.
PROMETHEUS_PATH = "homeassistant.components.prometheus"
@dataclass
class FilterTest:
    """Class for capturing a filter test."""

    id: str  # entity_id fed into the state-changed handler
    should_pass: bool  # whether the handler is expected to record a metric
@pytest.fixture
async def prometheus_client(loop, hass, hass_client):
    """Initialize an hass_client with Prometheus component."""
    # Enable the prometheus integration plus demo sensor/climate/humidifier
    # platforms so the metrics endpoint has entities to export.
    await async_setup_component(hass, prometheus.DOMAIN, {prometheus.DOMAIN: {}})
    await setup.async_setup_component(
        hass, sensor.DOMAIN, {"sensor": [{"platform": "demo"}]}
    )
    await setup.async_setup_component(
        hass, climate.DOMAIN, {"climate": [{"platform": "demo"}]}
    )
    await hass.async_block_till_done()
    await setup.async_setup_component(
        hass, humidifier.DOMAIN, {"humidifier": [{"platform": "demo"}]}
    )

    # Extra hand-built sensors covering unit-name edge cases:
    # plain kWh unit without a device class.
    sensor1 = DemoSensor(
        None, "Television Energy", 74, None, ENERGY_KILO_WATT_HOUR, None
    )
    sensor1.hass = hass
    sensor1.entity_id = "sensor.television_energy"
    await sensor1.async_update_ha_state()

    # kWh unit combined with the power device class.
    sensor2 = DemoSensor(
        None, "Radio Energy", 14, DEVICE_CLASS_POWER, ENERGY_KILO_WATT_HOUR, None
    )
    sensor2.hass = hass
    sensor2.entity_id = "sensor.radio_energy"
    await sensor2.async_update_ha_state()

    # composite unit containing a "/" (SEK/kWh).
    sensor3 = DemoSensor(
        None, "Electricity price", 0.123, None, f"SEK/{ENERGY_KILO_WATT_HOUR}", None
    )
    sensor3.hass = hass
    sensor3.entity_id = "sensor.electricity_price"
    await sensor3.async_update_ha_state()

    # non-ASCII degree-sign unit.
    sensor4 = DemoSensor(None, "Wind Direction", 25, None, DEGREE, None)
    sensor4.hass = hass
    sensor4.entity_id = "sensor.wind_direction"
    await sensor4.async_update_ha_state()

    # µg/m³ unit with non-ASCII characters in both name and unit.
    sensor5 = DemoSensor(
        None,
        "SPS30 PM <1µm Weight concentration",
        3.7069,
        None,
        CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
        None,
    )
    sensor5.hass = hass
    sensor5.entity_id = "sensor.sps30_pm_1um_weight_concentration"
    await sensor5.async_update_ha_state()

    return await hass_client()
async def test_view(prometheus_client):  # pylint: disable=redefined-outer-name
    """Test prometheus metrics view."""
    resp = await prometheus_client.get(prometheus.API_ENDPOINT)

    assert resp.status == 200
    assert resp.headers["content-type"] == "text/plain"
    body = await resp.text()
    body = body.split("\n")

    assert len(body) > 3

    # Standard prometheus_client process metrics are present.
    assert "# HELP python_info Python platform information" in body
    assert (
        "# HELP python_gc_objects_collected_total "
        "Objects collected during gc" in body
    )

    # Demo sensor/climate/humidifier entities are exported with labels.
    assert (
        'temperature_c{domain="sensor",'
        'entity="sensor.outside_temperature",'
        'friendly_name="Outside Temperature"} 15.6' in body
    )
    assert (
        'battery_level_percent{domain="sensor",'
        'entity="sensor.outside_temperature",'
        'friendly_name="Outside Temperature"} 12.0' in body
    )
    assert (
        'current_temperature_c{domain="climate",'
        'entity="climate.heatpump",'
        'friendly_name="HeatPump"} 25.0' in body
    )
    assert (
        'humidifier_target_humidity_percent{domain="humidifier",'
        'entity="humidifier.humidifier",'
        'friendly_name="Humidifier"} 68.0' in body
    )
    assert (
        'humidifier_state{domain="humidifier",'
        'entity="humidifier.dehumidifier",'
        'friendly_name="Dehumidifier"} 1.0' in body
    )
    # Each available humidifier mode becomes its own labelled sample.
    assert (
        'humidifier_mode{domain="humidifier",'
        'entity="humidifier.hygrostat",'
        'friendly_name="Hygrostat",'
        'mode="home"} 1.0' in body
    )
    assert (
        'humidifier_mode{domain="humidifier",'
        'entity="humidifier.hygrostat",'
        'friendly_name="Hygrostat",'
        'mode="eco"} 0.0' in body
    )
    assert (
        'humidity_percent{domain="sensor",'
        'entity="sensor.outside_humidity",'
        'friendly_name="Outside Humidity"} 54.0' in body
    )
    # Unit-name mangling: unknown units fall back to sensor_unit_<unit>.
    assert (
        'sensor_unit_kwh{domain="sensor",'
        'entity="sensor.television_energy",'
        'friendly_name="Television Energy"} 74.0' in body
    )
    assert (
        'power_kwh{domain="sensor",'
        'entity="sensor.radio_energy",'
        'friendly_name="Radio Energy"} 14.0' in body
    )
    assert (
        'sensor_unit_sek_per_kwh{domain="sensor",'
        'entity="sensor.electricity_price",'
        'friendly_name="Electricity price"} 0.123' in body
    )
    # Non-ASCII characters in units are escaped as u0x.. sequences.
    assert (
        'sensor_unit_u0xb0{domain="sensor",'
        'entity="sensor.wind_direction",'
        'friendly_name="Wind Direction"} 25.0' in body
    )
    assert (
        'sensor_unit_u0xb5g_per_mu0xb3{domain="sensor",'
        'entity="sensor.sps30_pm_1um_weight_concentration",'
        'friendly_name="SPS30 PM <1µm Weight concentration"} 3.7069' in body
    )
@pytest.fixture(name="mock_client")
def mock_client_fixture():
"""Mock the prometheus client."""
with mock.patch(f"{PROMETHEUS_PATH}.prometheus_client") as client:
counter_client = mock.MagicMock()
client.Counter = mock.MagicMock(return_value=counter_client)
setattr(counter_client, "labels", mock.MagicMock(return_value=mock.MagicMock()))
yield counter_client
@pytest.fixture
def mock_bus(hass):
    """Mock the event bus listener."""
    # Replacing listen() lets tests capture the handler the integration
    # registers for EVENT_STATE_CHANGED without wiring up the real bus.
    hass.bus.listen = mock.MagicMock()
@pytest.mark.usefixtures("mock_bus")
async def test_minimal_config(hass, mock_client):
"""Test the minimal config and defaults of component."""
config = {prometheus.DOMAIN: {}}
assert await async_setup_component(hass, prometheus.DOMAIN, config)
await hass.async_block_till_done()
assert hass.bus.listen.called
assert EVENT_STATE_CHANGED == hass.bus.listen.call_args_list[0][0][0]
@pytest.mark.usefixtures("mock_bus")
async def test_full_config(hass, mock_client):
"""Test the full config of component."""
config = {
prometheus.DOMAIN: {
"namespace": "ns",
"default_metric": "m",
"override_metric": "m",
"component_config": {"fake.test": {"override_metric": "km"}},
"component_config_glob": {"fake.time_*": {"override_metric": "h"}},
"component_config_domain": {"climate": {"override_metric": "°C"}},
"filter": {
"include_domains": ["climate"],
"include_entity_globs": ["fake.time_*"],
"include_entities": ["fake.test"],
"exclude_domains": ["script"],
"exclude_entity_globs": ["climate.excluded_*"],
"exclude_entities": ["fake.time_excluded"],
},
}
}
assert await async_setup_component(hass, prometheus.DOMAIN, config)
await hass.async_block_till_done()
assert hass.bus.listen.called
assert EVENT_STATE_CHANGED == hass.bus.listen.call_args_list[0][0][0]
def make_event(entity_id):
    """Build a fake state-changed event for the given entity id."""
    domain = split_entity_id(entity_id)[0]
    new_state = mock.MagicMock(
        state="not blank",
        domain=domain,
        entity_id=entity_id,
        object_id="entity",
        attributes={},
    )
    event = mock.MagicMock(data={"new_state": new_state}, time_fired=12345)
    return event
async def _setup(hass, filter_config):
    """Shared set up for filtering tests."""
    config = {prometheus.DOMAIN: {"filter": filter_config}}
    assert await async_setup_component(hass, prometheus.DOMAIN, config)
    await hass.async_block_till_done()
    # Return the handler the integration registered on the mocked bus so
    # tests can feed events through it directly.
    return hass.bus.listen.call_args_list[0][0][1]
@pytest.mark.usefixtures("mock_bus")
async def test_allowlist(hass, mock_client):
"""Test an allowlist only config."""
handler_method = await _setup(
hass,
{
"include_domains": ["fake"],
"include_entity_globs": ["test.included_*"],
"include_entities": ["not_real.included"],
},
)
tests = [
FilterTest("climate.excluded", False),
FilterTest("fake.included", True),
FilterTest("test.excluded_test", False),
FilterTest("test.included_test", True),
FilterTest("not_real.included", True),
FilterTest("not_real.excluded", False),
]
for test in tests:
event = make_event(test.id)
handler_method(event)
was_called = mock_client.labels.call_count == 1
assert test.should_pass == was_called
mock_client.labels.reset_mock()
@pytest.mark.usefixtures("mock_bus")
async def test_denylist(hass, mock_client):
"""Test a denylist only config."""
handler_method = await _setup(
hass,
{
"exclude_domains": ["fake"],
"exclude_entity_globs": ["test.excluded_*"],
"exclude_entities": ["not_real.excluded"],
},
)
tests = [
FilterTest("fake.excluded", False),
FilterTest("light.included", True),
FilterTest("test.excluded_test", False),
FilterTest("test.included_test", True),
FilterTest("not_real.included", True),
FilterTest("not_real.excluded", False),
]
for test in tests:
event = make_event(test.id)
handler_method(event)
was_called = mock_client.labels.call_count == 1
assert test.should_pass == was_called
mock_client.labels.reset_mock()
@pytest.mark.usefixtures("mock_bus")
async def test_filtered_denylist(hass, mock_client):
"""Test a denylist config with a filtering allowlist."""
handler_method = await _setup(
hass,
{
"include_entities": ["fake.included", "test.excluded_test"],
"exclude_domains": ["fake"],
"exclude_entity_globs": ["*.excluded_*"],
"exclude_entities": ["not_real.excluded"],
},
)
tests = [
FilterTest("fake.excluded", False),
FilterTest("fake.included", True),
FilterTest("alt_fake.excluded_test", False),
FilterTest("test.excluded_test", True),
FilterTest("not_real.excluded", False),
FilterTest("not_real.included", True),
]
for test in tests:
event = make_event(test.id)
handler_method(event)
was_called = mock_client.labels.call_count == 1
assert test.should_pass == was_called
mock_client.labels.reset_mock()
| 30.769886 | 88 | 0.646385 |
d4a5ce6e46b287a64c5029d3f9559c3fb8c84dc3 | 688 | py | Python | wisdom_pets_app/wisdompets/adoptions/models.py | comphonia/python-django_wisdompets | 399cc0def7b3d6a1e131fc951ea87cf48f41eb2b | [
"MIT"
] | null | null | null | wisdom_pets_app/wisdompets/adoptions/models.py | comphonia/python-django_wisdompets | 399cc0def7b3d6a1e131fc951ea87cf48f41eb2b | [
"MIT"
] | null | null | null | wisdom_pets_app/wisdompets/adoptions/models.py | comphonia/python-django_wisdompets | 399cc0def7b3d6a1e131fc951ea87cf48f41eb2b | [
"MIT"
] | null | null | null | from django.db import models
class Pet(models.Model):
    """An adoptable pet and the details shown on its profile."""

    SEX_CHOICES = [('M', 'Male'), ('F', 'Female')]

    name = models.CharField(max_length=100)
    submitter = models.CharField(max_length=100)  # person who listed the pet
    species = models.CharField(max_length=30)
    breed = models.CharField(max_length=30, blank=True)
    description = models.TextField()
    sex = models.CharField(choices=SEX_CHOICES, max_length=1, blank=True)
    submission_date = models.DateTimeField()
    age = models.IntegerField(null=True)  # null when the age is unknown
    vaccinations = models.ManyToManyField('Vaccine', blank=True)

    def __str__(self):
        # Added for consistency with Vaccine.__str__ so admin/shell
        # listings show the pet's name instead of "Pet object (N)".
        return self.name
class Vaccine(models.Model):
    """A vaccine that a pet may have received."""

    name = models.CharField(max_length=50)

    def __str__(self):
        # Human-readable label used by the admin and shell.
        return self.name
| 32.761905 | 75 | 0.706395 |
72be59aaff288f5f6a642505f025321db17b85fc | 2,813 | py | Python | engine/dlib/src/test/test_dlib.py | Krzlfx/defold | ce38de254c08d0f3c63fabee07b44cbf1433a428 | [
"ECL-2.0",
"Apache-2.0"
] | 2,231 | 2020-05-19T08:25:22.000Z | 2022-03-31T18:51:23.000Z | engine/dlib/src/test/test_dlib.py | Krzlfx/defold | ce38de254c08d0f3c63fabee07b44cbf1433a428 | [
"ECL-2.0",
"Apache-2.0"
] | 1,485 | 2020-05-19T10:56:52.000Z | 2022-03-31T16:12:42.000Z | engine/dlib/src/test/test_dlib.py | Krzlfx/defold | ce38de254c08d0f3c63fabee07b44cbf1433a428 | [
"ECL-2.0",
"Apache-2.0"
] | 197 | 2020-05-19T10:20:47.000Z | 2022-03-30T09:52:13.000Z | # Copyright 2020 The Defold Foundation
# Licensed under the Defold License version 1.0 (the "License"); you may not use
# this file except in compliance with the License.
#
# You may obtain a copy of the License, together with FAQs at
# https://www.defold.com/license
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import unittest
import dlib
from os import path
class TestDlib(unittest.TestCase):
    """Tests for the dlib Python bindings: hashing, LZ4 and XTEA-CTR crypto.

    NOTE(review): Python 2 source -- the long literals below (e.g.
    0xd861e2f7L) are not valid Python 3 syntax.
    """

    def testHash(self):
        # 32- and 64-bit buffer hashes must match known reference values.
        h1 = dlib.dmHashBuffer32("foo")
        h2 = dlib.dmHashBuffer64("foo")
        self.assertEqual(0xd861e2f7L, h1)
        self.assertEqual(0x97b476b3e71147f7L, h2)

    def testLZ4(self):
        #
        # Test to compress/decompress a short text...
        #
        uncompressed = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas ornare non massa a imperdiet. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus lorem diam, ultricies et dapibus vel, interdum consequat metus. Cras suscipit bibendum dignissim. Maecenas non lectus enim. Integer rhoncus fringilla felis in dictum. Nunc justo risus, volutpat id elementum molestie, eleifend sit amet tellus. Mauris accumsan ornare justo, sed cursus justo bibendum et. Vestibulum massa metus, rutrum id libero a, ultrices varius arcu. Sed sed condimentum enim. Mauris ac sodales lacus, at lacinia est. Nunc sed libero ac neque cursus venenatis. Phasellus laoreet est quis massa cursus."
        uncompressed_len = len(uncompressed)
        max_compressed_size = dlib.dmLZ4MaxCompressedSize(uncompressed_len)
        self.assertGreater(max_compressed_size, 0)
        compressed = dlib.dmLZ4CompressBuffer(uncompressed, uncompressed_len, max_compressed_size)
        # Decompression requires the original (uncompressed) length.
        decompressed = dlib.dmLZ4DecompressBuffer(compressed, uncompressed_len)
        self.assertEqual(decompressed, uncompressed)
        #
        # Test to decompress lz4 encoded file
        #
        foo_file_size = path.getsize("data/foo.lz4")
        foo_f = open("data/foo.lz4", "rb")
        foo_compressed = foo_f.read(foo_file_size)
        foo_f.close()
        foo_decompressed = dlib.dmLZ4DecompressBuffer(foo_compressed, 3)
        self.assertEqual(foo_decompressed, "foo")

    def testCrypt(self):
        # XTEA-CTR round trip with a 16-byte key against a known ciphertext.
        key = "12345678abcdefgh"
        s = "ABCDEFGH12345678XYZ"
        enc = dlib.dmEncryptXTeaCTR(s, key)
        self.assertEqual("\x81\xb4\xa1\x04\x2d\xac\xe5\xcb\x77\x89\xec\x11\x61\xc3\xdc\xfa\xb9\xa3\x25", enc)
        dec = dlib.dmDecryptXTeaCTR(enc, key)
        self.assertEqual(s, dec)
if __name__ == '__main__':
    # Run the test suite when the file is executed directly.
    unittest.main()
| 47.677966 | 707 | 0.723427 |
0d0f25af145e05e7ec754e18ab35c20f8c4a6f03 | 3,731 | py | Python | sandbox/apps/python/multigrid/jacobi2D/exec_mg.py | bollu/polymage | 517657142cc3ae74e9daff3b41a0257d6a4ce2b6 | [
"Apache-2.0"
] | 10 | 2016-07-22T06:53:11.000Z | 2021-02-19T06:22:00.000Z | sandbox/apps/python/multigrid/jacobi2D/exec_mg.py | bollu/polymage | 517657142cc3ae74e9daff3b41a0257d6a4ce2b6 | [
"Apache-2.0"
] | null | null | null | sandbox/apps/python/multigrid/jacobi2D/exec_mg.py | bollu/polymage | 517657142cc3ae74e9daff3b41a0257d6a4ce2b6 | [
"Apache-2.0"
] | 2 | 2017-11-21T20:29:36.000Z | 2021-05-21T01:52:05.000Z | from __init__ import *
import sys
import os
import ctypes
import numpy as np
import time
from utils import *
from printer import print_line, print_layout, print_errors
def minimal_exec_mg(pipe_lib, pipe_lib_func, func_params,
                    func_args, tuner_data, app_data):
    """Run the compiled multigrid pipeline for app_data['nit'] iterations.

    Alternates the U_/W_ grids between input and output on even/odd
    iterations. `func_args` and `tuner_data` parameters are unused here;
    the argument lists are rebuilt locally from `func_params`.
    """
    it = 0
    it_max = app_data['nit']

    pool_alloc = app_data['pool_alloc'] # bool
    grid_data = app_data['grid_data']
    cycle_name = app_data['cycle_name']

    # build function argument list based on the iteration,
    # even : in = U_ : out = W_
    # odd : in = W_ : out = U_
    func_args = []
    arg_data = {}

    n = app_data['n']
    arg_data['n'] = n
    arg_data['F_'] = grid_data['F_']

    # even-iteration argument list: read U_, write W_
    arg_data['V_'] = grid_data['U_']
    arg_data[cycle_name] = grid_data['W_']
    func_args.append(map_cfunc_args(func_params, arg_data))

    # odd-iteration argument list: read W_, write U_
    # (safe to reuse arg_data: map_cfunc_args already consumed the values)
    arg_data['V_'] = grid_data['W_']
    arg_data[cycle_name] = grid_data['U_']
    func_args.append(map_cfunc_args(func_params, arg_data))

    # optional memory-pool setup in the compiled library
    if pool_alloc:
        pipe_lib.pool_init()

    while it < it_max:
        pipe_lib_func(*(func_args[it%2]))
        it += 1

    if pool_alloc:
        pipe_lib.pool_destroy()

    return
def calc_norm(U_, app_data):
    """Invoke the compiled norm pipeline and record error/residual in app_data.

    The current 'resid'/'err' values are rotated into 'old_residual'/'old_err'
    before being overwritten with the freshly computed norms.
    """
    N = app_data['N']
    grids = app_data['grid_data']

    # one-element output buffers the C function writes into
    resid = np.zeros((1), np.float64)
    err = np.zeros((1), np.float64)

    norm_args = [
        ctypes.c_int(N),
        ctypes.c_void_p(grids['F_'].ctypes.data),
        ctypes.c_void_p(grids['U_EXACT_'].ctypes.data),
        ctypes.c_void_p(U_.ctypes.data),
        ctypes.c_void_p(err.ctypes.data),
        ctypes.c_void_p(resid.ctypes.data),
    ]

    app_data['pipeline_norm'](*norm_args)

    # rotate the current norms into the "old" slots before overwriting
    app_data['old_residual'] = app_data['resid']
    app_data['old_err'] = app_data['err']
    app_data['resid'] = resid[0]
    app_data['err'] = err[0]

    return
def call_mg_cycle(U_, W_, app_data):
    """Run one multigrid cycle via the compiled pipeline library.

    Reads from grid U_ and writes the result into grid W_; the callable is
    looked up in app_data as 'pipeline_' + cycle name.
    """
    grids = app_data['grid_data']
    cycle_func = app_data['pipeline_' + app_data['cycle_name']]

    cycle_args = [
        ctypes.c_int(app_data['n']),
        ctypes.c_void_p(grids['F_'].ctypes.data),
        ctypes.c_void_p(U_.ctypes.data),
        ctypes.c_void_p(W_.ctypes.data),
    ]

    cycle_func(*cycle_args)

    return
def multigrid(app_data):
    """Drive the multigrid solver for the configured runs/iterations.

    In timer mode only wall-clock time is measured; otherwise the norm is
    computed and printed after every iteration. U_/W_ alternate between
    input and output each iteration via the UW list.
    """
    grid_data = app_data['grid_data']
    U_ = grid_data['U_']
    W_ = grid_data['W_']
    UW = [U_, W_]
    nit = app_data['nit']
    time_store = {}
    time_taken = 0
    print_layout(app_data)
    print_errors(0, app_data)
    timer = app_data['timer']
    nruns = int(app_data['runs'])
    run = 0
    while run < nruns:
        it = 0
        if timer:
            t1 = time.time()
        while it < nit:
            it += 1
            # even it reads U_/writes W_, odd it reads W_/writes U_
            call_mg_cycle(UW[(it-1)%2], UW[it%2], app_data)
            if not timer:
                # norm reporting is skipped while timing to avoid overhead
                calc_norm(UW[it%2], app_data)
                print_errors(it, app_data)
        if timer:
            t2 = time.time()
            time_store[run] = float(t2) - float(t1)
            #print("Time taken for iter ", run," = ",time_store[run]*1000, "ms")
            time_taken += time_store[run]
        run += 1
    if timer:
        # report the mean wall-clock time over all runs
        time_taken = time_taken / nruns
        print("")
        print("[exec_mg] : Average time taken to execute = ",
              time_taken*1000, " ms")
    return
6b7609b4e9b0848f382809a1fd4b36afa06f9bd8 | 1,998 | py | Python | pypy/translator/js/examples/bnb/msgstruct.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/translator/js/examples/bnb/msgstruct.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | null | null | null | pypy/translator/js/examples/bnb/msgstruct.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | from struct import pack, unpack, calcsize
# Optional per-deployment overrides from a localmsg module; fall back to
# sensible defaults when it is absent.
try:
    from localmsg import PORTS
except ImportError:
    # no local configuration: no predefined ports
    PORTS = {}
try:
    from localmsg import HOSTNAME
except ImportError:
    # fall back to this machine's network name
    from socket import gethostname
    HOSTNAME = gethostname()
MSG_WELCOME = "Welcome to gamesrv.py(3) !\n"
MSG_BROADCAST_PORT= "*"
MSG_DEF_PLAYFIELD = "p"
MSG_DEF_KEY = "k"
MSG_DEF_ICON = "r"
MSG_DEF_BITMAP = "m"
MSG_DEF_SAMPLE = "w"
MSG_DEF_MUSIC = "z"
MSG_PLAY_MUSIC = "Z"
MSG_FADEOUT = "f"
MSG_PLAYER_JOIN = "+"
MSG_PLAYER_KILL = "-"
MSG_PLAYER_ICON = "i"
MSG_PING = "g"
MSG_PONG = "G"
MSG_INLINE_FRAME = "\\"
MSG_PATCH_FILE = MSG_DEF_MUSIC
MSG_ZPATCH_FILE = "P"
MSG_MD5_FILE = "M"
MSG_RECORDED = "\x00"
CMSG_PROTO_VERSION= "v"
CMSG_KEY = "k"
CMSG_ADD_PLAYER = "+"
CMSG_REMOVE_PLAYER= "-"
CMSG_UDP_PORT = "<"
CMSG_ENABLE_SOUND = "s"
CMSG_ENABLE_MUSIC = "m"
CMSG_PING = "g"
CMSG_PONG = "G"
CMSG_DATA_REQUEST = "M"
CMSG_PLAYER_NAME = "n"
BROADCAST_MESSAGE = "game!" # less than 6 bytes
def message(tp, *values):
    """Encode a message of type *tp* with *values* into one wire string.

    Wire layout: one length byte, the struct typecode string describing
    the payload, the single message-type character, then the packed
    values in network byte order.

    NOTE(review): Python 2 semantics — strings here are byte strings and
    *tp* is a one-character str; under Python 3 pack() would require
    bytes for the 's'/'c' fields.
    """
    strtype = type('')
    # Build one struct typecode per value: strings verbatim ('<N>s'),
    # small non-negative ints as unsigned bytes ('B'), everything else
    # as 4-byte signed longs ('l').
    typecodes = ['']
    for v in values:
        if type(v) is strtype:
            typecodes.append('%ds' % len(v))
        elif 0 <= v < 256:
            typecodes.append('B')
        else:
            typecodes.append('l')
    typecodes = ''.join(typecodes)
    # The typecode string length must fit in the single leading length byte.
    assert len(typecodes) < 256
    return pack(("!B%dsc" % len(typecodes)) + typecodes,
                len(typecodes), typecodes, tp, *values)
def decodemessage(data):
    """Decode one message from the front of *data*.

    Returns ``(fields, remaining)`` when a complete message is present,
    otherwise ``(None, data)`` so the caller can buffer more input.

    NOTE(review): Python 2 semantics — *data* is a byte string whose
    elements are 1-character strings (hence ``ord(data[0])``).
    """
    if data:
        # First byte holds the length of the typecode string that follows.
        limit = ord(data[0]) + 1
        if len(data) >= limit:
            # '!c' re-adds network byte order and the message-type char.
            typecodes = "!c" + data[1:limit]
            try:
                end = limit + calcsize(typecodes)
            except TypeError:
                # Unusable typecode string: discard the buffered data.
                return None, ''
            if len(data) >= end:
                return unpack(typecodes, data[limit:end]), data[end:]
            elif end > 1000000:
                # Refuse absurd message sizes instead of buffering forever.
                raise OverflowError
    return None, data
| 25.291139 | 69 | 0.576577 |
e3568a07c42ad23a602c677bbc36542963548051 | 10,194 | py | Python | freezer/storage/base.py | mr-smart/freezer | b268bea3d10727bcc043eb26d1396411a40513c3 | [
"MIT"
] | null | null | null | freezer/storage/base.py | mr-smart/freezer | b268bea3d10727bcc043eb26d1396411a40513c3 | [
"MIT"
] | null | null | null | freezer/storage/base.py | mr-smart/freezer | b268bea3d10727bcc043eb26d1396411a40513c3 | [
"MIT"
] | 1 | 2019-12-03T15:38:27.000Z | 2019-12-03T15:38:27.000Z | # (c) Copyright 2015 Hewlett-Packard Development Company, L.P.
# (c) Copyright 2016 Hewlett-Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
import tempfile
from oslo_log import log
import six
from freezer.utils import utils
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Storage(object):
    """
    Abstract base class for freezer storage backends.

    Any freezer storage implementation should be inherited from this abstract
    class and must implement the abstract I/O primitives below.  Backups are
    organised as level-zero (full) backups plus numbered incremental levels.
    """

    # Backend type identifier; concrete subclasses override it.
    # Exposed read-only through the ``type`` property.
    _type = None

    def __init__(self, skip_prepare=False):
        """
        :param skip_prepare: when True, skip creating directories/containers
            on construction (useful when the backend is known to exist).
        """
        if not skip_prepare:
            self.prepare()

    @abc.abstractmethod
    def get_file(self, from_path, to_path):
        """Download a single object from the backend to a local path."""
        pass

    @abc.abstractmethod
    def write_backup(self, rich_queue, backup):
        """
        Stream backup data from the queue into the backend.

        :param rich_queue:
        :param backup:
        :type backup: freezer.storage.base.Backup
        :return:
        """
        pass

    @abc.abstractmethod
    def prepare(self):
        """
        Creates directories, containers

        :return: nothing
        """
        pass

    @abc.abstractmethod
    def get_level_zero(self,
                       engine,
                       hostname_backup_name,
                       recent_to_date=None):
        """
        Gets backups by backup_name and hostname

        :type engine: freezer.engine.engine.BackupEngine
        :param hostname_backup_name:
        :type hostname_backup_name: str
        :type recent_to_date: int
        :param recent_to_date:
        :rtype: collections.Iterable[freezer.storage.base.Backup]
        :return: dictionary of level zero timestamps with attached storage
        """
        pass

    @property
    def type(self):
        """Backend type identifier (see ``_type``)."""
        return self._type

    def get_latest_level_zero_increments(self, engine, hostname_backup_name,
                                         recent_to_date=None):
        """
        Returns the latest zero level backup with increments

        :param engine:
        :param hostname_backup_name:
        :param recent_to_date:
        :rtype: dict[int, freezer.storage.base.Backup]
        :return: Dictionary[backup_level, backup]
        :raises IndexError: when no backup matches hostname_backup_name
        """
        zeros = self.get_level_zero(engine=engine,
                                    hostname_backup_name=hostname_backup_name,
                                    recent_to_date=recent_to_date)
        if not zeros:
            err_msg = 'No matching backup name "{0}" found'.format(
                hostname_backup_name
            )
            raise IndexError(err_msg)

        # Most recent full backup.
        backup = max(zeros, key=lambda backup: backup.timestamp)
        """:type : freezer.storage.base.Backup"""

        increments = backup.get_increments()

        # BUG FIX: dict.iteritems() does not exist on Python 3; items()
        # behaves equivalently on both interpreters (this module already
        # targets py2/py3 via six).
        return {level: backup for level, backup in increments.items()
                if not recent_to_date or backup.timestamp <= recent_to_date}

    def remove_older_than(self, engine, remove_older_timestamp,
                          hostname_backup_name):
        """
        Removes backups which are older than or equal to the specified
        timestamp

        :type engine: freezer.engine.engine.BackupEngine
        :type remove_older_timestamp: int
        :type hostname_backup_name: str
        """
        backups = self.get_level_zero(engine, hostname_backup_name,
                                      remove_older_timestamp)
        for backup in backups:
            backup.remove()

    @abc.abstractmethod
    def info(self):
        """Report backend information (defined by the concrete storage)."""
        pass

    def previous_backup(self, engine, hostname_backup_name, no_incremental,
                        max_level, always_level, restart_always_level):
        """
        Find the backup the next incremental level should be based on, or
        return None when a fresh level-zero backup is required.

        :type engine: freezer.engine.engine.BackupEngine
        :param engine: engine instance
        :param hostname_backup_name: name of backup
        :param no_incremental:
        :param max_level:
        :param always_level:
        :param restart_always_level:
        :return:
        """
        if no_incremental:
            return None
        try:
            increments = self.get_latest_level_zero_increments(
                engine=engine,
                hostname_backup_name=hostname_backup_name)
            highest_level = max(increments.keys())
            highest_level_backup = increments[highest_level]
            if max_level and max_level <= highest_level:
                return None
            if always_level and highest_level > always_level:
                return None
            # BUG FIX: only compute the expiry when restart_always_level is
            # set; the original evaluated `restart_always_level * 86400`
            # unconditionally, raising TypeError when the option is None.
            if restart_always_level:
                expire_time = (highest_level_backup.timestamp +
                               restart_always_level * 86400)
                if utils.DateTime.now().timestamp > expire_time:
                    return None
            if always_level and highest_level == always_level:
                return increments[highest_level - 1]
            return highest_level_backup
        except IndexError:
            # No level-zero backup exists yet.
            return None

    @abc.abstractmethod
    def put_file(self, from_path, to_path):
        """
        Upload a local file to the backend.

        :type from_path: str
        :param from_path:
        :type to_path: str
        :param to_path:
        """
        pass

    @abc.abstractmethod
    def put_metadata(self,
                     engine_metadata_path,
                     freezer_metadata_path,
                     backup):
        """
        Upload the engine and freezer metadata documents for *backup*.

        :param engine_metadata_path:
        :param freezer_metadata_path:
        :type backup: freezer.storage.base.Backup
        :param backup:
        :return:
        """
        pass

    @abc.abstractmethod
    def create_dirs(self, path):
        """Create the directory hierarchy for *path* on the backend."""
        pass
class Backup(object):
    """
    Internal freezer representation of backup.

    Includes:
        * name (hostname_backup_name) of backup
        * timestamp of backup (when it was executed)
        * level of backup (freezer supports incremental backup):
            Completed full backup has level 0 and can be restored without any
            additional information.
            Levels 1, 2, ... means that our backup is incremental and contains
            only smart portion of information (that was actually changed
            since the last backup)
    """

    def __init__(self, engine, hostname_backup_name,
                 level_zero_timestamp, timestamp, level, storage=None):
        """
        :type engine: freezer.engine.engine.BackupEngine
        :type storage: freezer.storage.physical.PhysicalStorage
        :param hostname_backup_name: name (hostname_backup_name) of backup
        :type hostname_backup_name: str
        :param level_zero_timestamp: timestamp of the level-zero backup this
            backup belongs to
        :param timestamp: timestamp of backup (when it was executed)
        :type timestamp: int
        :param level: current incremental level of backup
        :type level: int
        :return:
        """
        self.hostname_backup_name = hostname_backup_name
        self.timestamp = timestamp
        self.level = level
        self.engine = engine
        self.storage = storage
        self.level_zero_timestamp = level_zero_timestamp
        # Pre-compute backend paths only when a storage is attached.
        # Layout:
        #   <storage>/data/<engine>/<name>/<zero_ts>/<level>_<ts>/{data,segments,engine_metadata}
        #   <storage>/metadata/<engine>/<name>/<zero_ts>/<level>_<ts>/metadata
        if storage:
            self.increments_data_path = utils.path_join(
                self.storage.storage_path, "data", self.engine.name,
                self.hostname_backup_name, self.level_zero_timestamp)
            self.increments_metadata_path = utils.path_join(
                self.storage.storage_path, "metadata", self.engine.name,
                self.hostname_backup_name, self.level_zero_timestamp)
            self.data_prefix_path = utils.path_join(
                self.increments_data_path,
                "{0}_{1}".format(self.level, self.timestamp))
            self.engine_metadata_path = utils.path_join(
                self.data_prefix_path, "engine_metadata")
            self.metadata_path = utils.path_join(
                self.increments_metadata_path,
                "{0}_{1}".format(self.level, self.timestamp), "metadata")
            self.data_path = utils.path_join(self.data_prefix_path, "data")
            self.segments_path = utils.path_join(self.data_prefix_path,
                                                 "segments")

    def copy(self, storage):
        """
        Return a copy of this backup attached to a different storage.

        :type storage: freezer.storage.physical.PhysicalStorage
        :return:
        """
        return Backup(
            engine=self.engine,
            hostname_backup_name=self.hostname_backup_name,
            level_zero_timestamp=self.level_zero_timestamp,
            timestamp=self.timestamp,
            level=self.level,
            storage=storage)

    def remove(self):
        """Delete this backup's metadata and data trees from the backend."""
        self.storage.rmtree(self.increments_metadata_path)
        self.storage.rmtree(self.increments_data_path)

    def get_increments(self):
        """
        Gets all incremental backups based on a level-zero backup with
        timestamp

        :rtype: dict[int, freezer.storage.base.Backup]
        :return: Dictionary[backup_level, backup]
        """
        increments = self.storage.listdir(self.increments_metadata_path)
        # BUG FIX: the original called sorted(increments) and discarded the
        # result (sorted() returns a new list); bind it so the
        # "<level>_<timestamp>" directory names are actually ordered.
        increments = sorted(increments)
        increments = [name.split('_') for name in increments]
        return {int(increment[0]): Backup(
            storage=self.storage,
            engine=self.engine,
            hostname_backup_name=self.hostname_backup_name,
            timestamp=int(increment[1]),
            level_zero_timestamp=self.level_zero_timestamp,
            level=int(increment[0])
        ) for increment in increments}

    def _get_file(self, filename):
        """Download *filename* and JSON-decode its first line."""
        # NOTE(review): the temp file is reopened by name while still open,
        # which assumes a POSIX platform — confirm if Windows support matters.
        file = tempfile.NamedTemporaryFile('wb', delete=True)
        self.storage.get_file(filename, file.name)
        with open(file.name) as f:
            content = f.readlines()
        LOG.info("Content download {0}".format(content))
        file.close()
        return json.loads(content[0])

    def metadata(self):
        """Return the freezer metadata document for this backup."""
        return self._get_file(self.metadata_path)

    def engine_metadata(self):
        """Return the engine-specific metadata document for this backup."""
        return self._get_file(self.engine_metadata_path)
| 34.439189 | 78 | 0.617912 |
ac6528289c086021e4777e6a98c1929a3faafbad | 3,594 | py | Python | interactive/test/test_transitivitytable.py | diorge/interactive-sort | f60fbb6686ab8cdbc01d056c8976b7c983420565 | [
"MIT"
] | 1 | 2019-03-07T02:33:01.000Z | 2019-03-07T02:33:01.000Z | interactive/test/test_transitivitytable.py | diorge/interactive-sort | f60fbb6686ab8cdbc01d056c8976b7c983420565 | [
"MIT"
] | null | null | null | interactive/test/test_transitivitytable.py | diorge/interactive-sort | f60fbb6686ab8cdbc01d056c8976b7c983420565 | [
"MIT"
] | null | null | null | """ Tests for the Transitivity Table class """
from unittest import TestCase
import interactive.sort as isort
class TestTransitivityTable(TestCase):
    """Test suite for the TransitivityTable class."""

    dataset = ['a', 'b', 'c', 'd']

    def test_construction_dimension(self):
        """A freshly built table reports one dimension per element."""
        subject = isort.TransitivityTable(self.dataset)
        assert subject.dimension == len(self.dataset)

    def test_empty_state(self):
        """An empty table knows no ordering between any pair."""
        subject = isort.TransitivityTable(self.dataset)
        assert not subject.ishigher('a', 'b')
        assert not subject.ishigher('b', 'a')
        assert subject.orderof('a', 'b') == isort.Ordering.Unknown

    def test_set_and_test(self):
        """Recording an ordering makes it immediately queryable."""
        subject = isort.TransitivityTable(self.dataset)
        subject.order('a', 'b', isort.Ordering.Higher)
        assert subject.ishigher('a', 'b')

    def test_set_and_reflectivity(self):
        """Recording a Lower ordering implies the mirrored Higher one."""
        subject = isort.TransitivityTable(self.dataset)
        subject.order('a', 'b', isort.Ordering.Lower)
        assert subject.ishigher('b', 'a')

    def test_transitivity(self):
        """a < b and b < c must imply c > a."""
        subject = isort.TransitivityTable(self.dataset)
        subject.order('a', 'b', isort.Ordering.Lower)
        subject.order('b', 'c', isort.Ordering.Lower)
        assert subject.ishigher('c', 'a')

    def test_construction_invalid_iterator(self):
        """Constructing from a non-iterable raises TypeError."""
        with self.assertRaises(TypeError):
            isort.TransitivityTable(10)

    def test_construction_empty_iterator(self):
        """An empty iterable yields a zero-dimensional table."""
        subject = isort.TransitivityTable([])
        assert subject.dimension == 0

    def test_orderof_not_existant_origin(self):
        """orderof raises KeyError for an unknown origin element."""
        subject = isort.TransitivityTable(self.dataset)
        with self.assertRaises(KeyError):
            subject.orderof('z', 'a')

    def test_orderof_not_existant_target(self):
        """orderof raises KeyError for an unknown target element."""
        subject = isort.TransitivityTable(self.dataset)
        with self.assertRaises(KeyError):
            subject.orderof('a', 'z')

    def test_order_not_existant_origin(self):
        """order raises ValueError for an unknown origin element."""
        subject = isort.TransitivityTable(self.dataset)
        with self.assertRaises(ValueError):
            subject.order('z', 'a', isort.Ordering.Higher)

    def test_order_not_existant_target(self):
        """order raises ValueError for an unknown target element."""
        subject = isort.TransitivityTable(self.dataset)
        with self.assertRaises(ValueError):
            subject.order('a', 5, isort.Ordering.Unknown)

    def test_order_invalid_value(self):
        """order raises TypeError when the value is not an Ordering."""
        subject = isort.TransitivityTable(self.dataset)
        with self.assertRaises(TypeError):
            subject.order('a', 'b', 5)

    def test_order_multiple_times(self):
        """Ordering the same pair twice raises ValueError."""
        subject = isort.TransitivityTable(self.dataset)
        subject.order('a', 'b', isort.Ordering.Lower)
        with self.assertRaises(ValueError):
            subject.order('b', 'a', isort.Ordering.Lower)
| 39.933333 | 73 | 0.649137 |
564e51bfe8aae57bc5a2e92bd4a452d0365ccc39 | 13,589 | py | Python | shamrock/seeder/crawl_store.py | zcomputerwiz/shamrock-blockchain | 2e2d8a134f0147379812085543ac98f37ce28c2b | [
"Apache-2.0"
] | null | null | null | shamrock/seeder/crawl_store.py | zcomputerwiz/shamrock-blockchain | 2e2d8a134f0147379812085543ac98f37ce28c2b | [
"Apache-2.0"
] | null | null | null | shamrock/seeder/crawl_store.py | zcomputerwiz/shamrock-blockchain | 2e2d8a134f0147379812085543ac98f37ce28c2b | [
"Apache-2.0"
] | null | null | null | import asyncio
import dataclasses
import ipaddress
import logging
import random
import time
from typing import List, Dict
import aiosqlite
from shamrock.seeder.peer_record import PeerRecord, PeerReliability
log = logging.getLogger(__name__)
class CrawlStore:
    """In-memory peer-crawl state backed by an aiosqlite database.

    Peer records and their reliability statistics are kept in dictionaries
    keyed by peer_id for fast access and are flushed to / reloaded from
    SQLite explicitly via load_to_db() / unload_from_db().
    """
    # Open aiosqlite connection used by all persistence methods.
    crawl_db: aiosqlite.Connection
    last_timestamp: int
    lock: asyncio.Lock
    # peer_id -> PeerRecord (in-memory working set).
    host_to_records: Dict
    # peer_id -> wall-clock time it was last handed out for crawling.
    host_to_selected_time: Dict
    # peer_id -> PeerReliability statistics.
    host_to_reliability: Dict
    banned_peers: int
    ignored_peers: int
    reliable_peers: int
    @classmethod
    async def create(cls, connection: aiosqlite.Connection):
        """Build a store on *connection*: create schema, then load state."""
        self = cls()
        self.crawl_db = connection
        await self.crawl_db.execute(
            (
                "CREATE TABLE IF NOT EXISTS peer_records("
                " peer_id text PRIMARY KEY,"
                " ip_address text,"
                " port bigint,"
                " connected int,"
                " last_try_timestamp bigint,"
                " try_count bigint,"
                " connected_timestamp bigint,"
                " added_timestamp bigint,"
                " best_timestamp bigint,"
                " version text,"
                " handshake_time text)"
            )
        )
        await self.crawl_db.execute(
            (
                "CREATE TABLE IF NOT EXISTS peer_reliability("
                " peer_id text PRIMARY KEY,"
                " ignore_till int, ban_till int,"
                " stat_2h_w real, stat_2h_c real, stat_2h_r real,"
                " stat_8h_w real, stat_8h_c real, stat_8h_r real,"
                " stat_1d_w real, stat_1d_c real, stat_1d_r real,"
                " stat_1w_w real, stat_1w_c real, stat_1w_r real,"
                " stat_1m_w real, stat_1m_c real, stat_1m_r real,"
                " tries int, successes int)"
            )
        )
        await self.crawl_db.execute(("CREATE TABLE IF NOT EXISTS good_peers(ip text)"))
        await self.crawl_db.execute("CREATE INDEX IF NOT EXISTS ip_address on peer_records(ip_address)")
        await self.crawl_db.execute("CREATE INDEX IF NOT EXISTS port on peer_records(port)")
        await self.crawl_db.execute("CREATE INDEX IF NOT EXISTS connected on peer_records(connected)")
        await self.crawl_db.execute("CREATE INDEX IF NOT EXISTS added_timestamp on peer_records(added_timestamp)")
        await self.crawl_db.execute("CREATE INDEX IF NOT EXISTS peer_id on peer_reliability(peer_id)")
        await self.crawl_db.execute("CREATE INDEX IF NOT EXISTS ignore_till on peer_reliability(ignore_till)")
        await self.crawl_db.commit()
        self.last_timestamp = 0
        self.ignored_peers = 0
        self.banned_peers = 0
        self.reliable_peers = 0
        self.host_to_selected_time = {}
        await self.unload_from_db()
        return self
    def maybe_add_peer(self, peer_record: PeerRecord, peer_reliability: PeerReliability):
        """Add the peer to the in-memory maps only if not already present."""
        if peer_record.peer_id not in self.host_to_records:
            self.host_to_records[peer_record.peer_id] = peer_record
        if peer_reliability.peer_id not in self.host_to_reliability:
            self.host_to_reliability[peer_reliability.peer_id] = peer_reliability
    async def add_peer(self, peer_record: PeerRecord, peer_reliability: PeerReliability, save_db: bool = False):
        """Upsert the peer in memory, or persist it to SQLite when save_db."""
        if not save_db:
            self.host_to_records[peer_record.peer_id] = peer_record
            self.host_to_reliability[peer_reliability.peer_id] = peer_reliability
            return
        added_timestamp = int(time.time())
        cursor = await self.crawl_db.execute(
            "INSERT OR REPLACE INTO peer_records VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (
                peer_record.peer_id,
                peer_record.ip_address,
                peer_record.port,
                int(peer_record.connected),
                peer_record.last_try_timestamp,
                peer_record.try_count,
                peer_record.connected_timestamp,
                added_timestamp,
                peer_record.best_timestamp,
                peer_record.version,
                peer_record.handshake_time,
            ),
        )
        await cursor.close()
        cursor = await self.crawl_db.execute(
            "INSERT OR REPLACE INTO peer_reliability"
            " VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (
                peer_reliability.peer_id,
                peer_reliability.ignore_till,
                peer_reliability.ban_till,
                peer_reliability.stat_2h.weight,
                peer_reliability.stat_2h.count,
                peer_reliability.stat_2h.reliability,
                peer_reliability.stat_8h.weight,
                peer_reliability.stat_8h.count,
                peer_reliability.stat_8h.reliability,
                peer_reliability.stat_1d.weight,
                peer_reliability.stat_1d.count,
                peer_reliability.stat_1d.reliability,
                peer_reliability.stat_1w.weight,
                peer_reliability.stat_1w.count,
                peer_reliability.stat_1w.reliability,
                peer_reliability.stat_1m.weight,
                peer_reliability.stat_1m.count,
                peer_reliability.stat_1m.reliability,
                peer_reliability.tries,
                peer_reliability.successes,
            ),
        )
        await cursor.close()
    async def get_peer_reliability(self, peer_id: str) -> PeerReliability:
        """Return reliability stats for *peer_id* (KeyError if unknown)."""
        return self.host_to_reliability[peer_id]
    async def peer_failed_to_connect(self, peer: PeerRecord):
        """Record a failed connection attempt against *peer*."""
        now = int(time.time())
        age_timestamp = int(max(peer.last_try_timestamp, peer.connected_timestamp))
        if age_timestamp == 0:
            # Never tried before: treat as ~1000s old so stats get a weight.
            age_timestamp = now - 1000
        replaced = dataclasses.replace(peer, try_count=peer.try_count + 1, last_try_timestamp=now)
        reliability = await self.get_peer_reliability(peer.peer_id)
        if reliability is None:
            reliability = PeerReliability(peer.peer_id)
        reliability.update(False, now - age_timestamp)
        await self.add_peer(replaced, reliability)
    async def peer_connected(self, peer: PeerRecord):
        """Record a successful connection to *peer*."""
        now = int(time.time())
        age_timestamp = int(max(peer.last_try_timestamp, peer.connected_timestamp))
        if age_timestamp == 0:
            age_timestamp = now - 1000
        replaced = dataclasses.replace(peer, connected=True, connected_timestamp=now)
        reliability = await self.get_peer_reliability(peer.peer_id)
        if reliability is None:
            reliability = PeerReliability(peer.peer_id)
        reliability.update(True, now - age_timestamp)
        await self.add_peer(replaced, reliability)
    async def update_best_timestamp(self, host: str, timestamp):
        """Set the best-known blockchain timestamp for *host*, if tracked."""
        if host not in self.host_to_records:
            return
        record = self.host_to_records[host]
        replaced = dataclasses.replace(record, best_timestamp=timestamp)
        if host not in self.host_to_reliability:
            return
        reliability = self.host_to_reliability[host]
        await self.add_peer(replaced, reliability)
    async def peer_connected_hostname(self, host: str, connected: bool = True):
        """Dispatch a success/failure update for the peer known as *host*."""
        if host not in self.host_to_records:
            return
        record = self.host_to_records[host]
        if connected:
            await self.peer_connected(record)
        else:
            await self.peer_failed_to_connect(record)
    async def get_peers_to_crawl(self, min_batch_size, max_batch_size) -> List[PeerRecord]:
        """Select a randomized batch of peers that are due for a crawl.

        Skips banned/ignored peers and peers selected or contacted too
        recently; IPv4 and IPv6 candidates are batched separately and then
        concatenated.  Also refreshes the banned/ignored counters.
        """
        now = int(time.time())
        records = []
        records_v6 = []
        counter = 0
        self.ignored_peers = 0
        self.banned_peers = 0
        for peer_id in self.host_to_reliability:
            add = False
            counter += 1
            reliability = self.host_to_reliability[peer_id]
            if reliability.ignore_till < now and reliability.ban_till < now:
                add = True
            else:
                if reliability.ban_till >= now:
                    self.banned_peers += 1
                elif reliability.ignore_till >= now:
                    self.ignored_peers += 1
            record = self.host_to_records[peer_id]
            if record.last_try_timestamp == 0 and record.connected_timestamp == 0:
                # Brand-new peer: always eligible.
                add = True
            if peer_id in self.host_to_selected_time:
                last_selected = self.host_to_selected_time[peer_id]
                if time.time() - last_selected < 120:
                    # Handed out to another crawl task less than 2 min ago.
                    add = False
            if add:
                v6 = True
                try:
                    _ = ipaddress.IPv6Address(peer_id)
                except ValueError:
                    v6 = False
                # Re-crawl IPv6 peers more aggressively than IPv4 ones.
                delta_time = 600 if v6 else 1000
                if now - record.last_try_timestamp >= delta_time and now - record.connected_timestamp >= delta_time:
                    if not v6:
                        records.append(record)
                    else:
                        records_v6.append(record)
        # Batch size: at least min_batch_size, ~10% of candidates, capped.
        batch_size = max(min_batch_size, len(records) // 10)
        batch_size = min(batch_size, max_batch_size)
        if len(records) > batch_size:
            random.shuffle(records)
            records = records[:batch_size]
        if len(records_v6) > batch_size:
            random.shuffle(records_v6)
            records_v6 = records_v6[:batch_size]
        records += records_v6
        for record in records:
            self.host_to_selected_time[record.peer_id] = time.time()
        return records
    def get_ipv6_peers(self) -> int:
        """Count tracked peers whose peer_id parses as an IPv6 address."""
        counter = 0
        for peer_id in self.host_to_reliability:
            v6 = True
            try:
                _ = ipaddress.IPv6Address(peer_id)
            except ValueError:
                v6 = False
            if v6:
                counter += 1
        return counter
    def get_total_records(self) -> int:
        """Total number of tracked peer records."""
        return len(self.host_to_records)
    def get_ignored_peers(self) -> int:
        """Ignored-peer count from the most recent get_peers_to_crawl()."""
        return self.ignored_peers
    def get_banned_peers(self) -> int:
        """Banned-peer count from the most recent get_peers_to_crawl()."""
        return self.banned_peers
    def get_reliable_peers(self) -> int:
        """Reliable-peer count from the last load_reliable_peers_to_db()."""
        return self.reliable_peers
    async def load_to_db(self):
        """Persist every in-memory peer (record + reliability) to SQLite."""
        for peer_id in list(self.host_to_reliability.keys()):
            if peer_id in self.host_to_reliability and peer_id in self.host_to_records:
                reliability = self.host_to_reliability[peer_id]
                record = self.host_to_records[peer_id]
                await self.add_peer(record, reliability, True)
        await self.crawl_db.commit()
    async def unload_from_db(self):
        """Replace the in-memory maps with the rows stored in SQLite."""
        self.host_to_records = {}
        self.host_to_reliability = {}
        cursor = await self.crawl_db.execute(
            "SELECT * from peer_reliability",
        )
        rows = await cursor.fetchall()
        await cursor.close()
        for row in rows:
            # Column order must match the peer_reliability schema above.
            reliability = PeerReliability(
                row[0],
                row[1],
                row[2],
                row[3],
                row[4],
                row[5],
                row[6],
                row[7],
                row[8],
                row[9],
                row[10],
                row[11],
                row[12],
                row[13],
                row[14],
                row[15],
                row[16],
                row[17],
                row[18],
                row[19],
            )
            self.host_to_reliability[row[0]] = reliability
        cursor = await self.crawl_db.execute(
            "SELECT * from peer_records",
        )
        rows = await cursor.fetchall()
        await cursor.close()
        for row in rows:
            peer = PeerRecord(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])
            self.host_to_records[row[0]] = peer
    # Crawler -> DNS.
    async def load_reliable_peers_to_db(self):
        """Rewrite the good_peers table with all currently reliable peers."""
        peers = []
        for peer_id in self.host_to_reliability:
            reliability = self.host_to_reliability[peer_id]
            if reliability.is_reliable():
                peers.append(peer_id)
        self.reliable_peers = len(peers)
        cursor = await self.crawl_db.execute(
            "DELETE from good_peers",
        )
        await cursor.close()
        for peer in peers:
            cursor = await self.crawl_db.execute(
                "INSERT OR REPLACE INTO good_peers VALUES(?)",
                (peer,),
            )
            await cursor.close()
        await self.crawl_db.commit()
    def load_host_to_version(self):
        """Return (host->version, host->handshake_time), skipping peers
        whose version is still "undefined"."""
        versions = {}
        handshake = {}
        for host, record in self.host_to_records.items():
            if host not in self.host_to_records:
                continue
            record = self.host_to_records[host]
            if record.version == "undefined":
                continue
            versions[host] = record.version
            handshake[host] = record.handshake_time
        return (versions, handshake)
    def load_best_peer_reliability(self):
        """Return host -> best-known blockchain timestamp for all peers."""
        best_timestamp = {}
        for host, record in self.host_to_records.items():
            best_timestamp[host] = record.best_timestamp
        return best_timestamp
    async def update_version(self, host, version, now):
        """Record the protocol *version* reported by *host* at time *now*."""
        record = self.host_to_records.get(host, None)
        reliability = self.host_to_reliability.get(host, None)
        if record is None or reliability is None:
            return
        record.update_version(version, now)
        await self.add_peer(record, reliability)
| 37.642659 | 118 | 0.58437 |
be129a1ef9346624554eb2fe2b0c09f4bb0cc48e | 9,941 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/ngine_io/cloudstack/plugins/modules/cs_vpc_offering.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/ngine_io/cloudstack/plugins/modules/cs_vpc_offering.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/ngine_io/cloudstack/plugins/modules/cs_vpc_offering.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, David Passante (@dpassante)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: cs_vpc_offering
short_description: Manages vpc offerings on Apache CloudStack based clouds.
description:
- Create, update, enable, disable and remove CloudStack VPC offerings.
author: David Passante (@dpassante)
version_added: 0.1.0
options:
name:
description:
- The name of the vpc offering
type: str
required: true
state:
description:
- State of the vpc offering.
type: str
choices: [ enabled, present, disabled, absent ]
default: present
display_text:
description:
- Display text of the vpc offerings
type: str
service_capabilities:
description:
- Desired service capabilities as part of vpc offering.
type: list
elements: dict
aliases: [ service_capability ]
service_offering:
description:
- The name or ID of the service offering for the VPC router appliance.
type: str
supported_services:
description:
- Services supported by the vpc offering
type: list
elements: str
aliases: [ supported_service ]
service_providers:
description:
- provider to service mapping. If not specified, the provider for the service will be mapped to the default provider on the physical network
type: list
elements: dict
aliases: [ service_provider ]
poll_async:
description:
- Poll async jobs until job has finished.
default: yes
type: bool
extends_documentation_fragment:
- ngine_io.cloudstack.cloudstack
'''
EXAMPLES = '''
- name: Create a vpc offering and enable it
ngine_io.cloudstack.cs_vpc_offering:
name: my_vpc_offering
display_text: vpc offering description
state: enabled
supported_services: [ Dns, Dhcp ]
service_providers:
- {service: 'dns', provider: 'VpcVirtualRouter'}
- {service: 'dhcp', provider: 'VpcVirtualRouter'}
- name: Create a vpc offering with redundant router
ngine_io.cloudstack.cs_vpc_offering:
name: my_vpc_offering
display_text: vpc offering description
supported_services: [ Dns, Dhcp, SourceNat ]
service_providers:
- {service: 'dns', provider: 'VpcVirtualRouter'}
- {service: 'dhcp', provider: 'VpcVirtualRouter'}
- {service: 'SourceNat', provider: 'VpcVirtualRouter'}
service_capabilities:
- {service: 'SourceNat', capabilitytype: 'RedundantRouter', capabilityvalue: true}
- name: Create a region level vpc offering with distributed router
ngine_io.cloudstack.cs_vpc_offering:
name: my_vpc_offering
display_text: vpc offering description
state: present
supported_services: [ Dns, Dhcp, SourceNat ]
service_providers:
- {service: 'dns', provider: 'VpcVirtualRouter'}
- {service: 'dhcp', provider: 'VpcVirtualRouter'}
- {service: 'SourceNat', provider: 'VpcVirtualRouter'}
service_capabilities:
- {service: 'Connectivity', capabilitytype: 'DistributedRouter', capabilityvalue: true}
- {service: 'Connectivity', capabilitytype: 'RegionLevelVPC', capabilityvalue: true}
- name: Remove a vpc offering
ngine_io.cloudstack.cs_vpc_offering:
name: my_vpc_offering
state: absent
'''
RETURN = '''
---
id:
description: UUID of the vpc offering.
returned: success
type: str
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: The name of the vpc offering
returned: success
type: str
sample: MyCustomVPCOffering
display_text:
description: The display text of the vpc offering
returned: success
type: str
sample: My vpc offering
state:
description: The state of the vpc offering
returned: success
type: str
sample: Enabled
service_offering_id:
description: The service offering ID.
returned: success
type: str
sample: c5f7a5fc-43f8-11e5-a151-feff819cdc9f
is_default:
description: Whether VPC offering is the default offering or not.
returned: success
type: bool
sample: false
region_level:
description: Indicated if the offering can support region level vpc.
returned: success
type: bool
sample: false
distributed:
description: Indicates if the vpc offering supports distributed router for one-hop forwarding.
returned: success
type: bool
sample: false
'''
from ansible.module_utils.basic import AnsibleModule
from ..module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackVPCOffering(AnsibleCloudStack):
    """Implements the cs_vpc_offering module logic on top of the shared
    AnsibleCloudStack helper (query_api, poll_job, has_changed, ...)."""
    def __init__(self, module):
        super(AnsibleCloudStackVPCOffering, self).__init__(module)
        # Map CloudStack API result keys to this module's RETURN keys.
        self.returns = {
            'serviceofferingid': 'service_offering_id',
            'isdefault': 'is_default',
            'distributedvpcrouter': 'distributed',
            'supportsregionLevelvpc': 'region_level',
        }
        # Cached lookup result; populated lazily by get_vpc_offering().
        self.vpc_offering = None
    def get_vpc_offering(self):
        """Return the existing VPC offering matching the 'name' parameter,
        or None; the result is cached on the instance."""
        if self.vpc_offering:
            return self.vpc_offering
        args = {
            'name': self.module.params.get('name'),
        }
        vo = self.query_api('listVPCOfferings', **args)
        if vo:
            # The API may match loosely; keep only the exact name match.
            for vpc_offer in vo['vpcoffering']:
                if args['name'] == vpc_offer['name']:
                    self.vpc_offering = vpc_offer
        return self.vpc_offering
    def get_service_offering_id(self):
        """Resolve the 'service_offering' parameter (name or ID) to a system
        service offering ID; fail the module if it cannot be found."""
        service_offering = self.module.params.get('service_offering')
        if not service_offering:
            return None
        args = {
            'issystem': True
        }
        service_offerings = self.query_api('listServiceOfferings', **args)
        if service_offerings:
            for s in service_offerings['serviceoffering']:
                if service_offering in [s['name'], s['id']]:
                    return s['id']
        self.fail_json(msg="Service offering '%s' not found" % service_offering)
    def create_or_update(self):
        """Ensure the offering exists, then apply any pending updates."""
        vpc_offering = self.get_vpc_offering()
        if not vpc_offering:
            vpc_offering = self.create_vpc_offering()
        return self.update_vpc_offering(vpc_offering)
    def create_vpc_offering(self):
        """Create the VPC offering (no-op in check mode); returns the new
        offering dict, or None when check mode / async polling is off."""
        vpc_offering = None
        self.result['changed'] = True
        args = {
            'name': self.module.params.get('name'),
            'state': self.module.params.get('state'),
            'displaytext': self.module.params.get('display_text'),
            'supportedservices': self.module.params.get('supported_services'),
            'serviceproviderlist': self.module.params.get('service_providers'),
            'serviceofferingid': self.get_service_offering_id(),
            'servicecapabilitylist': self.module.params.get('service_capabilities'),
        }
        # These parameters are only mandatory when creating a new offering.
        required_params = [
            'display_text',
            'supported_services',
        ]
        self.module.fail_on_missing_params(required_params=required_params)
        if not self.module.check_mode:
            res = self.query_api('createVPCOffering', **args)
            poll_async = self.module.params.get('poll_async')
            if poll_async:
                vpc_offering = self.poll_job(res, 'vpcoffering')
        return vpc_offering
    def delete_vpc_offering(self):
        """Delete the offering if present (no-op in check mode)."""
        vpc_offering = self.get_vpc_offering()
        if vpc_offering:
            self.result['changed'] = True
            args = {
                'id': vpc_offering['id'],
            }
            if not self.module.check_mode:
                res = self.query_api('deleteVPCOffering', **args)
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    vpc_offering = self.poll_job(res, 'vpcoffering')
        return vpc_offering
    def update_vpc_offering(self, vpc_offering):
        """Apply name/display_text/state changes to an existing offering."""
        if not vpc_offering:
            return vpc_offering
        args = {
            'id': vpc_offering['id'],
            'state': self.module.params.get('state'),
            'name': self.module.params.get('name'),
            'displaytext': self.module.params.get('display_text'),
        }
        # Only 'enabled'/'disabled' map to API states ('Enabled'/'Disabled');
        # 'present'/'absent' are module-level states and must not be sent.
        if args['state'] in ['enabled', 'disabled']:
            args['state'] = args['state'].title()
        else:
            del args['state']
        if self.has_changed(args, vpc_offering):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('updateVPCOffering', **args)
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    vpc_offering = self.poll_job(res, 'vpcoffering')
        return vpc_offering
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
display_text=dict(),
state=dict(choices=['enabled', 'present', 'disabled', 'absent'], default='present'),
service_capabilities=dict(type='list', elements='dict', aliases=['service_capability']),
service_offering=dict(),
supported_services=dict(type='list', elements='str', aliases=['supported_service']),
service_providers=dict(type='list', elements='dict', aliases=['service_provider']),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_vpc_offering = AnsibleCloudStackVPCOffering(module)
state = module.params.get('state')
if state in ['absent']:
vpc_offering = acs_vpc_offering.delete_vpc_offering()
else:
vpc_offering = acs_vpc_offering.create_or_update()
result = acs_vpc_offering.get_result(vpc_offering)
module.exit_json(**result)
if __name__ == '__main__':
main()
| 31.065625 | 146 | 0.656775 |
446cbfb32a55ed22e34003fbf19d068bff3ad1c0 | 1,014 | py | Python | azure-cognitiveservices-language-luis/azure/cognitiveservices/language/luis/authoring/models/explicit_list_item_update_object_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | null | null | null | azure-cognitiveservices-language-luis/azure/cognitiveservices/language/luis/authoring/models/explicit_list_item_update_object_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-cognitiveservices-language-luis/azure/cognitiveservices/language/luis/authoring/models/explicit_list_item_update_object_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExplicitListItemUpdateObject(Model):
"""Model object for updating an explicit list item.
:param explicit_list_item: The explicit list item.
:type explicit_list_item: str
"""
_attribute_map = {
'explicit_list_item': {'key': 'explicitListItem', 'type': 'str'},
}
def __init__(self, *, explicit_list_item: str=None, **kwargs) -> None:
super(ExplicitListItemUpdateObject, self).__init__(**kwargs)
self.explicit_list_item = explicit_list_item
| 34.965517 | 76 | 0.615385 |
fe90c5599317e298faa11b9876b82e47bfc337d3 | 1,696 | py | Python | src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/models/application_gateway_backend_health_pool_py3.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/models/application_gateway_backend_health_pool_py3.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/models/application_gateway_backend_health_pool_py3.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationGatewayBackendHealthPool(Model):
"""Application gateway BackendHealth pool.
:param backend_address_pool: Reference of an
ApplicationGatewayBackendAddressPool resource.
:type backend_address_pool:
~azure.mgmt.network.v2018_08_01.models.ApplicationGatewayBackendAddressPool
:param backend_http_settings_collection: List of
ApplicationGatewayBackendHealthHttpSettings resources.
:type backend_http_settings_collection:
list[~azure.mgmt.network.v2018_08_01.models.ApplicationGatewayBackendHealthHttpSettings]
"""
_attribute_map = {
'backend_address_pool': {'key': 'backendAddressPool', 'type': 'ApplicationGatewayBackendAddressPool'},
'backend_http_settings_collection': {'key': 'backendHttpSettingsCollection', 'type': '[ApplicationGatewayBackendHealthHttpSettings]'},
}
def __init__(self, *, backend_address_pool=None, backend_http_settings_collection=None, **kwargs) -> None:
super(ApplicationGatewayBackendHealthPool, self).__init__(**kwargs)
self.backend_address_pool = backend_address_pool
self.backend_http_settings_collection = backend_http_settings_collection
| 45.837838 | 142 | 0.711675 |
6892656955c2b7bed19be47dded3985caeac255b | 13,453 | py | Python | server_logic.py | limk0032/starter-snake-python | b34303092e276c6e620a34104f625661af51fa13 | [
"MIT"
] | null | null | null | server_logic.py | limk0032/starter-snake-python | b34303092e276c6e620a34104f625661af51fa13 | [
"MIT"
] | null | null | null | server_logic.py | limk0032/starter-snake-python | b34303092e276c6e620a34104f625661af51fa13 | [
"MIT"
] | null | null | null | import random
from typing import List, Dict
import math
import numpy as np
def avoid_my_neck(my_head: Dict[str, int], my_body: List[dict], possible_moves: List[str]) -> List[str]:
# print(f'my_head:{my_head}, my_body:{my_body}')
if {"x": my_head["x"], "y": my_head["y"] + 1} in my_body and 'up' in possible_moves:
possible_moves.remove("up")
if {"x": my_head["x"], "y": my_head["y"] - 1} in my_body and 'down' in possible_moves:
possible_moves.remove("down")
if {"x": my_head["x"] + 1, "y": my_head["y"]} in my_body and 'right' in possible_moves:
possible_moves.remove("right")
if {"x": my_head["x"] - 1, "y": my_head["y"]} in my_body and 'left' in possible_moves:
possible_moves.remove("left")
return possible_moves
def avoid_border(my_head: Dict[str, int], my_body: List[dict], possible_moves: List[str], board_height: int, board_width: int) -> List[str]:
# print(f'in avoid_border, board_height:{board_height}, board_width:{board_width}, my_head:{my_head}')
if board_height - 1 == my_head["y"] and 'up' in possible_moves:
possible_moves.remove("up")
# print('remove up')
if 0 == my_head["y"] and 'down' in possible_moves:
possible_moves.remove("down")
# print('remove down')
if board_width - 1 == my_head["x"] and 'right' in possible_moves:
possible_moves.remove("right")
# print('remove right')
if 0 == my_head["x"] and 'left' in possible_moves:
possible_moves.remove("left")
# print('remove left')
return possible_moves
def get_foods_sorted_by_distance_asc(my_head: Dict[str, int], foods: List[dict]):
# print(f'my_head:{my_head}, foods:{foods}')
length_food_list = []
for food in foods:
# print(f'food:{food}, my_head:{my_head}')
distance = math.sqrt((food['x'] - my_head['x']) ** 2 + (food['y'] - my_head['y']) ** 2)
length_food_list.append({'distance': distance, 'food': food})
length_food_list_sorted = sorted(length_food_list, key=lambda x: x['distance'])
food_list_sorted = list(map(lambda x: x['food'], length_food_list_sorted))
return food_list_sorted
def combine_preferred_directions_with_possible_moves(preferred_directions: List[str], possible_moves: List[str]):
result = []
for preferred_direction in preferred_directions:
if preferred_direction in possible_moves:
result.append(preferred_direction)
return result
def get_preferred_directions_to_food(my_head: Dict[str, int], food: Dict[str, int], possible_moves: List[str]):
preferred_direction = []
if food['x'] == my_head['x'] and food['y'] < my_head['y']:
preferred_direction = combine_preferred_directions_with_possible_moves(['down', 'left', 'right', 'up'], possible_moves)
elif food['x'] == my_head['x'] and food['y'] > my_head['y']:
preferred_direction = combine_preferred_directions_with_possible_moves(['up', 'left', 'right', 'down'], possible_moves)
elif food['x'] < my_head['x'] and food['y'] == my_head['y']:
preferred_direction = combine_preferred_directions_with_possible_moves(['left', 'up', 'down', 'right'], possible_moves)
elif food['x'] > my_head['x'] and food['y'] == my_head['y']:
preferred_direction = combine_preferred_directions_with_possible_moves(['right', 'up', 'down', 'left'], possible_moves)
elif food['x'] < my_head['x'] and food['y'] < my_head['y']:
preferred_direction = combine_preferred_directions_with_possible_moves(['down', 'left', 'up', 'right'], possible_moves)
elif food['x'] < my_head['x'] and food['y'] > my_head['y']:
preferred_direction = combine_preferred_directions_with_possible_moves(['up', 'left', 'down', 'right'], possible_moves)
elif food['x'] > my_head['x'] and food['y'] < my_head['y']:
preferred_direction = combine_preferred_directions_with_possible_moves(['down', 'right', 'up', 'left'], possible_moves)
elif food['x'] > my_head['x'] and food['y'] > my_head['y']:
preferred_direction = combine_preferred_directions_with_possible_moves(['up', 'right', 'down', 'left'], possible_moves)
return preferred_direction
def get_number_of_connected_tile(start: Dict[str, int], my_body: List[dict], other_snakes: List[List[dict]], board_height: int, board_width: int):
processed_tiles = []
not_accessible_tiles = my_body[:-1]
for other_snake in other_snakes:
not_accessible_tiles = not_accessible_tiles + other_snake['body']
number_of_connected_tile = get_number_of_connected_tile_recursively(start, not_accessible_tiles, board_height, board_width, processed_tiles)
return number_of_connected_tile
def get_number_of_connected_tile_recursively(start: Dict[str, int], not_accessible_tiles: List[dict], board_height: int, board_width: int, processed_tiles: List[dict]):
if start in processed_tiles:
return 0
if start in not_accessible_tiles:
return 0
if start['x'] < 0 or start['x'] == board_width:
return 0
if start['y'] < 0 or start['y'] == board_height:
return 0
processed_tiles.append(start)
result = 1
result = result + get_number_of_connected_tile_recursively({"x": start["x"] + 1, "y": start["y"]}, not_accessible_tiles, board_height, board_width, processed_tiles)
result = result + get_number_of_connected_tile_recursively({"x": start["x"], "y": start["y"] + 1}, not_accessible_tiles, board_height, board_width, processed_tiles)
result = result + get_number_of_connected_tile_recursively({"x": start["x"] - 1, "y": start["y"]}, not_accessible_tiles, board_height, board_width, processed_tiles)
result = result + get_number_of_connected_tile_recursively({"x": start["x"], "y": start["y"] - 1}, not_accessible_tiles, board_height, board_width, processed_tiles)
return result
def possible_move_to_index(possible_move: str, my_head: Dict[str, int]):
if possible_move == 'up':
return {"x": my_head["x"], "y": my_head["y"] + 1}
if possible_move == 'down':
return {"x": my_head["x"], "y": my_head["y"] - 1}
if possible_move == 'right':
return {"x": my_head["x"] + 1, "y": my_head["y"]}
if possible_move == 'left':
return {"x": my_head["x"] - 1, "y": my_head["y"]}
def re_prioritize_preferred_directions_based_on_free_connected_tile(my_head: Dict[str, int], my_body: List[dict], other_snakes: List[dict], board_height: int, board_width: int, possible_moves_prioritized: List[str]):
result_list = []
for move in possible_moves_prioritized:
possible_move_index = possible_move_to_index(move, my_head)
number_of_connected_tile = get_number_of_connected_tile(possible_move_index, my_body, other_snakes, board_height, board_width)
# print(f'number_of_connected_tile:{number_of_connected_tile}')
result_list.append({'move': move, 'number_of_connected_tile': number_of_connected_tile})
# print(f'result_list:{result_list}')
result_list = sorted(result_list, key=lambda x: x['number_of_connected_tile'], reverse=True)
# print(f'result_list:{result_list}')
result_list = list(map(lambda x: x['move'], result_list))
return result_list
def populate_min_step_to_reach_matrix(min_step_to_reach_matrix: List[List[int]], not_accessible_tiles: List[dict], processed_tiles: List[dict], board_height: int, board_width: int, from_tile: Dict[str, int], to_tile: Dict[str, int], target_tile: Dict[str, int]):
# print(f'in populate_min_step_to_reach_matrix:from_tile:{from_tile}, to_tile:{to_tile}')
if to_tile in not_accessible_tiles:
return
if to_tile in processed_tiles:
return
if to_tile == target_tile:
return
if from_tile['x'] == board_width or from_tile['x'] == -1:
return
if from_tile['y'] == board_height or from_tile['y'] == -1:
return
if to_tile['x'] == board_width or to_tile['x'] == -1:
return
if to_tile['y'] == board_height or to_tile['y'] == -1:
return
old_value = min_step_to_reach_matrix[to_tile['x']][to_tile['y']]
new_value = min_step_to_reach_matrix[from_tile['x']][from_tile['y']] + 1
if old_value == '-':
min_step_to_reach_matrix[to_tile['x']][to_tile['y']] = new_value
else:
min_step_to_reach_matrix[to_tile['x']][to_tile['y']] = min(old_value, new_value)
processed_tiles.append(to_tile)
# print(f'update ({to_tile["x"]},{to_tile["y"]}) = {min_step_to_reach_matrix[to_tile["x"]][to_tile["y"]]}, processed_tiles={processed_tiles}')
populate_min_step_to_reach_matrix(min_step_to_reach_matrix, not_accessible_tiles, processed_tiles, board_height, board_width, to_tile, {"x": to_tile["x"] + 1, "y": to_tile["y"]}, target_tile)
populate_min_step_to_reach_matrix(min_step_to_reach_matrix, not_accessible_tiles, processed_tiles, board_height, board_width, to_tile, {"x": to_tile["x"], "y": to_tile["y"] + 1}, target_tile)
populate_min_step_to_reach_matrix(min_step_to_reach_matrix, not_accessible_tiles, processed_tiles, board_height, board_width, to_tile, {"x": to_tile["x"] - 1, "y": to_tile["y"]}, target_tile)
populate_min_step_to_reach_matrix(min_step_to_reach_matrix, not_accessible_tiles, processed_tiles, board_height, board_width, to_tile, {"x": to_tile["x"], "y": to_tile["y"] - 1}, target_tile)
# # DEBUG
# print('-------------------------------------------------')
# print_min_step_to_reach_matrix(min_step_to_reach_matrix)
# print('-------------------------------------------------')
def print_min_step_to_reach_matrix(min_step_to_reach_matrix: List[List[int]]):
height = len(min_step_to_reach_matrix)
for i in range(height):
print(min_step_to_reach_matrix[height - i - 1])
def get_paths_to_foods(data: dict):
board_height = data['board']['height']
board_width = data['board']['width']
foods = data['board']['food']
head = data["you"]["head"]
body = data["you"]["body"]
other_snakes = data['board']['snakes']
foods = get_foods_sorted_by_distance_asc(head, data['board']['food'])
selected_food = foods[0]
#
not_accessible_tiles = body[:-1]
for other_snake in other_snakes:
not_accessible_tiles = not_accessible_tiles + other_snake['body']
# max = board_height * board_width
min_step_to_reach_matrix = [['-' for x in range(board_width)] for y in range(board_height)]
min_step_to_reach_matrix[head['x']][head['y']] = 0
min_step_to_reach_matrix[selected_food['x']][selected_food['y']] = -1
populate_min_step_to_reach_matrix(min_step_to_reach_matrix, not_accessible_tiles, [head], board_height, board_width, head, {"x": head["x"] + 1, "y": head["y"]}, selected_food) ############# CONSIDER OTHER FOOD
print_min_step_to_reach_matrix(min_step_to_reach_matrix)
populate_min_step_to_reach_matrix(min_step_to_reach_matrix, not_accessible_tiles, [head], board_height, board_width, head, {"x": head["x"], "y": head["y"] + 1}, selected_food) ############# CONSIDER OTHER FOOD
print_min_step_to_reach_matrix(min_step_to_reach_matrix)
populate_min_step_to_reach_matrix(min_step_to_reach_matrix, not_accessible_tiles, [head], board_height, board_width, head, {"x": head["x"] - 1, "y": head["y"]}, selected_food) ############# CONSIDER OTHER FOOD
print_min_step_to_reach_matrix(min_step_to_reach_matrix)
populate_min_step_to_reach_matrix(min_step_to_reach_matrix, not_accessible_tiles, [head], board_height, board_width, head, {"x": head["x"], "y": head["y"] - 1}, selected_food) ############# CONSIDER OTHER FOOD
# DEBUG
# print('-------------------------------------------------')
# print(f'body:{body}, foods:{foods}')
# print_min_step_to_reach_matrix(min_step_to_reach_matrix)
# print('-------------------------------------------------')
def choose_move(data: dict) -> str:
my_head = data["you"]["head"]
my_body = data["you"]["body"]
possible_moves = ["up", "down", "left", "right"]
possible_moves = avoid_my_neck(my_head, my_body, possible_moves)
possible_moves = avoid_border(my_head, my_body, possible_moves, data['board']['height'], data['board']['width'])
if data['board']['snakes']:
for snake in data['board']['snakes']:
possible_moves = avoid_my_neck(my_head, snake['body'], possible_moves)
print(f'possible_moves:{possible_moves}')
if data['board']['food']:
nearest_food = get_foods_sorted_by_distance_asc(my_head, data['board']['food'])[0]
print(f'nearest_food:{nearest_food}')
possible_moves_prioritized = get_preferred_directions_to_food(my_head, nearest_food, possible_moves)
print(f'possible_moves_prioritized based on food:{possible_moves_prioritized}')
possible_moves_prioritized = re_prioritize_preferred_directions_based_on_free_connected_tile(my_head, my_body, data['board']['snakes'], data['board']['height'], data['board']['width'], possible_moves_prioritized)
print(f'possible_moves_prioritized based on free connected tile:{possible_moves_prioritized}')
print(f'possible_moves_prioritized:{possible_moves_prioritized}')
# debug start
get_paths_to_foods(data)
# debug end
move = possible_moves_prioritized[0]
print(f'possible_moves_prioritized:{possible_moves_prioritized}')
print(
f"=======> {data['game']['id']} MOVE {data['turn']}: {move} picked from all valid options in {possible_moves_prioritized}"
)
return move
| 59.004386 | 262 | 0.6852 |
4aa5f455a488cfbf47019b2e4956ac414c785954 | 2,036 | py | Python | tests/qemu/monitor.py | DreamPearl/FuzzyOS | e287bf139511b59abe9e2a0e7ce49444c6a5299e | [
"Apache-2.0"
] | 10 | 2021-03-04T18:48:29.000Z | 2022-03-10T19:07:54.000Z | tests/qemu/monitor.py | DreamPearl/FuzzyOS | e287bf139511b59abe9e2a0e7ce49444c6a5299e | [
"Apache-2.0"
] | 7 | 2020-06-27T13:13:08.000Z | 2021-10-17T17:09:40.000Z | tests/qemu/monitor.py | DreamPearl/FuzzyOS | e287bf139511b59abe9e2a0e7ce49444c6a5299e | [
"Apache-2.0"
] | 1 | 2022-02-10T20:09:01.000Z | 2022-02-10T20:09:01.000Z | import logging
import pexpect
import time
from tests.qemu import args
from tests.qemu import key
logger = logging.getLogger(__name__)
QEMU_PROMPT = "(qemu)"
class Monitor:
def __init__(self, port):
logger.info("Connecting to qemu, waiting for prompt")
self.process = pexpect.spawn(f"telnet 127.0.0.1 {port}")
self.wait_for_prompt()
def wait_for_prompt(self):
try:
self.process.expect(QEMU_PROMPT, timeout=2)
except pexpect.TIMEOUT as e:
logger.error("[pexpect] Qemu prompt wasn't seen within timeout")
raise e
def send_qemu_command(self, cmd, wait_prompt=True):
self.process.sendline(cmd)
if wait_prompt:
self.wait_for_prompt()
def send_key(self, key):
"""Send a key to qemu."""
self.process.sendline(f"sendkey {key}")
self.wait_for_prompt()
def type_string(self, msg):
"""Type string in qemu."""
logger.info(f"[to_qemu] typing string: {repr(msg)}")
for char in msg:
self.send_key(key.char_to_key(char))
def handle_qemu_cmd(monitor, args):
if not args.qemu_cmd:
return
logger.info("Handle: fuzzy_shell_cmd")
cmd = " ".join(args.qemu_cmd) + "\n"
monitor.send_qemu_command(cmd)
def handle_fuzzy_shell_cmd(monitor, args):
if not args.fuzzy_shell_cmd:
return
logger.info("Handle: fuzzy_shell_cmd")
cmd = " ".join(args.fuzzy_shell_cmd) + "\n"
monitor.type_string(cmd)
def handle_qemu_quit(monitor, args):
if not args.quit:
return
logger.info("Handle: qemu quit")
monitor.send_qemu_command("quit\n", wait_prompt=False)
time.sleep(1)
def main():
logging.basicConfig(level=logging.INFO)
parsed_args = args.get_args()
monitor = Monitor(parsed_args.port)
handle_fuzzy_shell_cmd(monitor, parsed_args)
handle_qemu_cmd(monitor, parsed_args)
handle_qemu_quit(monitor, parsed_args)
logger.info("graceful exit")
if __name__ == '__main__':
main()
| 26.102564 | 76 | 0.657171 |
ad2a8d1bb9fe5a88923ea55cedc67689c7b9ab3d | 5,025 | py | Python | venv/Lib/site-packages/mediapipe/calculators/image/recolor_calculator_pb2.py | Farhan-Malik/advance-hand-gesture | 0ebe21ddd7c8c2eb14746678be57b33d38c47205 | [
"MIT"
] | 41 | 2021-06-19T13:57:18.000Z | 2021-12-02T17:08:53.000Z | venv/Lib/site-packages/mediapipe/calculators/image/recolor_calculator_pb2.py | Farhan-Malik/advance-hand-gesture | 0ebe21ddd7c8c2eb14746678be57b33d38c47205 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/mediapipe/calculators/image/recolor_calculator_pb2.py | Farhan-Malik/advance-hand-gesture | 0ebe21ddd7c8c2eb14746678be57b33d38c47205 | [
"MIT"
] | 4 | 2021-07-02T03:09:51.000Z | 2021-11-25T13:00:10.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/calculators/image/recolor_calculator.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.framework import calculator_pb2 as mediapipe_dot_framework_dot_calculator__pb2
mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe_dot_framework_dot_calculator__options__pb2
from mediapipe.util import color_pb2 as mediapipe_dot_util_dot_color__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/calculators/image/recolor_calculator.proto',
package='mediapipe',
syntax='proto2',
serialized_pb=_b('\n4mediapipe/calculators/image/recolor_calculator.proto\x12\tmediapipe\x1a$mediapipe/framework/calculator.proto\x1a\x1amediapipe/util/color.proto\"\x8a\x02\n\x18RecolorCalculatorOptions\x12J\n\x0cmask_channel\x18\x01 \x01(\x0e\x32/.mediapipe.RecolorCalculatorOptions.MaskChannel:\x03RED\x12\x1f\n\x05\x63olor\x18\x02 \x01(\x0b\x32\x10.mediapipe.Color\".\n\x0bMaskChannel\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03RED\x10\x01\x12\t\n\x05\x41LPHA\x10\x02\x32Q\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\x8d\x84\xb5x \x01(\x0b\x32#.mediapipe.RecolorCalculatorOptions')
,
dependencies=[mediapipe_dot_framework_dot_calculator__pb2.DESCRIPTOR,mediapipe_dot_util_dot_color__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_RECOLORCALCULATOROPTIONS_MASKCHANNEL = _descriptor.EnumDescriptor(
name='MaskChannel',
full_name='mediapipe.RecolorCalculatorOptions.MaskChannel',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALPHA', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=271,
serialized_end=317,
)
_sym_db.RegisterEnumDescriptor(_RECOLORCALCULATOROPTIONS_MASKCHANNEL)
_RECOLORCALCULATOROPTIONS = _descriptor.Descriptor(
name='RecolorCalculatorOptions',
full_name='mediapipe.RecolorCalculatorOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mask_channel', full_name='mediapipe.RecolorCalculatorOptions.mask_channel', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='color', full_name='mediapipe.RecolorCalculatorOptions.color', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='ext', full_name='mediapipe.RecolorCalculatorOptions.ext', index=0,
number=252527117, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[],
enum_types=[
_RECOLORCALCULATOROPTIONS_MASKCHANNEL,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=134,
serialized_end=400,
)
_RECOLORCALCULATOROPTIONS.fields_by_name['mask_channel'].enum_type = _RECOLORCALCULATOROPTIONS_MASKCHANNEL
_RECOLORCALCULATOROPTIONS.fields_by_name['color'].message_type = mediapipe_dot_util_dot_color__pb2._COLOR
_RECOLORCALCULATOROPTIONS_MASKCHANNEL.containing_type = _RECOLORCALCULATOROPTIONS
DESCRIPTOR.message_types_by_name['RecolorCalculatorOptions'] = _RECOLORCALCULATOROPTIONS
RecolorCalculatorOptions = _reflection.GeneratedProtocolMessageType('RecolorCalculatorOptions', (_message.Message,), dict(
DESCRIPTOR = _RECOLORCALCULATOROPTIONS,
__module__ = 'mediapipe.calculators.image.recolor_calculator_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.RecolorCalculatorOptions)
))
_sym_db.RegisterMessage(RecolorCalculatorOptions)
_RECOLORCALCULATOROPTIONS.extensions_by_name['ext'].message_type = _RECOLORCALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_RECOLORCALCULATOROPTIONS.extensions_by_name['ext'])
# @@protoc_insertion_point(module_scope)
| 41.875 | 594 | 0.799204 |
d5ac760481deec7fd65c64be885de8203abac1d4 | 4,661 | py | Python | rllib/examples/two_step_game.py | tdml13/ray | 01db1ec9f922370da27b78b0d85ce4f3ffaf4377 | [
"Apache-2.0"
] | 39 | 2021-02-02T23:09:31.000Z | 2022-03-28T16:39:12.000Z | rllib/examples/two_step_game.py | chaokunyang/ray | 7a1e8fdb8bfd1df00dc9b946b233d85dc4be2387 | [
"Apache-2.0"
] | 70 | 2019-03-13T05:25:48.000Z | 2022-03-26T07:05:19.000Z | rllib/examples/two_step_game.py | chaokunyang/ray | 7a1e8fdb8bfd1df00dc9b946b233d85dc4be2387 | [
"Apache-2.0"
] | 20 | 2021-02-05T05:51:39.000Z | 2022-03-04T21:13:24.000Z | """The two-step game from QMIX: https://arxiv.org/pdf/1803.11485.pdf
Configurations you can try:
- normal policy gradients (PG)
- contrib/MADDPG
- QMIX
See also: centralized_critic.py for centralized critic PPO on this game.
"""
import argparse
from gym.spaces import Tuple, MultiDiscrete, Dict, Discrete
import os
import ray
from ray import tune
from ray.tune import register_env, grid_search
from ray.rllib.env.multi_agent_env import ENV_STATE
from ray.rllib.examples.env.two_step_game import TwoStepGame
from ray.rllib.utils.test_utils import check_learning_achieved
parser = argparse.ArgumentParser()
parser.add_argument(
"--run",
type=str,
default="PG",
help="The RLlib-registered algorithm to use.")
parser.add_argument(
"--framework",
choices=["tf", "tf2", "tfe", "torch"],
default="tf",
help="The DL framework specifier.")
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument(
"--as-test",
action="store_true",
help="Whether this script should be run as a test: --stop-reward must "
"be achieved within --stop-timesteps AND --stop-iters.")
parser.add_argument(
"--stop-iters",
type=int,
default=200,
help="Number of iterations to train.")
parser.add_argument(
"--stop-timesteps",
type=int,
default=50000,
help="Number of timesteps to train.")
parser.add_argument(
"--stop-reward",
type=float,
default=7.0,
help="Reward at which we stop training.")
if __name__ == "__main__":
args = parser.parse_args()
ray.init(num_cpus=args.num_cpus or None)
grouping = {
"group_1": [0, 1],
}
obs_space = Tuple([
Dict({
"obs": MultiDiscrete([2, 2, 2, 3]),
ENV_STATE: MultiDiscrete([2, 2, 2])
}),
Dict({
"obs": MultiDiscrete([2, 2, 2, 3]),
ENV_STATE: MultiDiscrete([2, 2, 2])
}),
])
act_space = Tuple([
TwoStepGame.action_space,
TwoStepGame.action_space,
])
register_env(
"grouped_twostep",
lambda config: TwoStepGame(config).with_agent_groups(
grouping, obs_space=obs_space, act_space=act_space))
if args.run == "contrib/MADDPG":
obs_space_dict = {
"agent_1": Discrete(6),
"agent_2": Discrete(6),
}
act_space_dict = {
"agent_1": TwoStepGame.action_space,
"agent_2": TwoStepGame.action_space,
}
config = {
"learning_starts": 100,
"env_config": {
"actions_are_logits": True,
},
"multiagent": {
"policies": {
"pol1": (None, Discrete(6), TwoStepGame.action_space, {
"agent_id": 0,
}),
"pol2": (None, Discrete(6), TwoStepGame.action_space, {
"agent_id": 1,
}),
},
"policy_mapping_fn": (
lambda aid, **kwargs: "pol2" if aid else "pol1"),
},
"framework": args.framework,
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
}
group = False
elif args.run == "QMIX":
config = {
"rollout_fragment_length": 4,
"train_batch_size": 32,
"exploration_config": {
"epsilon_timesteps": 5000,
"final_epsilon": 0.05,
},
"num_workers": 0,
"mixer": grid_search([None, "qmix"]),
"env_config": {
"separate_state_space": True,
"one_hot_state_encoding": True
},
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
}
group = True
else:
config = {
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
"framework": args.framework,
}
group = False
stop = {
"episode_reward_mean": args.stop_reward,
"timesteps_total": args.stop_timesteps,
"training_iteration": args.stop_iters,
}
config = dict(config, **{
"env": "grouped_twostep" if group else TwoStepGame,
})
if args.as_test:
config["seed"] = 1234
results = tune.run(args.run, stop=stop, config=config, verbose=2)
if args.as_test:
check_learning_achieved(results, args.stop_reward)
ray.shutdown()
| 29.5 | 75 | 0.560609 |
e0ae8eb56e2439eadb1780e1a612a1809b2ea734 | 566 | py | Python | regexlib/2021-5-15/python_re2_test_file/regexlib_3314.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | 1 | 2022-01-24T14:43:23.000Z | 2022-01-24T14:43:23.000Z | regexlib/2021-5-15/python_re2_test_file/regexlib_3314.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | regexlib/2021-5-15/python_re2_test_file/regexlib_3314.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | # 3314
# ^([\u00c0-\u01ffa-zA-Z'\-]+[ ]?[\u00c0-\u01ffa-zA-Z'\-]*)+$
# EXPONENT
# nums:5
# EXPONENT AttackString:""+"''"*8+"!1 __EOA(i or ii)"
import re2 as re
from time import perf_counter
regex = """^([\u00c0-\u01ffa-zA-Z'\-]+[ ]?[\u00c0-\u01ffa-zA-Z'\-]*)+$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "" + "\'\'" * i * 1 + "!1 __EOA(i or ii)"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | 29.789474 | 73 | 0.567138 |
c1d90b0ed7d3d9ba08f1c60b58a761aebd888b23 | 39,894 | py | Python | ethgreen/pools/pool_wallet.py | ethgreen/ethgreen-blockchain | 8f1a450897ab7a82326aea7e57e18ac2c03a9e83 | [
"Apache-2.0"
] | 11 | 2021-11-10T19:30:12.000Z | 2022-02-09T04:30:29.000Z | ethgreen/pools/pool_wallet.py | ethgreen/ethgreen-blockchain | 8f1a450897ab7a82326aea7e57e18ac2c03a9e83 | [
"Apache-2.0"
] | 6 | 2021-11-16T17:11:03.000Z | 2021-12-28T17:11:20.000Z | ethgreen/pools/pool_wallet.py | ethgreen/ethgreen-blockchain | 8f1a450897ab7a82326aea7e57e18ac2c03a9e83 | [
"Apache-2.0"
] | 3 | 2021-11-21T02:27:10.000Z | 2022-03-15T08:34:47.000Z | import logging
import time
from typing import Any, Optional, Set, Tuple, List, Dict
from blspy import PrivateKey, G2Element, G1Element
from ethgreen.consensus.block_record import BlockRecord
from ethgreen.pools.pool_config import PoolWalletConfig, load_pool_config, update_pool_config
from ethgreen.pools.pool_wallet_info import (
PoolWalletInfo,
PoolSingletonState,
PoolState,
FARMING_TO_POOL,
SELF_POOLING,
LEAVING_POOL,
create_pool_state,
)
from ethgreen.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from ethgreen.types.announcement import Announcement
from ethgreen.types.blockchain_format.coin import Coin
from ethgreen.types.blockchain_format.sized_bytes import bytes32
from ethgreen.types.blockchain_format.program import Program, SerializedProgram
from ethgreen.types.coin_record import CoinRecord
from ethgreen.types.coin_spend import CoinSpend
from ethgreen.types.spend_bundle import SpendBundle
from ethgreen.pools.pool_puzzles import (
create_waiting_room_inner_puzzle,
create_full_puzzle,
SINGLETON_LAUNCHER,
create_pooling_inner_puzzle,
solution_to_pool_state,
pool_state_to_inner_puzzle,
get_most_recent_singleton_coin_from_coin_spend,
launcher_id_to_p2_puzzle_hash,
create_travel_spend,
uncurry_pool_member_inner_puzzle,
create_absorb_spend,
is_pool_member_inner_puzzle,
is_pool_waitingroom_inner_puzzle,
uncurry_pool_waitingroom_inner_puzzle,
get_delayed_puz_info_from_launcher_spend,
)
from ethgreen.util.ints import uint8, uint32, uint64
from ethgreen.wallet.derive_keys import (
master_sk_to_pooling_authentication_sk,
find_owner_sk,
)
from ethgreen.wallet.sign_coin_spends import sign_coin_spends
from ethgreen.wallet.transaction_record import TransactionRecord
from ethgreen.wallet.util.wallet_types import WalletType
from ethgreen.wallet.wallet import Wallet
from ethgreen.wallet.wallet_coin_record import WalletCoinRecord
from ethgreen.wallet.wallet_info import WalletInfo
from ethgreen.wallet.util.transaction_type import TransactionType
class PoolWallet:
    # Minimum mojos required in the main wallet to create the singleton launcher.
    MINIMUM_INITIAL_BALANCE = 1
    # Recommended bounds for a pool's relative_lock_height (see _verify_pooling_state).
    MINIMUM_RELATIVE_LOCK_HEIGHT = 5
    MAXIMUM_RELATIVE_LOCK_HEIGHT = 1000
    # Handle to the owning WalletStateManager (typed Any to avoid an import cycle).
    wallet_state_manager: Any
    log: logging.Logger
    wallet_info: WalletInfo
    # Desired end state of the singleton; None when no transition is in flight.
    target_state: Optional[PoolState]
    # Fee to attach to the follow-up travel transaction (e.g. leave-then-join).
    next_transaction_fee: uint64
    standard_wallet: Wallet
    wallet_id: int
    singleton_list: List[Coin]
    # NOTE: the string below is not the class docstring (it is not the first
    # statement in the class body), so it is a no-op expression at runtime;
    # it is kept here as design documentation.
    """
    From the user's perspective, this is not a wallet at all, but a way to control
    whether their pooling-enabled plots are being self-farmed, or farmed by a pool,
    and by which pool. Self-pooling and joint pooling rewards are swept into the
    users' regular wallet.
    If this wallet is in SELF_POOLING state, the coin ID associated with the current
    pool wallet contains the rewards gained while self-farming, so care must be taken
    to disallow joining a new pool while we still have money on the pooling singleton UTXO.
    Pools can be joined anonymously, without an account or prior signup.
    The ability to change the farm-to target prevents abuse from pools
    by giving the user the ability to quickly change pools, or self-farm.
    The pool is also protected, by not allowing members to cheat by quickly leaving a pool,
    and claiming a block that was pledged to the pool.
    The pooling protocol and smart coin prevents a user from quickly leaving a pool
    by enforcing a wait time when leaving the pool. A minimum number of blocks must pass
    after the user declares that they are leaving the pool, and before they can start to
    self-claim rewards again.
    Control of switching states is granted to the owner public key.
    We reveal the inner_puzzle to the pool during setup of the pooling protocol.
    The pool can prove to itself that the inner puzzle pays to the pooling address,
    and it can follow state changes in the pooling puzzle by tracing destruction and
    creation of coins associate with this pooling singleton (the singleton controlling
    this pool group).
    The user trusts the pool to send mining rewards to the <XXX address XXX>
    TODO: We should mark which address is receiving funds for our current state.
    If the pool misbehaves, it is the user's responsibility to leave the pool
    It is the Pool's responsibility to claim the rewards sent to the pool_puzzlehash.
    The timeout for leaving the pool is expressed in number of blocks from the time
    the user expresses their intent to leave.
    """
    @classmethod
    def type(cls) -> uint8:
        """Return the wallet type identifier (POOLING_WALLET)."""
        return uint8(WalletType.POOLING_WALLET)
    def id(self):
        """Return this wallet's id as assigned by the wallet state manager."""
        return self.wallet_info.id
@classmethod
def _verify_self_pooled(cls, state) -> Optional[str]:
err = ""
if state.pool_url != "":
err += " Unneeded pool_url for self-pooling"
if state.relative_lock_height != 0:
err += " Incorrect relative_lock_height for self-pooling"
return None if err == "" else err
@classmethod
def _verify_pooling_state(cls, state) -> Optional[str]:
err = ""
if state.relative_lock_height < cls.MINIMUM_RELATIVE_LOCK_HEIGHT:
err += (
f" Pool relative_lock_height ({state.relative_lock_height})"
f"is less than recommended minimum ({cls.MINIMUM_RELATIVE_LOCK_HEIGHT})"
)
elif state.relative_lock_height > cls.MAXIMUM_RELATIVE_LOCK_HEIGHT:
err += (
f" Pool relative_lock_height ({state.relative_lock_height})"
f"is greater than recommended maximum ({cls.MAXIMUM_RELATIVE_LOCK_HEIGHT})"
)
if state.pool_url in [None, ""]:
err += " Empty pool url in pooling state"
return err
@classmethod
def _verify_pool_state(cls, state: PoolState) -> Optional[str]:
if state.target_puzzle_hash is None:
return "Invalid puzzle_hash"
if state.version > POOL_PROTOCOL_VERSION:
return (
f"Detected pool protocol version {state.version}, which is "
f"newer than this wallet's version ({POOL_PROTOCOL_VERSION}). Please upgrade "
f"to use this pooling wallet"
)
if state.state == PoolSingletonState.SELF_POOLING:
return cls._verify_self_pooled(state)
elif state.state == PoolSingletonState.FARMING_TO_POOL or state.state == PoolSingletonState.LEAVING_POOL:
return cls._verify_pooling_state(state)
else:
return "Internal Error"
    @classmethod
    def _verify_initial_target_state(cls, initial_target_state):
        """Raise ValueError if *initial_target_state* fails pool-state validation."""
        err = cls._verify_pool_state(initial_target_state)
        if err:
            raise ValueError(f"Invalid internal Pool State: {err}: {initial_target_state}")
    async def get_spend_history(self) -> List[Tuple[uint32, CoinSpend]]:
        """Return all (block height, spend) pairs recorded for this wallet's singleton."""
        # NOTE(review): pool_store.get_spends_for_wallet is called without await
        # here and elsewhere in this class, so it appears to be synchronous.
        return self.wallet_state_manager.pool_store.get_spends_for_wallet(self.wallet_id)
    async def get_current_state(self) -> PoolWalletInfo:
        """Derive the wallet's current on-chain pool state from its spend history."""
        history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
        all_spends: List[CoinSpend] = [cs for _, cs in history]
        # We must have at least the launcher spend
        assert len(all_spends) >= 1
        launcher_coin: Coin = all_spends[0].coin
        delayed_seconds, delayed_puzhash = get_delayed_puz_info_from_launcher_spend(all_spends[0])
        tip_singleton_coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(all_spends[-1])
        launcher_id: bytes32 = launcher_coin.name()
        p2_singleton_puzzle_hash = launcher_id_to_p2_puzzle_hash(launcher_id, delayed_seconds, delayed_puzhash)
        assert tip_singleton_coin is not None
        # Walk backwards from the most recent spend to the newest one whose
        # solution encodes a PoolState (absorb spends do not carry one).
        curr_spend_i = len(all_spends) - 1
        pool_state: Optional[PoolState] = None
        last_singleton_spend_height = uint32(0)
        while pool_state is None:
            full_spend: CoinSpend = all_spends[curr_spend_i]
            pool_state = solution_to_pool_state(full_spend)
            last_singleton_spend_height = uint32(history[curr_spend_i][0])
            curr_spend_i -= 1
        assert pool_state is not None
        current_inner = pool_state_to_inner_puzzle(
            pool_state,
            launcher_coin.name(),
            self.wallet_state_manager.constants.GENESIS_CHALLENGE,
            delayed_seconds,
            delayed_puzhash,
        )
        return PoolWalletInfo(
            pool_state,
            self.target_state,
            launcher_coin,
            launcher_id,
            p2_singleton_puzzle_hash,
            current_inner,
            tip_singleton_coin.name(),
            last_singleton_spend_height,
        )
    async def get_unconfirmed_transactions(self) -> List[TransactionRecord]:
        """Return this wallet's pending (unconfirmed) transactions."""
        return await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.wallet_id)
    async def get_tip(self) -> Tuple[uint32, CoinSpend]:
        """Return the (height, spend) of the most recent recorded singleton spend."""
        return self.wallet_state_manager.pool_store.get_spends_for_wallet(self.wallet_id)[-1]
    async def update_pool_config(self, make_new_authentication_key: bool):
        """Write this wallet's pooling configuration to the on-disk pool config.

        When *make_new_authentication_key* is True, or no config entry exists
        yet for this launcher, a fresh authentication key and payout puzzle
        hash are generated; otherwise the existing entry's values are reused.
        """
        current_state: PoolWalletInfo = await self.get_current_state()
        pool_config_list: List[PoolWalletConfig] = load_pool_config(self.wallet_state_manager.root_path)
        pool_config_dict: Dict[bytes32, PoolWalletConfig] = {c.launcher_id: c for c in pool_config_list}
        existing_config: Optional[PoolWalletConfig] = pool_config_dict.get(current_state.launcher_id, None)
        if make_new_authentication_key or existing_config is None:
            # Derive the authentication key deterministically from the master key
            # and this wallet's id (index 0).
            new_auth_sk: PrivateKey = master_sk_to_pooling_authentication_sk(
                self.wallet_state_manager.private_key, uint32(self.wallet_id), uint32(0)
            )
            auth_pk: G1Element = new_auth_sk.get_g1()
            payout_instructions: str = (await self.standard_wallet.get_new_puzzlehash(in_transaction=True)).hex()
        else:
            auth_pk = existing_config.authentication_public_key
            payout_instructions = existing_config.payout_instructions
        new_config: PoolWalletConfig = PoolWalletConfig(
            current_state.launcher_id,
            current_state.current.pool_url if current_state.current.pool_url else "",
            payout_instructions,
            current_state.current.target_puzzle_hash,
            current_state.p2_singleton_puzzle_hash,
            current_state.current.owner_pubkey,
            auth_pk,
        )
        pool_config_dict[new_config.launcher_id] = new_config
        # Calls the module-level update_pool_config() helper (same name as this method).
        await update_pool_config(self.wallet_state_manager.root_path, list(pool_config_dict.values()))
@staticmethod
def get_next_interesting_coin_ids(spend: CoinSpend) -> List[bytes32]:
# CoinSpend of one of the coins that we cared about. This coin was spent in a block, but might be in a reorg
# If we return a value, it is a coin ID that we are also interested in (to support two transitions per block)
coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(spend)
if coin is not None:
return [coin.name()]
return []
    async def apply_state_transitions(self, block_spends: List[CoinSpend], block_height: uint32):
        """
        Updates the Pool state (including DB) with new singleton spends. The block spends can contain many spends
        that we are not interested in, and can contain many ephemeral spends. They must all be in the same block.
        The DB must be committed after calling this method. All validation should be done here.
        """
        coin_name_to_spend: Dict[bytes32, CoinSpend] = {cs.coin.name(): cs for cs in block_spends}
        tip: Tuple[uint32, CoinSpend] = await self.get_tip()
        tip_height = tip[0]
        tip_spend = tip[1]
        assert block_height >= tip_height  # We should not have a spend with a lesser block height
        # Follow the singleton chain: repeatedly find the spend of our current tip
        # coin within this block (handles several transitions in one block).
        while True:
            tip_coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(tip_spend)
            assert tip_coin is not None
            spent_coin_name: bytes32 = tip_coin.name()
            if spent_coin_name not in coin_name_to_spend:
                break
            spend: CoinSpend = coin_name_to_spend[spent_coin_name]
            await self.wallet_state_manager.pool_store.add_spend(self.wallet_id, spend, block_height)
            tip_spend = (await self.get_tip())[1]
            self.log.info(f"New PoolWallet singleton tip_coin: {tip_spend}")
            coin_name_to_spend.pop(spent_coin_name)
        # If we have reached the target state, resets it to None. Loops back to get current state
        for _, added_spend in reversed(self.wallet_state_manager.pool_store.get_spends_for_wallet(self.wallet_id)):
            latest_state: Optional[PoolState] = solution_to_pool_state(added_spend)
            if latest_state is not None:
                if self.target_state == latest_state:
                    self.target_state = None
                    self.next_transaction_fee = uint64(0)
                break
        await self.update_pool_config(False)
    async def rewind(self, block_height: int) -> bool:
        """
        Rolls back all transactions after block_height, and if creation was after block_height, deletes the wallet.
        Returns True if the wallet should be removed.
        """
        try:
            history: List[Tuple[uint32, CoinSpend]] = self.wallet_state_manager.pool_store.get_spends_for_wallet(
                self.wallet_id
            ).copy()
            prev_state: PoolWalletInfo = await self.get_current_state()
            await self.wallet_state_manager.pool_store.rollback(block_height, self.wallet_id)
            # history[0] is the launcher spend; if it was recorded above the
            # rollback height, the singleton itself no longer exists.
            if len(history) > 0 and history[0][0] > block_height:
                # If we have no entries in the DB, we have no singleton, so we should not have a wallet either
                # The PoolWallet object becomes invalid after this.
                await self.wallet_state_manager.interested_store.remove_interested_puzzle_hash(
                    prev_state.p2_singleton_puzzle_hash, in_transaction=True
                )
                return True
            else:
                if await self.get_current_state() != prev_state:
                    await self.update_pool_config(False)
                return False
        except Exception as e:
            # Broad catch: a failed rewind is reported as "do not delete wallet".
            self.log.error(f"Exception rewinding: {e}")
            return False
    @staticmethod
    async def create(
        wallet_state_manager: Any,
        wallet: Wallet,
        launcher_coin_id: bytes32,
        block_spends: List[CoinSpend],
        block_height: uint32,
        in_transaction: bool,
        name: str = None,
    ):
        """
        This creates a new PoolWallet with only one spend: the launcher spend. The DB MUST be committed after calling
        this method.
        """
        self = PoolWallet()
        self.wallet_state_manager = wallet_state_manager
        self.wallet_info = await wallet_state_manager.user_store.create_wallet(
            "Pool wallet", WalletType.POOLING_WALLET.value, "", in_transaction=in_transaction
        )
        self.wallet_id = self.wallet_info.id
        self.standard_wallet = wallet
        self.target_state = None
        self.next_transaction_fee = uint64(0)
        self.log = logging.getLogger(name if name else __name__)
        # Locate the launcher spend among this block's spends.
        launcher_spend: Optional[CoinSpend] = None
        for spend in block_spends:
            if spend.coin.name() == launcher_coin_id:
                launcher_spend = spend
        assert launcher_spend is not None
        await self.wallet_state_manager.pool_store.add_spend(self.wallet_id, launcher_spend, block_height)
        await self.update_pool_config(True)
        # Watch the p2_singleton puzzle hash so pool reward coins are noticed.
        p2_puzzle_hash: bytes32 = (await self.get_current_state()).p2_singleton_puzzle_hash
        await self.wallet_state_manager.interested_store.add_interested_puzzle_hash(
            p2_puzzle_hash, self.wallet_id, True
        )
        await self.wallet_state_manager.add_new_wallet(self, self.wallet_info.id, create_puzzle_hashes=False)
        self.wallet_state_manager.set_new_peak_callback(self.wallet_id, self.new_peak)
        return self
    @staticmethod
    async def create_from_db(
        wallet_state_manager: Any,
        wallet: Wallet,
        wallet_info: WalletInfo,
        name: str = None,
    ):
        """
        This creates a PoolWallet from DB. However, all data is already handled by WalletPoolStore, so we don't need
        to do anything here.
        """
        self = PoolWallet()
        self.wallet_state_manager = wallet_state_manager
        self.wallet_id = wallet_info.id
        self.standard_wallet = wallet
        self.wallet_info = wallet_info
        self.target_state = None
        self.log = logging.getLogger(name if name else __name__)
        self.wallet_state_manager.set_new_peak_callback(self.wallet_id, self.new_peak)
        return self
@staticmethod
async def create_new_pool_wallet_transaction(
wallet_state_manager: Any,
main_wallet: Wallet,
initial_target_state: PoolState,
fee: uint64 = uint64(0),
p2_singleton_delay_time: Optional[uint64] = None,
p2_singleton_delayed_ph: Optional[bytes32] = None,
) -> Tuple[TransactionRecord, bytes32, bytes32]:
"""
A "plot NFT", or pool wallet, represents the idea of a set of plots that all pay to
the same pooling puzzle. This puzzle is a `ethgreen singleton` that is
parameterized with a public key controlled by the user's wallet
(a `smart coin`). It contains an inner puzzle that can switch between
paying block rewards to a pool, or to a user's own wallet.
Call under the wallet state manger lock
"""
amount = 1
standard_wallet = main_wallet
if p2_singleton_delayed_ph is None:
p2_singleton_delayed_ph = await main_wallet.get_new_puzzlehash()
if p2_singleton_delay_time is None:
p2_singleton_delay_time = uint64(604800)
unspent_records = await wallet_state_manager.coin_store.get_unspent_coins_for_wallet(standard_wallet.wallet_id)
balance = await standard_wallet.get_confirmed_balance(unspent_records)
if balance < PoolWallet.MINIMUM_INITIAL_BALANCE:
raise ValueError("Not enough balance in main wallet to create a managed plotting pool.")
if balance < fee:
raise ValueError("Not enough balance in main wallet to create a managed plotting pool with fee {fee}.")
# Verify Parameters - raise if invalid
PoolWallet._verify_initial_target_state(initial_target_state)
spend_bundle, singleton_puzzle_hash, launcher_coin_id = await PoolWallet.generate_launcher_spend(
standard_wallet,
uint64(1),
initial_target_state,
wallet_state_manager.constants.GENESIS_CHALLENGE,
p2_singleton_delay_time,
p2_singleton_delayed_ph,
)
if spend_bundle is None:
raise ValueError("failed to generate ID for wallet")
standard_wallet_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=singleton_puzzle_hash,
amount=uint64(amount),
fee_amount=fee,
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=wallet_state_manager.main_wallet.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
await standard_wallet.push_transaction(standard_wallet_record)
p2_singleton_puzzle_hash: bytes32 = launcher_id_to_p2_puzzle_hash(
launcher_coin_id, p2_singleton_delay_time, p2_singleton_delayed_ph
)
return standard_wallet_record, p2_singleton_puzzle_hash, launcher_coin_id
    async def sign(self, coin_spend: CoinSpend) -> SpendBundle:
        """Sign *coin_spend* with the owner key derived from the master private key."""
        async def pk_to_sk(pk: G1Element) -> PrivateKey:
            # Look up the owner secret key matching the public key embedded in the puzzle.
            owner_sk: Optional[PrivateKey] = await find_owner_sk([self.wallet_state_manager.private_key], pk)
            assert owner_sk is not None
            return owner_sk
        return await sign_coin_spends(
            [coin_spend],
            pk_to_sk,
            self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
            self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
        )
    async def generate_travel_transaction(self, fee: uint64) -> TransactionRecord:
        """Create and sign the singleton spend that moves toward self.target_state.

        When currently FARMING_TO_POOL, the next hop is always the LEAVING_POOL
        waiting room; otherwise the hop goes directly to the target state.
        """
        # target_state is contained within pool_wallet_state
        pool_wallet_info: PoolWalletInfo = await self.get_current_state()
        spend_history = await self.get_spend_history()
        last_coin_spend: CoinSpend = spend_history[-1][1]
        delayed_seconds, delayed_puzhash = get_delayed_puz_info_from_launcher_spend(spend_history[0][1])
        assert pool_wallet_info.target is not None
        next_state = pool_wallet_info.target
        if pool_wallet_info.current.state in [FARMING_TO_POOL]:
            next_state = create_pool_state(
                LEAVING_POOL,
                pool_wallet_info.current.target_puzzle_hash,
                pool_wallet_info.current.owner_pubkey,
                pool_wallet_info.current.pool_url,
                pool_wallet_info.current.relative_lock_height,
            )
        new_inner_puzzle = pool_state_to_inner_puzzle(
            next_state,
            pool_wallet_info.launcher_coin.name(),
            self.wallet_state_manager.constants.GENESIS_CHALLENGE,
            delayed_seconds,
            delayed_puzhash,
        )
        new_full_puzzle: SerializedProgram = SerializedProgram.from_program(
            create_full_puzzle(new_inner_puzzle, pool_wallet_info.launcher_coin.name())
        )
        outgoing_coin_spend, inner_puzzle = create_travel_spend(
            last_coin_spend,
            pool_wallet_info.launcher_coin,
            pool_wallet_info.current,
            next_state,
            self.wallet_state_manager.constants.GENESIS_CHALLENGE,
            delayed_seconds,
            delayed_puzhash,
        )
        tip = (await self.get_tip())[1]
        tip_coin = tip.coin
        singleton = tip.additions()[0]
        singleton_id = singleton.name()
        # Sanity checks: the spend must consume the current singleton tip.
        assert outgoing_coin_spend.coin.parent_coin_info == tip_coin.name()
        assert outgoing_coin_spend.coin.name() == singleton_id
        assert new_inner_puzzle != inner_puzzle
        # Verify the revealed inner puzzle matches our recorded owner key,
        # whichever of the two inner-puzzle forms the singleton currently uses.
        if is_pool_member_inner_puzzle(inner_puzzle):
            (
                inner_f,
                target_puzzle_hash,
                p2_singleton_hash,
                pubkey_as_program,
                pool_reward_prefix,
                escape_puzzle_hash,
            ) = uncurry_pool_member_inner_puzzle(inner_puzzle)
            pk_bytes: bytes = bytes(pubkey_as_program.as_atom())
            assert len(pk_bytes) == 48
            owner_pubkey = G1Element.from_bytes(pk_bytes)
            assert owner_pubkey == pool_wallet_info.current.owner_pubkey
        elif is_pool_waitingroom_inner_puzzle(inner_puzzle):
            (
                target_puzzle_hash,  # payout_puzzle_hash
                relative_lock_height,
                owner_pubkey,
                p2_singleton_hash,
            ) = uncurry_pool_waitingroom_inner_puzzle(inner_puzzle)
            pk_bytes = bytes(owner_pubkey.as_atom())
            assert len(pk_bytes) == 48
            assert owner_pubkey == pool_wallet_info.current.owner_pubkey
        else:
            raise RuntimeError("Invalid state")
        signed_spend_bundle = await self.sign(outgoing_coin_spend)
        assert signed_spend_bundle.removals()[0].puzzle_hash == singleton.puzzle_hash
        assert signed_spend_bundle.removals()[0].name() == singleton.name()
        assert signed_spend_bundle is not None
        tx_record = TransactionRecord(
            confirmed_at_height=uint32(0),
            created_at_time=uint64(int(time.time())),
            to_puzzle_hash=new_full_puzzle.get_tree_hash(),
            amount=uint64(1),
            fee_amount=fee,
            confirmed=False,
            sent=uint32(0),
            spend_bundle=signed_spend_bundle,
            additions=signed_spend_bundle.additions(),
            removals=signed_spend_bundle.removals(),
            wallet_id=self.id(),
            sent_to=[],
            trade_id=None,
            type=uint32(TransactionType.OUTGOING_TX.value),
            name=signed_spend_bundle.name(),
        )
        return tx_record
    @staticmethod
    async def generate_launcher_spend(
        standard_wallet: Wallet,
        amount: uint64,
        initial_target_state: PoolState,
        genesis_challenge: bytes32,
        delay_time: uint64,
        delay_ph: bytes32,
    ) -> Tuple[SpendBundle, bytes32, bytes32]:
        """
        Creates the initial singleton, which includes spending an origin coin, the launcher, and creating a singleton
        with the "pooling" inner state, which can be either self pooling or using a pool

        Returns (aggregated spend bundle, full pooling puzzle hash, launcher coin id).
        """
        coins: Set[Coin] = await standard_wallet.select_coins(amount)
        if coins is None:
            raise ValueError("Not enough coins to create pool wallet")
        assert len(coins) == 1
        launcher_parent: Coin = coins.copy().pop()
        genesis_launcher_puz: Program = SINGLETON_LAUNCHER
        launcher_coin: Coin = Coin(launcher_parent.name(), genesis_launcher_puz.get_tree_hash(), amount)
        # Annotation corrected: create_waiting_room_inner_puzzle returns a puzzle
        # (it is passed to create_full_puzzle and .get_tree_hash() is taken below),
        # not a bytes32.
        escaping_inner_puzzle: Program = create_waiting_room_inner_puzzle(
            initial_target_state.target_puzzle_hash,
            initial_target_state.relative_lock_height,
            initial_target_state.owner_pubkey,
            launcher_coin.name(),
            genesis_challenge,
            delay_time,
            delay_ph,
        )
        escaping_inner_puzzle_hash = escaping_inner_puzzle.get_tree_hash()
        self_pooling_inner_puzzle: Program = create_pooling_inner_puzzle(
            initial_target_state.target_puzzle_hash,
            escaping_inner_puzzle_hash,
            initial_target_state.owner_pubkey,
            launcher_coin.name(),
            genesis_challenge,
            delay_time,
            delay_ph,
        )
        if initial_target_state.state == SELF_POOLING:
            puzzle = escaping_inner_puzzle
        elif initial_target_state.state == FARMING_TO_POOL:
            puzzle = self_pooling_inner_puzzle
        else:
            raise ValueError("Invalid initial state")
        full_pooling_puzzle: Program = create_full_puzzle(puzzle, launcher_id=launcher_coin.name())
        puzzle_hash: bytes32 = full_pooling_puzzle.get_tree_hash()
        pool_state_bytes = Program.to([("p", bytes(initial_target_state)), ("t", delay_time), ("h", delay_ph)])
        # The origin-coin spend asserts this announcement, tying it to the launcher spend.
        announcement_set: Set[bytes32] = set()
        announcement_message = Program.to([puzzle_hash, amount, pool_state_bytes]).get_tree_hash()
        announcement_set.add(Announcement(launcher_coin.name(), announcement_message).name())
        create_launcher_tx_record: Optional[TransactionRecord] = await standard_wallet.generate_signed_transaction(
            amount,
            genesis_launcher_puz.get_tree_hash(),
            uint64(0),
            None,
            coins,
            None,
            False,
            announcement_set,
        )
        assert create_launcher_tx_record is not None and create_launcher_tx_record.spend_bundle is not None
        genesis_launcher_solution: Program = Program.to([puzzle_hash, amount, pool_state_bytes])
        launcher_cs: CoinSpend = CoinSpend(
            launcher_coin,
            SerializedProgram.from_program(genesis_launcher_puz),
            SerializedProgram.from_program(genesis_launcher_solution),
        )
        # The launcher puzzle needs no signature of its own.
        launcher_sb: SpendBundle = SpendBundle([launcher_cs], G2Element())
        # Current inner will be updated when state is verified on the blockchain
        full_spend: SpendBundle = SpendBundle.aggregate([create_launcher_tx_record.spend_bundle, launcher_sb])
        return full_spend, puzzle_hash, launcher_coin.name()
    async def join_pool(self, target_state: PoolState, fee: uint64) -> Tuple[uint64, TransactionRecord]:
        """Begin transitioning the singleton to FARMING_TO_POOL at *target_state*.

        Returns (total fee across all required transitions, first travel transaction).
        """
        if target_state.state != FARMING_TO_POOL:
            raise ValueError(f"join_pool must be called with target_state={FARMING_TO_POOL} (FARMING_TO_POOL)")
        if self.target_state is not None:
            raise ValueError(f"Cannot join a pool while waiting for target state: {self.target_state}")
        if await self.have_unconfirmed_transaction():
            raise ValueError(
                "Cannot join pool due to unconfirmed transaction. If this is stuck, delete the unconfirmed transaction."
            )
        current_state: PoolWalletInfo = await self.get_current_state()
        total_fee = fee
        if current_state.current == target_state:
            self.target_state = None
            msg = f"Asked to change to current state. Target = {target_state}"
            self.log.info(msg)
            raise ValueError(msg)
        elif current_state.current.state in [SELF_POOLING, LEAVING_POOL]:
            total_fee = fee
        elif current_state.current.state == FARMING_TO_POOL:
            # Leaving one pool for another requires two transitions (leave + join),
            # hence two fees.
            total_fee = uint64(fee * 2)
        if self.target_state is not None:
            raise ValueError(
                f"Cannot change to state {target_state} when already having target state: {self.target_state}"
            )
        PoolWallet._verify_initial_target_state(target_state)
        if current_state.current.state == LEAVING_POOL:
            # Enforce the waiting room: cannot re-join until relative_lock_height
            # blocks have passed since the leave spend.
            history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
            last_height: uint32 = history[-1][0]
            if self.wallet_state_manager.get_peak().height <= last_height + current_state.current.relative_lock_height:
                raise ValueError(
                    f"Cannot join a pool until height {last_height + current_state.current.relative_lock_height}"
                )
        self.target_state = target_state
        self.next_transaction_fee = fee
        tx_record: TransactionRecord = await self.generate_travel_transaction(fee)
        await self.wallet_state_manager.add_pending_transaction(tx_record)
        return total_fee, tx_record
    async def self_pool(self, fee: uint64) -> Tuple[uint64, TransactionRecord]:
        """Begin transitioning the singleton back to SELF_POOLING.

        Returns (total fee across all required transitions, first travel transaction).
        """
        if await self.have_unconfirmed_transaction():
            raise ValueError(
                "Cannot self pool due to unconfirmed transaction. If this is stuck, delete the unconfirmed transaction."
            )
        pool_wallet_info: PoolWalletInfo = await self.get_current_state()
        if pool_wallet_info.current.state == SELF_POOLING:
            raise ValueError("Attempted to self pool when already self pooling")
        if self.target_state is not None:
            raise ValueError(f"Cannot self pool when already having target state: {self.target_state}")
        # Note the implications of getting owner_puzzlehash from our local wallet right now
        # vs. having pre-arranged the target self-pooling address
        owner_puzzlehash = await self.standard_wallet.get_new_puzzlehash()
        owner_pubkey = pool_wallet_info.current.owner_pubkey
        current_state: PoolWalletInfo = await self.get_current_state()
        # Two transitions (leave pool + enter self-pooling) unless already leaving.
        total_fee = uint64(fee * 2)
        if current_state.current.state == LEAVING_POOL:
            total_fee = fee
            history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
            last_height: uint32 = history[-1][0]
            if self.wallet_state_manager.get_peak().height <= last_height + current_state.current.relative_lock_height:
                raise ValueError(
                    f"Cannot self pool until height {last_height + current_state.current.relative_lock_height}"
                )
        self.target_state = create_pool_state(
            SELF_POOLING, owner_puzzlehash, owner_pubkey, pool_url=None, relative_lock_height=uint32(0)
        )
        self.next_transaction_fee = fee
        tx_record = await self.generate_travel_transaction(fee)
        await self.wallet_state_manager.add_pending_transaction(tx_record)
        return total_fee, tx_record
    async def claim_pool_rewards(self, fee: uint64) -> TransactionRecord:
        """Absorb farmed reward coins held at the p2_singleton puzzle hash into the singleton."""
        # Search for p2_puzzle_hash coins, and spend them with the singleton
        if await self.have_unconfirmed_transaction():
            raise ValueError(
                "Cannot claim due to unconfirmed transaction. If this is stuck, delete the unconfirmed transaction."
            )
        unspent_coin_records: List[CoinRecord] = list(
            await self.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(self.wallet_id)
        )
        if len(unspent_coin_records) == 0:
            raise ValueError("Nothing to claim, no transactions to p2_singleton_puzzle_hash")
        farming_rewards: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_farming_rewards()
        # Map each reward coin to the height at which it was farmed; absorb spends
        # need the height to reconstruct the coinbase parent.
        coin_to_height_farmed: Dict[Coin, uint32] = {}
        for tx_record in farming_rewards:
            height_farmed: Optional[uint32] = tx_record.height_farmed(
                self.wallet_state_manager.constants.GENESIS_CHALLENGE
            )
            assert height_farmed is not None
            coin_to_height_farmed[tx_record.additions[0]] = height_farmed
        history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
        assert len(history) > 0
        delayed_seconds, delayed_puzhash = get_delayed_puz_info_from_launcher_spend(history[0][1])
        current_state: PoolWalletInfo = await self.get_current_state()
        last_solution: CoinSpend = history[-1][1]
        all_spends: List[CoinSpend] = []
        total_amount = 0
        for coin_record in unspent_coin_records:
            if coin_record.coin not in coin_to_height_farmed:
                continue
            if len(all_spends) >= 100:
                # Limit the total number of spends, so it fits into the block
                break
            # Each absorb chains off the previous one, advancing the singleton tip.
            absorb_spend: List[CoinSpend] = create_absorb_spend(
                last_solution,
                current_state.current,
                current_state.launcher_coin,
                coin_to_height_farmed[coin_record.coin],
                self.wallet_state_manager.constants.GENESIS_CHALLENGE,
                delayed_seconds,
                delayed_puzhash,
            )
            last_solution = absorb_spend[0]
            all_spends += absorb_spend
            total_amount += coin_record.coin.amount
            self.log.info(
                f"Farmer coin: {coin_record.coin} {coin_record.coin.name()} {coin_to_height_farmed[coin_record.coin]}"
            )
        if len(all_spends) == 0:
            raise ValueError("Nothing to claim, no unspent coinbase rewards")
        # No signatures are required to absorb
        spend_bundle: SpendBundle = SpendBundle(all_spends, G2Element())
        absorb_transaction: TransactionRecord = TransactionRecord(
            confirmed_at_height=uint32(0),
            created_at_time=uint64(int(time.time())),
            to_puzzle_hash=current_state.current.target_puzzle_hash,
            amount=uint64(total_amount),
            fee_amount=fee,
            confirmed=False,
            sent=uint32(0),
            spend_bundle=spend_bundle,
            additions=spend_bundle.additions(),
            removals=spend_bundle.removals(),
            wallet_id=uint32(self.wallet_id),
            sent_to=[],
            trade_id=None,
            type=uint32(TransactionType.OUTGOING_TX.value),
            name=spend_bundle.name(),
        )
        await self.wallet_state_manager.add_pending_transaction(absorb_transaction)
        return absorb_transaction
    async def new_peak(self, peak: BlockRecord) -> None:
        """Peak callback: submit the second leg of a leave/join once the lock height passes."""
        # This gets called from the WalletStateManager whenever there is a new peak
        pool_wallet_info: PoolWalletInfo = await self.get_current_state()
        tip_height, tip_spend = await self.get_tip()
        if self.target_state is None:
            return
        # NOTE(review): this compares a PoolState to the numeric `.state` field,
        # which can never be equal (apply_state_transitions compares full
        # PoolState objects) — this sanity check looks dead; confirm intent.
        if self.target_state == pool_wallet_info.current.state:
            self.target_state = None
            raise ValueError("Internal error")
        if (
            self.target_state.state in [FARMING_TO_POOL, SELF_POOLING]
            and pool_wallet_info.current.state == LEAVING_POOL
        ):
            leave_height = tip_height + pool_wallet_info.current.relative_lock_height
            # Walk back to the most recent transaction block to compare heights.
            curr: BlockRecord = peak
            while not curr.is_transaction_block:
                curr = self.wallet_state_manager.blockchain.block_record(curr.prev_hash)
            self.log.info(f"Last transaction block height: {curr.height} OK to leave at height {leave_height}")
            # Add some buffer (+2) to reduce chances of a reorg
            if curr.height > leave_height + 2:
                unconfirmed: List[
                    TransactionRecord
                ] = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.wallet_id)
                next_tip: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(tip_spend)
                assert next_tip is not None
                # Skip if a pending transaction already spends the current tip.
                if any([rem.name() == next_tip.name() for tx_rec in unconfirmed for rem in tx_rec.removals]):
                    self.log.info("Already submitted second transaction, will not resubmit.")
                    return
                self.log.info(f"Attempting to leave from\n{pool_wallet_info.current}\nto\n{self.target_state}")
                assert self.target_state.version == POOL_PROTOCOL_VERSION
                assert pool_wallet_info.current.state == LEAVING_POOL
                assert self.target_state.target_puzzle_hash is not None
                if self.target_state.state == SELF_POOLING:
                    assert self.target_state.relative_lock_height == 0
                    assert self.target_state.pool_url is None
                elif self.target_state.state == FARMING_TO_POOL:
                    assert self.target_state.relative_lock_height >= self.MINIMUM_RELATIVE_LOCK_HEIGHT
                    assert self.target_state.pool_url is not None
                tx_record = await self.generate_travel_transaction(self.next_transaction_fee)
                await self.wallet_state_manager.add_pending_transaction(tx_record)
    async def have_unconfirmed_transaction(self) -> bool:
        """Return True if this wallet has any pending (unconfirmed) transaction."""
        unconfirmed: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
            self.wallet_id
        )
        return len(unconfirmed) > 0
async def get_confirmed_balance(self, _=None) -> uint64:
amount: uint64 = uint64(0)
if (await self.get_current_state()).current.state == SELF_POOLING:
unspent_coin_records: List[WalletCoinRecord] = list(
await self.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(self.wallet_id)
)
for record in unspent_coin_records:
if record.coinbase:
amount = uint64(amount + record.coin.amount)
return amount
    async def get_unconfirmed_balance(self, record_list=None) -> uint64:
        """Unconfirmed balance simply mirrors the confirmed balance here."""
        return await self.get_confirmed_balance(record_list)
    async def get_spendable_balance(self, record_list=None) -> uint64:
        """Spendable balance simply mirrors the confirmed balance here."""
        return await self.get_confirmed_balance(record_list)
    async def get_pending_change_balance(self) -> uint64:
        """Always reports zero pending change for this wallet type."""
        return uint64(0)
    async def get_max_send_amount(self, record_list=None) -> uint64:
        # Always zero. NOTE(review): presumably direct sends are not supported
        # through this wallet interface — confirm against callers.
        return uint64(0)
| 44.724215 | 120 | 0.674838 |
ae1d518c86b9f8ef0f5314a63fa4bd3e7873ec0c | 86 | py | Python | python/modules/one/__init__.py | mr-uuid/snippets | 49bb59641d8160d7635b8d5e574cb50f9e5362e2 | [
"MIT"
] | null | null | null | python/modules/one/__init__.py | mr-uuid/snippets | 49bb59641d8160d7635b8d5e574cb50f9e5362e2 | [
"MIT"
] | 1 | 2021-03-10T04:00:01.000Z | 2021-03-10T04:00:01.000Z | python/modules/one/__init__.py | mr-uuid/snippets | 49bb59641d8160d7635b8d5e574cb50f9e5362e2 | [
"MIT"
] | null | null | null | # This will allow is to import one_a when we do from one import *
__all__ = ['one_a']
| 28.666667 | 65 | 0.709302 |
64219329903ed2e255421fc7eac72a9e60efb12d | 2,378 | py | Python | threads.py | mode9/xlsxstyle | 6311c6088672d6a39555bd735e1b443583692289 | [
"MIT"
] | null | null | null | threads.py | mode9/xlsxstyle | 6311c6088672d6a39555bd735e1b443583692289 | [
"MIT"
] | null | null | null | threads.py | mode9/xlsxstyle | 6311c6088672d6a39555bd735e1b443583692289 | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
import datetime
import os
from PyQt5 import QtCore
from PySide2.QtWidgets import QTextBrowser, QProgressBar
from openpyxl import Workbook
from handlers import WorkSheetHandler
class CreateThread(QtCore.QThread):
    """Worker thread that copies styles from *org_wb* onto *target_wb* and
    saves the result as a new, uniquely-named .xlsx next to this script.

    Progress messages are appended to *text_browser*; `finished` is emitted
    once the file has been written.
    """

    finished = QtCore.pyqtSignal()

    def __init__(self, org_wb: Workbook, target_wb: Workbook, text_browser: QTextBrowser, parent=None):
        super().__init__(parent=parent)
        self.org_wb = org_wb
        self.target_wb = target_wb
        self.text_browser = text_browser

    def get_handler(self, workbook: Workbook):
        """Wrap *workbook*'s "GI" sheet in a WorkSheetHandler and log the load."""
        SHEET_NAME = "GI"
        hdlr = WorkSheetHandler(workbook, sheet_name=SHEET_NAME)
        self.text_browser.insertPlainText(f"INFO: 시트 로딩 완료 ({SHEET_NAME})\n")
        return hdlr

    def get_new_filename(self) -> str:
        """Return an absolute output path (without extension) of the form
        <script dir>/<YYYYMMDD>[counter] that is free once '.xlsx' is appended.
        """
        today: datetime.date = datetime.date.today()
        # Anchor next to this script. dirname() of the absolute path is
        # required: the script's own path is a file, not a directory.
        base: str = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 today.strftime("%Y%m%d"))
        return self.check_unique_filename(base)

    def check_unique_filename(self, filename: str, extra: str = '') -> str:
        """Append an increasing numeric suffix until the name is unused.

        Existence is checked against the final on-disk name (run() appends
        '.xlsx'). Bug fixes vs. the previous version: the chosen suffix is
        now actually returned (it used to be dropped, so collisions were
        silently overwritten), and the extension is included in the check.
        """
        if os.path.isfile(filename + extra + '.xlsx'):
            extra = str(int(extra) + 1) if extra else '1'
            return self.check_unique_filename(filename, extra)
        return filename + extra

    def run(self) -> None:
        """Thread body: copy styles, save the new workbook, emit `finished`."""
        org_handler = self.get_handler(self.org_wb)
        target_handler = self.get_handler(self.target_wb)
        org_handler.copy_styles(target_handler)
        self.text_browser.insertPlainText("INFO: 스타일 복사 완료\n")
        fn = self.get_new_filename() + '.xlsx'
        # get_new_filename() already returns a full path; the previous
        # os.path.join(os.path.abspath(__file__), fn) joined onto a file path
        # and produced an invalid location.
        self.target_wb.save(fn)
        self.text_browser.insertPlainText(f"INFO: 파일 생성 완료 ({fn}) \n")
        self.finished.emit()
        self.org_wb.close()
        self.target_wb.close()
class ProgressThread(QtCore.QThread):
    """Toggles the attached QProgressBar between busy and determinate modes."""

    def __init__(self, pg_bar: QProgressBar, parent=None):
        super().__init__(parent=parent)
        self.pg_bar = pg_bar
        self._status = False

    def run(self):
        # Start out determinate (0-100).
        self.pg_bar.setRange(0, 100)

    def toggle_status(self):
        # A (0, 0) range puts the bar into indeterminate/busy mode.
        self._status = not self._status
        self.pg_bar.setRange(0, 0 if self._status else 100)

    @property
    def status(self):
        return self._status

    def __del__(self):
        self.wait()
cea9879cd87b98926147d506c5037b6dca2dd136 | 1,615 | py | Python | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/duration_extractor_config.py | AhmedLeithy/Recognizers-Text | f5426e38a09d3974fc0979b7803a4cd17258ea62 | [
"MIT"
] | 688 | 2019-05-08T02:56:21.000Z | 2022-03-30T07:26:15.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/duration_extractor_config.py | AhmedLeithy/Recognizers-Text | f5426e38a09d3974fc0979b7803a4cd17258ea62 | [
"MIT"
] | 840 | 2019-05-07T07:00:02.000Z | 2022-03-30T14:52:11.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/duration_extractor_config.py | AhmedLeithy/Recognizers-Text | f5426e38a09d3974fc0979b7803a4cd17258ea62 | [
"MIT"
] | 283 | 2019-05-07T07:52:12.000Z | 2022-03-27T02:27:58.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Dict
from recognizers_text import RegExpUtility
from recognizers_number.culture import Culture, CultureInfo
from recognizers_number_with_unit.number_with_unit.chinese.extractors import ChineseNumberWithUnitExtractorConfiguration
from ...resources.chinese_date_time import ChineseDateTime
from ..constants import Constants
class ChineseDurationExtractorConfiguration(ChineseNumberWithUnitExtractorConfiguration):
    """Configuration wiring Chinese duration resources into the generic
    number-with-unit extractor."""

    @property
    def suffix_list(self) -> Dict[str, str]:
        # Duration unit suffixes (from ChineseDateTime.DurationSuffixList).
        return self._suffix_list

    @property
    def prefix_list(self) -> Dict[str, str]:
        # Intentionally empty: Chinese durations carry no unit prefixes here.
        return self._prefix_list

    @property
    def ambiguous_unit_list(self) -> str:
        # Units that need disambiguation (ChineseDateTime.DurationAmbiguousUnits).
        return self._ambiguous_unit_list

    @property
    def extract_type(self) -> str:
        # Constant tag identifying results as datetime durations.
        return self._extract_type

    @property
    def year_regex(self):
        return self._year_regex

    @property
    def half_suffix_regex(self):
        return self._half_suffix_regex

    def __init__(self):
        super().__init__(CultureInfo(Culture.Chinese))
        self._year_regex = RegExpUtility.get_safe_reg_exp(
            ChineseDateTime.DurationYearRegex
        )
        self._half_suffix_regex = RegExpUtility.get_safe_reg_exp(
            ChineseDateTime.DurationHalfSuffixRegex
        )
        self._extract_type = Constants.SYS_DATETIME_DURATION
        self._suffix_list = ChineseDateTime.DurationSuffixList
        self._prefix_list = dict()
        self._ambiguous_unit_list = ChineseDateTime.DurationAmbiguousUnits
fd28be08e1710bde1dcf461fe87f6442b3d06cc5 | 4,115 | py | Python | examples/slim/preprocessing/cifarnet_preprocessing.py | TensorFlowHub/TensorFlowOnSpark | 1120406f69a827cb6ca9722e9778499d52891c96 | [
"Apache-2.0"
] | 1 | 2017-02-15T14:03:50.000Z | 2017-02-15T14:03:50.000Z | examples/slim/preprocessing/cifarnet_preprocessing.py | leereilly/TensorFlowOnSpark | 42606480125e0cd163fdf5e8ef977b0ced61beb3 | [
"Apache-2.0"
] | null | null | null | examples/slim/preprocessing/cifarnet_preprocessing.py | leereilly/TensorFlowOnSpark | 42606480125e0cd163fdf5e8ef977b0ced61beb3 | [
"Apache-2.0"
] | 1 | 2020-12-06T03:15:24.000Z | 2020-12-06T03:15:24.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images in CIFAR-10.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Pixels of zero padding added on each side before the random crop in
# preprocess_for_train.
_PADDING = 4

slim = tf.contrib.slim  # NOTE(review): appears unused in this module — confirm.
def preprocess_for_train(image,
                         output_height,
                         output_width,
                         padding=_PADDING):
  """Preprocesses the given image for training.

  Pads, randomly crops and flips the image, applies random brightness and
  contrast jitter, then whitens the pixels.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    padding: The amount of padding before and after each dimension of the image.

  Returns:
    A preprocessed image.
  """
  tf.summary.image('image', tf.expand_dims(image, 0))
  # Work in floats from here on.
  image = tf.to_float(image)
  if padding > 0:
    image = tf.pad(image, [[padding, padding], [padding, padding], [0, 0]])
  # Randomly crop a [height, width] section, then randomly mirror it.
  distorted = tf.random_crop(image, [output_height, output_width, 3])
  distorted = tf.image.random_flip_left_right(distorted)
  tf.summary.image('distorted_image', tf.expand_dims(distorted, 0))
  # These two jitters are not commutative; consider randomizing their order.
  distorted = tf.image.random_brightness(distorted, max_delta=63)
  distorted = tf.image.random_contrast(distorted, lower=0.2, upper=1.8)
  # Subtract off the mean and divide by the variance of the pixels.
  return tf.image.per_image_whitening(distorted)
def preprocess_for_eval(image, output_height, output_width):
  """Preprocesses the given image for evaluation.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.

  Returns:
    A preprocessed image.
  """
  tf.summary.image('image', tf.expand_dims(image, 0))
  # Transform the image to floats.
  image = tf.to_float(image)

  # Resize and crop if needed.
  # Bug fix: resize_image_with_crop_or_pad's signature is
  # (image, target_height, target_width); the arguments were previously
  # passed as (width, height), mis-sizing any non-square output.
  resized_image = tf.image.resize_image_with_crop_or_pad(image,
                                                         output_height,
                                                         output_width)
  tf.summary.image('resized_image', tf.expand_dims(resized_image, 0))

  # Subtract off the mean and divide by the variance of the pixels.
  return tf.image.per_image_whitening(resized_image)
def preprocess_image(image, output_height, output_width, is_training=False):
  """Preprocesses the given image.

  Dispatches to the training or evaluation pipeline based on *is_training*.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  preprocess_fn = preprocess_for_train if is_training else preprocess_for_eval
  return preprocess_fn(image, output_height, output_width)
| 35.782609 | 80 | 0.684812 |
4159cada94ab83900b9517f60873408103c2da66 | 16,979 | py | Python | venv/lib/python3.8/site-packages/matplotlib/testing/compare.py | pashapishdad/Artificial_Intelligence_A2 | a151370b2453fd66227b2e4815c100f47317246c | [
"Apache-2.0"
] | 4 | 2020-07-15T20:03:28.000Z | 2021-06-09T11:23:06.000Z | seleniumenv/lib/python3.8/site-packages/matplotlib/testing/compare.py | ethanmoyer/NIREUS | dfe1fcb078bebb38a6a75a051bc8e1b4661d986c | [
"MIT"
] | 5 | 2021-04-25T08:16:09.000Z | 2022-03-12T00:42:14.000Z | seleniumenv/lib/python3.8/site-packages/matplotlib/testing/compare.py | ethanmoyer/NIREUS | dfe1fcb078bebb38a6a75a051bc8e1b4661d986c | [
"MIT"
] | 3 | 2019-05-18T21:32:31.000Z | 2019-07-26T11:05:46.000Z | """
Provides a collection of utilities for comparing (image) results.
"""
import atexit
import hashlib
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
from tempfile import TemporaryFile
import numpy as np
import matplotlib as mpl
from matplotlib.testing.exceptions import ImageComparisonFailure
from matplotlib import cbook
__all__ = ['compare_images', 'comparable_formats']
def make_test_filename(fname, purpose):
    """
    Make a new filename by inserting *purpose* before the file's extension.
    """
    root, ext = os.path.splitext(fname)
    return f'{root}-{purpose}{ext}'
def get_cache_dir():
    """Return a writable test-cache directory inside matplotlib's cache dir,
    or None if it cannot be created or written to."""
    root = mpl.get_cachedir()
    if root is None:
        raise RuntimeError('Could not find a suitable configuration directory')
    cache_dir = os.path.join(root, 'test_cache')
    try:
        Path(cache_dir).mkdir(parents=True, exist_ok=True)
    except IOError:
        return None
    return cache_dir if os.access(cache_dir, os.W_OK) else None
def get_file_hash(path, block_size=2 ** 20):
    """Return an MD5 hex digest of the file at *path*, read in chunks.

    For pdf/svg files the digest also folds in the Ghostscript/Inkscape
    version, so the hash changes when the converter changes.
    """
    md5 = hashlib.md5()
    with open(path, 'rb') as fd:
        for chunk in iter(lambda: fd.read(block_size), b''):
            md5.update(chunk)

    if path.endswith('.pdf'):
        md5.update(str(mpl._get_executable_info("gs").version)
                   .encode('utf-8'))
    elif path.endswith('.svg'):
        md5.update(str(mpl._get_executable_info("inkscape").version)
                   .encode('utf-8'))

    return md5.hexdigest()
def make_external_conversion_command(cmd):
    """Wrap *cmd* (a callable mapping (old, new) to an argv list) in a
    converter function that runs it and raises IOError on failure."""
    def convert(old, new):
        cmdline = cmd(old, new)
        pipe = subprocess.Popen(cmdline, universal_newlines=True,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = pipe.communicate()
        errcode = pipe.wait()
        # Failure is either a nonzero exit or a missing output file.
        if not os.path.exists(new) or errcode:
            parts = ["Conversion command failed:\n%s\n" % ' '.join(cmdline)]
            if stdout:
                parts.append("Standard output:\n%s\n" % stdout)
            if stderr:
                parts.append("Standard error:\n%s\n" % stderr)
            raise IOError(''.join(parts))

    return convert
# Modified from https://bugs.python.org/issue25567.
_find_unsafe_bytes = re.compile(br'[^a-zA-Z0-9_@%+=:,./-]').search
def _shlex_quote_bytes(b):
return (b if _find_unsafe_bytes(b) is None
else b"'" + b.replace(b"'", b"'\"'\"'") + b"'")
class _ConverterError(Exception):
    """Raised when a converter subprocess's output stream ends before the
    expected prompt is seen (see `_Converter._read_until`)."""
    pass
class _Converter:
    """Base class for converters that drive a long-lived helper process.

    Subclasses start the process lazily in ``__call__`` and keep it alive
    between conversions; this class provides teardown and a shared helper
    for reading the process's output up to a prompt.
    """

    def __init__(self):
        # Handle to the helper process; started lazily by subclasses.
        self._proc = None
        # Explicitly register deletion from an atexit handler because if we
        # wait until the object is GC'd (which occurs later), then some module
        # globals (e.g. signal.SIGKILL) has already been set to None, and
        # kill() doesn't work anymore...
        atexit.register(self.__del__)

    def __del__(self):
        # Kill the helper and close its pipes; may run via atexit as well as
        # normal finalization, so the _proc = None reset keeps it one-shot.
        if self._proc:
            self._proc.kill()
            self._proc.wait()
            for stream in filter(None, [self._proc.stdin,
                                        self._proc.stdout,
                                        self._proc.stderr]):
                stream.close()
            self._proc = None

    def _read_until(self, terminator):
        """Read until the prompt is reached."""
        buf = bytearray()
        while True:
            # Byte-at-a-time read: the prompt is not newline-terminated.
            c = self._proc.stdout.read(1)
            if not c:
                # Stream closed before the terminator: the helper died.
                raise _ConverterError
            buf.extend(c)
            if buf.endswith(terminator):
                return bytes(buf[:-len(terminator)])
class _GSConverter(_Converter):
    """Convert PostScript/PDF files to png by scripting one persistent
    Ghostscript process through its stdin/stdout prompt."""

    def __call__(self, orig, dest):
        if not self._proc:
            self._proc = subprocess.Popen(
                [mpl._get_executable_info("gs").executable,
                 "-dNOSAFER", "-dNOPAUSE", "-sDEVICE=png16m"],
                # As far as I can see, ghostscript never outputs to stderr.
                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            try:
                self._read_until(b"\nGS")
            except _ConverterError:
                raise OSError("Failed to start Ghostscript")

        def encode_and_escape(name):
            # PostScript string literals need backslashes and parens escaped.
            return (os.fsencode(name)
                    .replace(b"\\", b"\\\\")
                    .replace(b"(", br"\(")
                    .replace(b")", br"\)"))

        self._proc.stdin.write(
            b"<< /OutputFile ("
            + encode_and_escape(dest)
            + b") >> setpagedevice ("
            + encode_and_escape(orig)
            + b") run flush\n")
        self._proc.stdin.flush()
        # GS> if nothing left on the stack; GS<n> if n items left on the stack.
        err = self._read_until(b"GS")
        stack = self._read_until(b">")
        if stack or not os.path.exists(dest):
            # Pop leftovers so the persistent process stays usable, then fail.
            stack_size = int(stack[1:]) if stack else 0
            self._proc.stdin.write(b"pop\n" * stack_size)
            # Using the systemencoding should at least get the filenames right.
            raise ImageComparisonFailure(
                (err + b"GS" + stack + b">")
                .decode(sys.getfilesystemencoding(), "replace"))
class _SVGConverter(_Converter):
    """Convert svg files to png by driving one persistent Inkscape process
    through its ``--shell`` interactive mode."""

    def __call__(self, orig, dest):
        if (not self._proc  # First run.
                or self._proc.poll() is not None):  # Inkscape terminated.
            env = os.environ.copy()
            # If one passes e.g. a png file to Inkscape, it will try to
            # query the user for conversion options via a GUI (even with
            # `--without-gui`). Unsetting `DISPLAY` prevents this (and causes
            # GTK to crash and Inkscape to terminate, but that'll just be
            # reported as a regular exception below).
            env.pop("DISPLAY", None)  # May already be unset.
            # Do not load any user options.
            env["INKSCAPE_PROFILE_DIR"] = os.devnull
            # Old versions of Inkscape (0.48.3.1, used on Travis as of now)
            # seem to sometimes deadlock when stderr is redirected to a pipe,
            # so we redirect it to a temporary file instead. This is not
            # necessary anymore as of Inkscape 0.92.1.
            stderr = TemporaryFile()
            self._proc = subprocess.Popen(
                ["inkscape", "--without-gui", "--shell"],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=stderr, env=env)
            # Slight abuse, but makes shutdown handling easier.
            self._proc.stderr = stderr
            try:
                self._read_until(b"\n>")
            except _ConverterError:
                raise OSError("Failed to start Inkscape in interactive mode")

        # Inkscape uses glib's `g_shell_parse_argv`, which has a consistent
        # behavior across platforms, so we can just use `shlex.quote`.
        orig_b, dest_b = map(_shlex_quote_bytes,
                             map(os.fsencode, [orig, dest]))
        if b"\n" in orig_b or b"\n" in dest_b:
            # Who knows whether the current folder name has a newline, or if
            # our encoding is even ASCII compatible... Just fall back on the
            # slow solution (Inkscape uses `fgets` so it will always stop at a
            # newline).
            return make_external_conversion_command(lambda old, new: [
                'inkscape', '-z', old, '--export-png', new])(orig, dest)
        self._proc.stdin.write(orig_b + b" --export-png=" + dest_b + b"\n")
        self._proc.stdin.flush()
        try:
            self._read_until(b"\n>")
        except _ConverterError:
            # Inkscape's output is not localized but gtk's is, so the output
            # stream probably has a mixed encoding. Using the filesystem
            # encoding should at least get the filenames right...
            self._proc.stderr.seek(0)
            raise ImageComparisonFailure(
                self._proc.stderr.read().decode(
                    sys.getfilesystemencoding(), "replace"))
def _update_converter():
    """Register the pdf/eps and svg converters in the module-level
    `converter` table, for each external tool that is actually present."""
    probes = [
        ("gs", ("pdf", "eps"), _GSConverter),
        ("inkscape", ("svg",), _SVGConverter),
    ]
    for executable, extensions, factory in probes:
        try:
            mpl._get_executable_info(executable)
        except mpl.ExecutableNotFoundError:
            continue
        instance = factory()
        for extension in extensions:
            converter[extension] = instance
#: A dictionary mapping filename extensions to callables taking ``old`` and
#: ``new`` filenames and converting ``old`` to a png at ``new``.  Populated
#: by `_update_converter` according to which external tools are available.
converter = {}
_update_converter()
def comparable_formats():
    """
    Return the list of file formats that `.compare_images` can compare
    on this system.

    Returns
    -------
    supported_formats : list of str
        E.g. ``['png', 'pdf', 'svg', 'eps']``.
    """
    # png always works; everything else depends on an external converter.
    return ['png'] + list(converter)
def convert(filename, cache):
    """
    Convert the named file to png; return the name of the created file.

    If *cache* is True, the result of the conversion is cached in
    `matplotlib.get_cachedir() + '/test_cache/'`. The caching is based on a
    hash of the exact contents of the input file. There is no limit on the
    size of the cache, so it may need to be manually cleared periodically.
    """
    # NOTE(review): assumes *filename* contains a '.'; rsplit raises
    # ValueError otherwise — confirm callers always pass an extension.
    base, extension = os.fspath(filename).rsplit('.', 1)
    if extension not in converter:
        import pytest
        pytest.skip(f"Don't know how to convert {extension} files to png")
    newname = base + '_' + extension + '.png'
    if not os.path.exists(filename):
        raise IOError("'%s' does not exist" % filename)
    # Only convert the file if the destination doesn't already exist or
    # is out of date.
    if (not os.path.exists(newname) or
            os.stat(newname).st_mtime < os.stat(filename).st_mtime):
        if cache:
            cache_dir = get_cache_dir()
        else:
            cache_dir = None

        if cache_dir is not None:
            # Reuse a previously converted copy keyed by the source file hash.
            hash_value = get_file_hash(filename)
            new_ext = os.path.splitext(newname)[1]
            cached_file = os.path.join(cache_dir, hash_value + new_ext)
            if os.path.exists(cached_file):
                shutil.copyfile(cached_file, newname)
                return newname

        converter[extension](filename, newname)

        if cache_dir is not None:
            # Store the fresh conversion for next time.
            shutil.copyfile(newname, cached_file)

    return newname
def crop_to_same(actual_path, actual_image, expected_path, expected_image):
    """Center-crop *actual_image* down to *expected_image*'s size, but only
    for eps-vs-pdf comparisons; other pairs are returned unchanged.

    The converted file names embed the original extension as
    ``..._eps.png`` / ``..._pdf.png``, which is what the path check keys on.
    """
    if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
        a_d0, a_d1 = actual_image.shape[0], actual_image.shape[1]
        e_d0, e_d1 = expected_image.shape[0], expected_image.shape[1]
        start0 = int(a_d0 / 2 - e_d0 / 2)
        stop0 = int(a_d0 / 2 + e_d0 / 2)
        start1 = int(a_d1 / 2 - e_d1 / 2)
        stop1 = int(a_d1 / 2 + e_d1 / 2)
        actual_image = actual_image[start0:stop0, start1:stop1]
    return actual_image, expected_image
def calculate_rms(expected_image, actual_image):
    "Calculate the per-pixel errors, then compute the root mean square error."
    if expected_image.shape != actual_image.shape:
        raise ImageComparisonFailure(
            "Image sizes do not match expected size: {} "
            "actual size {}".format(expected_image.shape, actual_image.shape))
    # Convert to float to avoid overflowing finite integer types.
    diff = (expected_image - actual_image).astype(float)
    return np.sqrt(np.mean(diff ** 2))
def compare_images(expected, actual, tol, in_decorator=False):
    """
    Compare two "image" files checking differences within a tolerance.

    The two given filenames may point to files which are convertible to
    PNG via the `.converter` dictionary. The underlying RMS is calculated
    with the `.calculate_rms` function.

    Parameters
    ----------
    expected : str
        The filename of the expected image.
    actual : str
        The filename of the actual image.
    tol : float
        The tolerance (a color value difference, where 255 is the
        maximal difference). The test fails if the average pixel
        difference is greater than this value.
    in_decorator : bool
        Determines the output format. If called from image_comparison
        decorator, this should be True. (default=False)

    Returns
    -------
    comparison_result : None or dict or str
        Return *None* if the images are equal within the given tolerance.

        If the images differ, the return value depends on *in_decorator*.
        If *in_decorator* is true, a dict with the following entries is
        returned:

        - *rms*: The RMS of the image difference.
        - *expected*: The filename of the expected image.
        - *actual*: The filename of the actual image.
        - *diff_image*: The filename of the difference image.
        - *tol*: The comparison tolerance.

        Otherwise, a human-readable multi-line string representation of this
        information is returned.

    Examples
    --------
    ::

        img1 = "./baseline/plot.png"
        img2 = "./output/plot.png"
        compare_images(img1, img2, 0.001)

    """
    # Local import, as elsewhere in this module.
    from matplotlib import _png

    actual = os.fspath(actual)
    if not os.path.exists(actual):
        raise Exception("Output image %s does not exist." % actual)
    if os.stat(actual).st_size == 0:
        raise Exception("Output image file %s is empty." % actual)

    # Convert the image to png
    expected = os.fspath(expected)
    if not os.path.exists(expected):
        raise IOError('Baseline image %r does not exist.' % expected)
    extension = expected.split('.')[-1]
    # Non-png inputs are converted to png first; only the baseline side is
    # cached (cache=True), the freshly generated side is not.
    if extension != 'png':
        actual = convert(actual, False)
        expected = convert(expected, True)

    # open the image files and remove the alpha channel (if it exists)
    with open(expected, "rb") as expected_file:
        expected_image = _png.read_png_int(expected_file)[:, :, :3]
    with open(actual, "rb") as actual_file:
        actual_image = _png.read_png_int(actual_file)[:, :, :3]

    actual_image, expected_image = crop_to_same(
        actual, actual_image, expected, expected_image)

    diff_image = make_test_filename(actual, 'failed-diff')

    # With a non-positive tolerance, only exact equality passes; check that
    # cheaply before computing any RMS.
    if tol <= 0:
        if np.array_equal(expected_image, actual_image):
            return None

    # convert to signed integers, so that the images can be subtracted without
    # overflow
    expected_image = expected_image.astype(np.int16)
    actual_image = actual_image.astype(np.int16)

    rms = calculate_rms(expected_image, actual_image)

    if rms <= tol:
        return None

    # Persist a visual diff next to the actual image for debugging.
    save_diff_image(expected, actual, diff_image)

    results = dict(rms=rms, expected=str(expected),
                   actual=str(actual), diff=str(diff_image), tol=tol)

    if not in_decorator:
        # Then the results should be a string suitable for stdout.
        template = ['Error: Image files did not match.',
                    'RMS Value: {rms}',
                    'Expected: \n {expected}',
                    'Actual: \n {actual}',
                    'Difference:\n {diff}',
                    'Tolerance: \n {tol}', ]
        results = '\n '.join([line.format(**results) for line in template])

    return results
def save_diff_image(expected, actual, output):
    '''
    Save an amplified absolute-difference image of *expected* vs *actual*
    to *output*.

    Parameters
    ----------
    expected : str
        File path of expected image.
    actual : str
        File path of actual image.
    output : str
        File path to save difference image to.
    '''
    # Drop alpha channels, similarly to compare_images.
    from matplotlib import _png
    with open(expected, "rb") as expected_file:
        expected_image = _png.read_png(expected_file)[..., :3]
    with open(actual, "rb") as actual_file:
        actual_image = _png.read_png(actual_file)[..., :3]
    actual_image, expected_image = crop_to_same(
        actual, actual_image, expected, expected_image)
    expected_image = np.array(expected_image).astype(float)
    actual_image = np.array(actual_image).astype(float)
    if expected_image.shape != actual_image.shape:
        raise ImageComparisonFailure(
            "Image sizes do not match expected size: {} "
            "actual size {}".format(expected_image.shape, actual_image.shape))
    abs_diff_image = np.abs(expected_image - actual_image)

    # expand differences in luminance domain
    abs_diff_image *= 255 * 10
    save_image_np = np.clip(abs_diff_image, 0, 255).astype(np.uint8)
    height, width, depth = save_image_np.shape

    # The PDF renderer doesn't produce an alpha channel, but the
    # matplotlib PNG writer requires one, so expand the array
    if depth == 3:
        with_alpha = np.empty((height, width, 4), dtype=np.uint8)
        with_alpha[:, :, 0:3] = save_image_np
        save_image_np = with_alpha

    # Hard-code the alpha channel to fully solid
    save_image_np[:, :, 3] = 255

    with open(output, "wb") as output_file:
        _png.write_png(save_image_np, output_file)
b0e4b74c58af9f103600e38225ccc6b453d55b8d | 3,852 | py | Python | hubspot/crm/contacts/models/previous_page.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | hubspot/crm/contacts/models/previous_page.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | hubspot/crm/contacts/models/previous_page.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z | # coding: utf-8
"""
Contacts
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.contacts.configuration import Configuration
class PreviousPage(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> attribute type.
    # attribute_map: attribute name -> json key in the API definition.
    openapi_types = {"before": "str", "link": "str"}

    attribute_map = {"before": "before", "link": "link"}

    def __init__(self, before=None, link=None, local_vars_configuration=None):  # noqa: E501
        """PreviousPage - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._before = None
        self._link = None
        self.discriminator = None

        self.before = before
        if link is not None:
            self.link = link

    @property
    def before(self):
        """Gets the before of this PreviousPage.  # noqa: E501

        :return: The before of this PreviousPage.  # noqa: E501
        :rtype: str
        """
        return self._before

    @before.setter
    def before(self, before):
        """Sets the before of this PreviousPage.

        :param before: The before of this PreviousPage.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and before is None:  # noqa: E501
            raise ValueError("Invalid value for `before`, must not be `None`")  # noqa: E501
        self._before = before

    @property
    def link(self):
        """Gets the link of this PreviousPage.  # noqa: E501

        :return: The link of this PreviousPage.  # noqa: E501
        :rtype: str
        """
        return self._link

    @link.setter
    def link(self, link):
        """Sets the link of this PreviousPage.

        :param link: The link of this PreviousPage.  # noqa: E501
        :type: str
        """
        self._link = link

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PreviousPage):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, PreviousPage):
            return True
        return self.to_dict() != other.to_dict()
5fb9179c1a6be8ff2c455e52cad60fb6d954459c | 233 | py | Python | data_science/np_br3.py | HansBlackCat/Python | 32c69f1f749a46b5bf1a305e385d96b2449c2a28 | [
"Apache-2.0"
] | null | null | null | data_science/np_br3.py | HansBlackCat/Python | 32c69f1f749a46b5bf1a305e385d96b2449c2a28 | [
"Apache-2.0"
] | null | null | null | data_science/np_br3.py | HansBlackCat/Python | 32c69f1f749a46b5bf1a305e385d96b2449c2a28 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
a='k'
x=np.linspace(0,5,50)
y=np.linspace(0,5,50)[:,np.newaxis]
z=np.sin(x)**10+np.cos(10+y*x)*np.cos(x)
plt.imshow(z,origin='lower',exten=[0,5,0,5],cmap='viridis')
plt.colorbar(); | 23.3 | 59 | 0.682403 |
5142b8be0fd894c9fb1dc74b5d975be789a53308 | 267 | py | Python | tests/integration/test_health_endpoint.py | JumaKahiga/cyclo-ops | 87fa24ae5574e9541ce8806401bff410862c961b | [
"MIT"
] | null | null | null | tests/integration/test_health_endpoint.py | JumaKahiga/cyclo-ops | 87fa24ae5574e9541ce8806401bff410862c961b | [
"MIT"
] | null | null | null | tests/integration/test_health_endpoint.py | JumaKahiga/cyclo-ops | 87fa24ae5574e9541ce8806401bff410862c961b | [
"MIT"
] | null | null | null | from tests import EndpointTestCase
class TestHealthCheck(EndpointTestCase):
def test_health_check(self):
resp = self.app_client.get('/api/health')
self.assertEqual(resp.status_code, 200)
assert resp.json == {'service_status': 'healthy'} | 29.666667 | 57 | 0.707865 |
17c4561a640dc2df5b57a548b62b27e7510ec596 | 23,331 | py | Python | hydrus/client/gui/ClientGUITime.py | thatfuckingbird/hydrus-websocket-server | b55454740dca5101448bf92224432f8bdbec7e77 | [
"WTFPL"
] | 1,417 | 2015-01-22T00:50:30.000Z | 2022-03-30T18:44:55.000Z | hydrus/client/gui/ClientGUITime.py | thatfuckingbird/hydrus-websocket-server | b55454740dca5101448bf92224432f8bdbec7e77 | [
"WTFPL"
] | 975 | 2015-01-05T01:41:40.000Z | 2022-03-31T06:01:50.000Z | hydrus/client/gui/ClientGUITime.py | thatfuckingbird/hydrus-websocket-server | b55454740dca5101448bf92224432f8bdbec7e77 | [
"WTFPL"
] | 163 | 2015-02-04T13:09:35.000Z | 2022-03-23T01:00:05.000Z | import os
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
from hydrus.core import HydrusData
from hydrus.core import HydrusGlobals as HG
from hydrus.client import ClientConstants as CC
from hydrus.client.gui import ClientGUIScrolledPanels
from hydrus.client.gui import ClientGUITopLevelWindowsPanels
from hydrus.client.gui import QtPorting as QP
from hydrus.client.gui.widgets import ClientGUICommon
from hydrus.client.importing import ClientImporting
from hydrus.client.importing.options import ClientImportOptions
class EditCheckerOptions( ClientGUIScrolledPanels.EditPanel ):
    """Edit panel for a watcher/subscription's CheckerOptions.

    Offers two mutually exclusive modes:
      * 'reactive checking' — the next check time is derived from the
        observed file velocity and the intended files-per-check;
      * 'static checking'   — a single flat check period.
    Also edits the 'death' file velocity below which checking stops.
    """

    def __init__( self, parent, checker_options ):
        """Build all widgets and populate them from ``checker_options``."""

        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )

        help_button = ClientGUICommon.BetterBitmapButton( self, CC.global_pixmaps().help, self._ShowHelp )
        help_button.setToolTip( 'Show help regarding these checker options.' )

        help_hbox = ClientGUICommon.WrapInText( help_button, self, 'help for this panel -->', object_name = 'HydrusIndeterminate' )

        # local import to avoid a module-level import cycle — TODO confirm
        from hydrus.client import ClientDefaults

        # one-click buttons that load a sensible preset via self.SetValue
        defaults_panel = ClientGUICommon.StaticBox( self, 'reasonable defaults' )

        defaults_1 = ClientGUICommon.BetterButton( defaults_panel, 'thread', self.SetValue, ClientDefaults.GetDefaultCheckerOptions( 'thread' ) )
        defaults_2 = ClientGUICommon.BetterButton( defaults_panel, 'slow thread', self.SetValue, ClientDefaults.GetDefaultCheckerOptions( 'slow thread' ) )
        defaults_3 = ClientGUICommon.BetterButton( defaults_panel, 'faster tag subscription', self.SetValue, ClientDefaults.GetDefaultCheckerOptions( 'fast tag subscription' ) )
        defaults_4 = ClientGUICommon.BetterButton( defaults_panel, 'medium tag/artist subscription', self.SetValue, ClientDefaults.GetDefaultCheckerOptions( 'artist subscription' ) )
        defaults_5 = ClientGUICommon.BetterButton( defaults_panel, 'slower tag subscription', self.SetValue, ClientDefaults.GetDefaultCheckerOptions( 'slow tag subscription' ) )

        #

        # add statictext or whatever that will update on any updates above to say 'given velocity of blah and last check at blah, next check in 5 mins'
        # or indeed this could just take the file_seed cache and last check of the caller, if there is one
        # this would be more useful to the user, to know 'right, on ok, it'll refresh in 30 mins'
        # this is actually more complicated--it also needs last check time to calc a fresh file velocity based on new death_file_velocity

        #

        # bounds for the death-velocity control (files per time delta)
        min_unit_value = 0
        max_unit_value = 1000
        min_time_delta = 60

        self._death_file_velocity = VelocityCtrl( self, min_unit_value, max_unit_value, min_time_delta, days = True, hours = True, minutes = True, per_phrase = 'in', unit = 'files' )

        self._flat_check_period_checkbox = QW.QCheckBox( self )

        #

        # advanced mode unlocks much lower limits (testing / private networks only)
        if HG.client_controller.new_options.GetBoolean( 'advanced_mode' ):

            never_faster_than_min = 1
            never_slower_than_min = 1

            flat_check_period_min = 1

        else:

            never_faster_than_min = 30
            never_slower_than_min = 600

            flat_check_period_min = 180

        self._reactive_check_panel = ClientGUICommon.StaticBox( self, 'reactive checking' )

        self._intended_files_per_check = QP.MakeQSpinBox( self._reactive_check_panel, min=1, max=1000 )
        self._intended_files_per_check.setToolTip( 'How many new files you want the checker to find on each check. If a source is producing about 2 files a day, and this is set to 6, you will probably get a check every three days. You probably want this to be a low number, like 1-4.' )

        self._never_faster_than = TimeDeltaCtrl( self._reactive_check_panel, min = never_faster_than_min, days = True, hours = True, minutes = True, seconds = True )
        self._never_faster_than.setToolTip( 'Even if the download source produces many new files, the checker will never ask for a check more often than this. This is a safety measure.' )

        self._never_slower_than = TimeDeltaCtrl( self._reactive_check_panel, min = never_slower_than_min, days = True, hours = True, minutes = True, seconds = True )
        self._never_slower_than.setToolTip( 'Even if the download source slows down significantly, the checker will make sure it checks at least this often anyway, just to catch a future wave in time.' )

        #

        self._static_check_panel = ClientGUICommon.StaticBox( self, 'static checking' )

        self._flat_check_period = TimeDeltaCtrl( self._static_check_panel, min = flat_check_period_min, days = True, hours = True, minutes = True, seconds = True )
        self._flat_check_period.setToolTip( 'Always use the same check delay. It is based on the time the last check completed, not the time the last check was due. If you want once a day with no skips, try setting this to 23 hours.' )

        #

        # populate widgets before laying them out
        self.SetValue( checker_options )

        #

        defaults_panel.Add( defaults_1, CC.FLAGS_EXPAND_PERPENDICULAR )
        defaults_panel.Add( defaults_2, CC.FLAGS_EXPAND_PERPENDICULAR )
        defaults_panel.Add( defaults_3, CC.FLAGS_EXPAND_PERPENDICULAR )
        defaults_panel.Add( defaults_4, CC.FLAGS_EXPAND_PERPENDICULAR )
        defaults_panel.Add( defaults_5, CC.FLAGS_EXPAND_PERPENDICULAR )

        #

        #

        label = 'This checks more or less frequently based on how fast the download source is producing new files.'

        st = ClientGUICommon.BetterStaticText( self._reactive_check_panel, label = label )

        st.setWordWrap( True )

        rows = []

        rows.append( ( 'intended new files per check: ', self._intended_files_per_check ) )
        rows.append( ( 'never check faster than once per: ', self._never_faster_than ) )
        rows.append( ( 'never check slower than once per: ', self._never_slower_than ) )

        gridbox = ClientGUICommon.WrapInGrid( self._reactive_check_panel, rows )

        self._reactive_check_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
        self._reactive_check_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )

        #

        rows = []

        rows.append( ( 'check period: ', self._flat_check_period ) )

        gridbox = ClientGUICommon.WrapInGrid( self._static_check_panel, rows )

        self._static_check_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )

        #

        rows = []

        rows.append( ( 'stop checking if new files found falls below: ', self._death_file_velocity ) )
        rows.append( ( 'just check at a static, regular interval: ', self._flat_check_period_checkbox ) )

        gridbox = ClientGUICommon.WrapInGrid( self, rows )

        vbox = QP.VBoxLayout()

        QP.AddToLayout( vbox, help_hbox, CC.FLAGS_EXPAND_PERPENDICULAR )

        label = 'If you do not understand this panel, use the buttons! The defaults are fine for most purposes!'

        st = ClientGUICommon.BetterStaticText( self._reactive_check_panel, label = label )

        st.setWordWrap( True )
        st.setObjectName( 'HydrusWarning' )

        QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, defaults_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )

        if HG.client_controller.new_options.GetBoolean( 'advanced_mode' ):

            label = 'As you are in advanced mode, these options have extremely low limits. This is intended only for testing and small scale private network tasks. Do not use very fast check times for real world use on public websites, as it is wasteful and rude, hydrus will be overloaded with high-CPU parsing work, and you may get your IP banned.'

            st = ClientGUICommon.BetterStaticText( self, label = label )
            st.setObjectName( 'HydrusWarning' )

            st.setWordWrap( True )

            QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR )

        QP.AddToLayout( vbox, self._reactive_check_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        QP.AddToLayout( vbox, self._static_check_panel, CC.FLAGS_EXPAND_PERPENDICULAR )

        vbox.addStretch( 1 )

        self.widget().setLayout( vbox )

        #

        self._flat_check_period_checkbox.clicked.connect( self.EventFlatPeriodCheck )

    def _ShowHelp( self ):
        """Pop an information dialog explaining the checker-options model."""

        help = 'The intention of this object is to govern how frequently the watcher or subscription checks for new files--and when it should stop completely.'
        help += os.linesep * 2
        help += 'PROTIP: Do not change anything here unless you understand what it means!'
        help += os.linesep * 2
        help += 'In general, checkers can and should be set up to check faster or slower based on how fast new files are coming in. This is polite to the server you are talking to and saves you CPU and bandwidth. The rate of new files is called the \'file velocity\' and is based on how many files appeared in a certain period before the _most recent check time_.'
        help += os.linesep * 2
        help += 'Once the first check is done and an initial file velocity is established, the time to the next check will be based on what you set for the \'intended files per check\'. If the current file velocity is 10 files per 24 hours, and you set the intended files per check to 5 files, the checker will set the next check time to be 12 hours after the previous check time.'
        help += os.linesep * 2
        help += 'After a check is completed, the new file velocity and next check time is calculated, so when files are being posted frequently, it will check more often. When things are slow, it will slow down as well. There are also minimum and maximum check periods to smooth out the bumps.'
        help += os.linesep * 2
        help += 'But if you would rather just check at a fixed rate, check the checkbox and you will get a simpler \'static checking\' panel.'
        help += os.linesep * 2
        help += 'If the \'file velocity\' drops below a certain amount, the checker considers the source of files dead and will stop checking. If it falls into this state but you think there might have since been a rush of new files, hit the watcher or subscription\'s \'check now\' button in an attempt to revive the checker. If there are new files, it will start checking again until they drop off once more.'
        help += os.linesep * 2
        help += 'If you are still not comfortable with how this system works, the \'reasonable defaults\' are good fallbacks. Most of the time, setting some reasonable rules and leaving checkers to do their work is the best way to deal with this stuff, rather than obsessing over the exact perfect values you want for each situation.'

        QW.QMessageBox.information( self, 'Information', help )

    def _UpdateEnabledControls( self ):
        """Show exactly one of the reactive/static sub-panels, per the checkbox."""

        if self._flat_check_period_checkbox.isChecked():

            self._reactive_check_panel.hide()
            self._static_check_panel.show()

        else:

            self._reactive_check_panel.show()
            self._static_check_panel.hide()

    def EventFlatPeriodCheck( self ):
        """Checkbox click handler — swap the visible sub-panel."""

        self._UpdateEnabledControls()

    def GetValue( self ):
        """Return a new CheckerOptions built from the current widget state.

        In static mode the flat period is used for both the 'never faster'
        and 'never slower' bounds, which encodes a fixed check period.
        """

        death_file_velocity = self._death_file_velocity.GetValue()
        intended_files_per_check = self._intended_files_per_check.value()

        if self._flat_check_period_checkbox.isChecked():

            never_faster_than = self._flat_check_period.GetValue()
            never_slower_than = never_faster_than

        else:

            never_faster_than = self._never_faster_than.GetValue()
            never_slower_than = self._never_slower_than.GetValue()

        return ClientImportOptions.CheckerOptions( intended_files_per_check, never_faster_than, never_slower_than, death_file_velocity )

    def SetValue( self, checker_options ):
        """Load ``checker_options`` into the widgets.

        Equal faster/slower bounds are interpreted as static mode (see
        GetValue), so the checkbox is set from that equality.
        """

        ( intended_files_per_check, never_faster_than, never_slower_than, death_file_velocity ) = checker_options.ToTuple()

        self._intended_files_per_check.setValue( intended_files_per_check )

        self._never_faster_than.SetValue( never_faster_than )
        self._never_slower_than.SetValue( never_slower_than )

        self._death_file_velocity.SetValue( death_file_velocity )

        self._flat_check_period.SetValue( never_faster_than )

        self._flat_check_period_checkbox.setChecked( never_faster_than == never_slower_than )

        self._UpdateEnabledControls()
class TimeDeltaButton( QW.QPushButton ):
    """A push button whose label shows a time delta (or 'monthly').

    Clicking it opens an edit dialog holding a TimeDeltaCtrl; on OK the
    new value is adopted and ``timeDeltaChanged`` is emitted.
    """

    timeDeltaChanged = QC.Signal()

    def __init__( self, parent, min = 1, days = False, hours = False, minutes = False, seconds = False, monthly_allowed = False ):
        """Create the button.

        :param min: minimum permitted delta, in seconds
        :param days/hours/minutes/seconds: which unit spinners to show in the edit dialog
        :param monthly_allowed: whether the special 'monthly' (None) value is offered
        """

        QW.QPushButton.__init__( self, parent )

        self._min = min
        self._show_days = days
        self._show_hours = hours
        self._show_minutes = minutes
        self._show_seconds = seconds
        self._monthly_allowed = monthly_allowed

        # current value in seconds; None means 'monthly'
        self._value = self._min

        self.setText( 'initialising' )

        self.clicked.connect( self.EventButton )

    def _RefreshLabel( self ):
        """Set the button text from the current value."""

        value = self._value

        if value is None:

            text = 'monthly'

        else:

            text = HydrusData.TimeDeltaToPrettyTimeDelta( value )

        self.setText( text )

    def EventButton( self ):
        """Open the edit dialog; on accept, store the value and notify."""

        with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit time delta' ) as dlg:

            panel = ClientGUIScrolledPanels.EditSingleCtrlPanel( dlg )

            control = TimeDeltaCtrl( panel, min = self._min, days = self._show_days, hours = self._show_hours, minutes = self._show_minutes, seconds = self._show_seconds, monthly_allowed = self._monthly_allowed )

            control.SetValue( self._value )

            panel.SetControl( control )

            dlg.SetPanel( panel )

            if dlg.exec() == QW.QDialog.Accepted:

                value = panel.GetValue()

                self.SetValue( value )

                self.timeDeltaChanged.emit()

    def GetValue( self ):
        """Return the current delta in seconds, or None for 'monthly'."""

        return self._value

    def SetValue( self, value ):
        """Set the delta (seconds, or None for 'monthly') and refresh the label."""

        self._value = value

        self._RefreshLabel()
class TimeDeltaCtrl( QW.QWidget ):
    """A compound widget that edits a time delta as days/hours/minutes/seconds.

    The value is a number of seconds; if ``monthly_allowed`` is set, a
    checkbox offers a special 'monthly' value represented as None.
    Emits ``timeDeltaChanged`` on any edit.
    """

    timeDeltaChanged = QC.Signal()

    def __init__( self, parent, min = 1, days = False, hours = False, minutes = False, seconds = False, monthly_allowed = False, monthly_label = 'monthly' ):
        """Create the spinners for the requested units only.

        :param min: minimum permitted value in seconds (enforced in EventChange/SetValue)
        """

        QW.QWidget.__init__( self, parent )

        self._min = min
        self._show_days = days
        self._show_hours = hours
        self._show_minutes = minutes
        self._show_seconds = seconds
        self._monthly_allowed = monthly_allowed

        hbox = QP.HBoxLayout( margin = 0 )

        if self._show_days:

            self._days = QP.MakeQSpinBox( self, min=0, max=3653, width = 50 )
            self._days.valueChanged.connect( self.EventChange )

            QP.AddToLayout( hbox, self._days, CC.FLAGS_CENTER_PERPENDICULAR )
            QP.AddToLayout( hbox, ClientGUICommon.BetterStaticText(self,'days'), CC.FLAGS_CENTER_PERPENDICULAR )

        if self._show_hours:

            self._hours = QP.MakeQSpinBox( self, min=0, max=23, width = 45 )
            self._hours.valueChanged.connect( self.EventChange )

            QP.AddToLayout( hbox, self._hours, CC.FLAGS_CENTER_PERPENDICULAR )
            QP.AddToLayout( hbox, ClientGUICommon.BetterStaticText(self,'hours'), CC.FLAGS_CENTER_PERPENDICULAR )

        if self._show_minutes:

            self._minutes = QP.MakeQSpinBox( self, min=0, max=59, width = 45 )
            self._minutes.valueChanged.connect( self.EventChange )

            QP.AddToLayout( hbox, self._minutes, CC.FLAGS_CENTER_PERPENDICULAR )
            QP.AddToLayout( hbox, ClientGUICommon.BetterStaticText(self,'minutes'), CC.FLAGS_CENTER_PERPENDICULAR )

        if self._show_seconds:

            self._seconds = QP.MakeQSpinBox( self, min=0, max=59, width = 45 )
            self._seconds.valueChanged.connect( self.EventChange )

            QP.AddToLayout( hbox, self._seconds, CC.FLAGS_CENTER_PERPENDICULAR )
            QP.AddToLayout( hbox, ClientGUICommon.BetterStaticText(self,'seconds'), CC.FLAGS_CENTER_PERPENDICULAR )

        if self._monthly_allowed:

            self._monthly = QW.QCheckBox( self )
            self._monthly.clicked.connect( self.EventChange )

            QP.AddToLayout( hbox, self._monthly, CC.FLAGS_CENTER_PERPENDICULAR )
            QP.AddToLayout( hbox, ClientGUICommon.BetterStaticText(self,monthly_label), CC.FLAGS_CENTER_PERPENDICULAR )

        self.setLayout( hbox )

    def _UpdateEnables( self ):
        """Grey out the unit spinners while 'monthly' (value None) is selected."""

        value = self.GetValue()

        if value is None:

            if self._show_days:

                self._days.setEnabled( False )

            if self._show_hours:

                self._hours.setEnabled( False )

            if self._show_minutes:

                self._minutes.setEnabled( False )

            if self._show_seconds:

                self._seconds.setEnabled( False )

        else:

            if self._show_days:

                self._days.setEnabled( True )

            if self._show_hours:

                self._hours.setEnabled( True )

            if self._show_minutes:

                self._minutes.setEnabled( True )

            if self._show_seconds:

                self._seconds.setEnabled( True )

    def EventChange( self ):
        """Any-edit handler: clamp to the minimum, refresh enables, notify."""

        value = self.GetValue()

        if value is not None and value < self._min:

            self.SetValue( self._min )

        self._UpdateEnables()

        self.timeDeltaChanged.emit()

    def GetValue( self ):
        """Return the delta in seconds, or None when 'monthly' is checked."""

        if self._monthly_allowed and self._monthly.isChecked():

            return None

        value = 0

        if self._show_days:

            value += self._days.value() * 86400

        if self._show_hours:

            value += self._hours.value() * 3600

        if self._show_minutes:

            value += self._minutes.value() * 60

        if self._show_seconds:

            value += self._seconds.value()

        return value

    def SetValue( self, value ):
        """Distribute ``value`` seconds (or None for 'monthly') over the spinners."""

        if self._monthly_allowed:

            if value is None:

                self._monthly.setChecked( True )

            else:

                self._monthly.setChecked( False )

        if value is not None:

            if value < self._min:

                value = self._min

            # peel off each unit from largest to smallest
            if self._show_days:

                self._days.setValue( value // 86400 )

                value %= 86400

            if self._show_hours:

                self._hours.setValue( value // 3600 )

                value %= 3600

            if self._show_minutes:

                self._minutes.setValue( value // 60 )

                value %= 60

            if self._show_seconds:

                self._seconds.setValue( value )

        self._UpdateEnables()
class VelocityCtrl( QW.QWidget ):
    """Edits a velocity: '<num> <unit> per <time delta>'.

    The value is a tuple ``( num, time_delta_seconds )``.  Emits
    ``velocityChanged`` whenever either part changes.
    """

    velocityChanged = QC.Signal()

    def __init__( self, parent, min_unit_value, max_unit_value, min_time_delta, days = False, hours = False, minutes = False, seconds = False, per_phrase = 'per', unit = None ):
        """Create the numeric spinner, connective text, and TimeDeltaCtrl.

        :param per_phrase: connective word shown between num and delta, e.g. 'per' or 'in'
        :param unit: optional unit word shown before the connective, e.g. 'files'
        """

        QW.QWidget.__init__( self, parent )

        self._num = QP.MakeQSpinBox( self, min=min_unit_value, max=max_unit_value, width = 60 )

        self._times = TimeDeltaCtrl( self, min = min_time_delta, days = days, hours = hours, minutes = minutes, seconds = seconds )

        #

        hbox = QP.HBoxLayout( margin = 0 )

        QP.AddToLayout( hbox, self._num, CC.FLAGS_CENTER_PERPENDICULAR )

        mid_text = per_phrase

        if unit is not None:

            mid_text = '{} {}'.format( unit, mid_text )

        QP.AddToLayout( hbox, ClientGUICommon.BetterStaticText(self,mid_text), CC.FLAGS_CENTER_PERPENDICULAR )

        QP.AddToLayout( hbox, self._times, CC.FLAGS_CENTER_PERPENDICULAR )

        self.setLayout( hbox )

        self._num.valueChanged.connect( self.velocityChanged )
        self._times.timeDeltaChanged.connect( self.velocityChanged )

    def GetValue( self ):
        """Return the velocity as ``( num, time_delta_seconds )``."""

        num = self._num.value()
        time_delta = self._times.GetValue()

        return ( num, time_delta )

    def setToolTip( self, text ):
        """Set the tooltip on this widget and propagate it to all child widgets."""

        QW.QWidget.setToolTip( self, text )

        for c in self.children():

            if isinstance( c, QW.QWidget ):

                c.setToolTip( text )

    def SetValue( self, velocity ):
        """Load a ``( num, time_delta_seconds )`` tuple into the widgets."""

        ( num, time_delta ) = velocity

        self._num.setValue( num )

        self._times.SetValue( time_delta )
5cd942ae84e35161434781da50dc6f706af0a0dd | 8,130 | py | Python | src/ansible_navigator/ui_framework/menu_builder.py | didib/ansible-navigator | 62fdbd05f25fb2d79133b3ab207f53ac2f2d6d36 | [
"Apache-2.0"
] | null | null | null | src/ansible_navigator/ui_framework/menu_builder.py | didib/ansible-navigator | 62fdbd05f25fb2d79133b3ab207f53ac2f2d6d36 | [
"Apache-2.0"
] | 1 | 2022-02-04T02:38:15.000Z | 2022-02-04T02:38:15.000Z | src/ansible_navigator/ui_framework/menu_builder.py | ganeshrn/ansible-navigator | 1580b5e4a4d715fa4bb844bfeeb40f1ac8e628f6 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-11-17T09:45:18.000Z | 2021-11-17T09:45:18.000Z | """ build a menu
"""
import curses
import enum
import re
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Tuple
from .curses_defs import CursesLine
from .curses_defs import CursesLinePart
from .curses_defs import CursesLines
from .ui_config import UIConfig
from .utils import convert_percentage
from .utils import distribute
class MenuBuilder:
    # pylint: disable=too-few-public-methods
    """Build a curses menu (header + body lines) from a list of dicts."""

    def __init__(
        self,
        pbar_width: int,
        screen_w: int,
        number_colors: int,
        color_menu_item: Callable,
        ui_config: UIConfig,
    ):
        # pylint: disable=too-many-arguments
        """Store layout parameters and the per-cell colorizer callback.

        :param pbar_width: width reserved for progress-bar cells
        :param screen_w: total screen width available for the menu
        :param number_colors: number of curses colors available (stored, unused here)
        :param color_menu_item: callback (colno, colname, entry) -> (color, decoration)
        :param ui_config: UI configuration object (stored, unused here)
        """
        self._number_colors = number_colors
        self._pbar_width = pbar_width
        self._screen_w = screen_w
        self._color_menu_item = color_menu_item
        self._ui_config = ui_config

    def build(self, dicts: List, cols: List, indicies) -> Tuple[CursesLines, CursesLines]:
        """main entry point for menu builder"""
        return self._menu(dicts, cols, indicies)

    def _menu(self, dicts: List, cols: List[str], indicies) -> Tuple[CursesLines, CursesLines]:
        """Build a text menu from a list of dicts given columns(root keys)

        :param dicts: A list of dicts
        :param cols: The columns (keys) to use in the dicts
        :param indicies: A range of what's showing in the UI
        :return: the heading and body of the menu
        :rtype: (CursesLines, CursesLines)
        """
        # width of the line-number gutter, e.g. '123|'
        line_prefix_w = len(str(len(dicts))) + len("|")

        # normalize any percentage columns to progress-bar strings, in place
        for idx in indicies:
            convert_percentage(dicts[idx], cols, self._pbar_width)

        lines = [[str(dicts[idx].get(c)) for c in cols] for idx in indicies]
        # natural column widths: widest cell in each column, header row included
        # (the '__' prefix marks internal columns and is stripped for display)
        colws = [
            max([len(str(v)) for v in c])
            for c in zip(*lines + [[re.sub("^__", "", col) for col in cols]])
        ]
        # add a space
        colws = [c + 1 for c in colws]

        available = self._screen_w - line_prefix_w - 1  # scrollbar width
        # shrink/distribute the natural widths into the available space
        adj_colws = distribute(available, colws)

        # running start offsets for each column
        col_starts = [0]
        for idx, colw in enumerate(adj_colws):
            col_starts.append(colw + col_starts[idx])

        menu_layout = tuple([col_starts, cols, adj_colws])
        header = self._menu_header_line(menu_layout)

        menu_layout = tuple([col_starts, cols, adj_colws, header])

        menu_lines = self._menu_lines(dicts, menu_layout, indicies)
        return tuple([header]), menu_lines

    def _menu_header_line(self, menu_layout: Tuple[List, ...]) -> CursesLine:
        """Generate the menu header line

        :param menu_layout: A tuple of menu details
        :type menu_layout: tuple
        :param menu_layout[0]: the starting position for each column
        :type menu_layout[0]: List[int]
        :param menu_layout[1]: the columns of the menu
        :type menu_layout[1]: List[str]
        :param menu_layout[2]: the adjusted column widths
        :type menu_layout[2]: List[int]
        :return: the menu header line
        """
        _col_starts, cols, _adj_colws = menu_layout
        return tuple(self._menu_header_line_part(colno, menu_layout) for colno in range(len(cols)))

    @staticmethod
    def _menu_header_line_part(colno: int, menu_layout: Tuple[List, ...]) -> CursesLinePart:
        """Generate one part of the menu header line

        :param colno: The column number
        :param menu_layout: A tuple of menu details
        :type menu_layout: tuple
        :param menu_layout[0]: the starting position for each column
        :type menu_layout[0]: List[int]
        :param menu_layout[1]: the columns of the menu
        :type menu_layout[1]: List[str]
        :param menu_layout[2]: the adjusted column widths
        :type menu_layout[2]: List[int]
        :return: one part of the menu header line
        """
        col_starts, cols, adj_colws = menu_layout
        # strip the internal '__' prefix and turn snake_case into words
        coltext = re.sub("^__", "", cols[colno])
        coltext = re.sub("_", " ", coltext)
        adj_entry = coltext[0 : adj_colws[colno]].upper()
        # right justify header if progress
        if cols[colno] == "__progress":

            return CursesLinePart(
                column=col_starts[colno] + adj_colws[colno] - len(adj_entry),
                string=adj_entry,
                color=0,
                decoration=curses.A_UNDERLINE,
            )
        return CursesLinePart(
            column=col_starts[colno], string=adj_entry, color=0, decoration=curses.A_UNDERLINE
        )

    def _menu_lines(
        self, dicts: List[Dict], menu_layout: Tuple[List, ...], indicies
    ) -> CursesLines:
        """Generate all the menu lines

        :params dicts: A list of dicts from which the menu will be generated
        :param menu_layout: A tuple of menu details
        :param menu_layout[0]: the starting position for each column
        :type menu_layout[0]: List[int]
        :param menu_layout[1]: the columns of the menu
        :type menu_layout[1]: List[str]
        :param menu_layout[2]: the adjusted column widths
        :type menu_layout[2]: List[int]
        :param menu_layout[3]: the menu header, used to determine justification
        :type menu_layout[3]: CursesLine
        :return: the menu lines
        """
        return tuple(self._menu_line(dicts[idx], menu_layout) for idx in indicies)

    def _menu_line(self, dyct: dict, menu_layout: Tuple[List, ...]) -> CursesLine:
        """Generate one menu line

        :param dyct: One dict from which the menu line will be generated
        :param menu_layout: A tuple of menu details
        :type menu_layout: tuple
        :param menu_layout[0]: the starting position for each column
        :type menu_layout[0]: List[int]
        :param menu_layout[1]: the columns of the menu
        :type menu_layout[1]: List[str]
        :param menu_layout[2]: the adjusted column widths
        :type menu_layout[2]: List[int]
        :param menu_layout[3]: the menu header, used to determine justification
        :type menu_layout[3]: CursesLine
        :return: a menu line
        """
        _col_starts, cols, _adj_colws, _header = menu_layout
        menu_line = (dyct.get(c) for c in cols)
        return tuple(
            self._menu_line_part(colno, coltext, dyct, menu_layout)
            for colno, coltext in enumerate(menu_line)
        )

    def _menu_line_part(
        self, colno: int, coltext: Any, dyct: dict, menu_layout: Tuple[List, ...]
    ) -> CursesLinePart:
        """Generate one menu line part

        :param colno: The column number of the line part
        :param coltext: The text to be placed at the given column
        :param dyct: the dict from which the menu line will be generated
        :param menu_layout: A tuple of menu details
        :type menu_layout: tuple
        :param menu_layout[0]: the starting position for each column
        :type menu_layout[0]: List[int]
        :param menu_layout[1]: the columns of the menu
        :type menu_layout[1]: List[str]
        :param menu_layout[2]: the adjusted column widths
        :type menu_layout[2]: List[int]
        :param menu_layout[3]: the menu header, used to determine justification
        :type menu_layout[3]: CursesLine
        :return: a menu line part
        """
        col_starts, cols, adj_colws, header = menu_layout

        # color/decoration of the cell is delegated to the caller-supplied callback
        color, decoration = self._color_menu_item(colno, cols[colno], dyct)

        text = str(coltext)[0 : adj_colws[colno]]

        if (isinstance(coltext, (int, bool, float)) and not isinstance(coltext, enum.Enum)) or cols[
            colno
        ].lower() == "__duration":
            # right justify on header if int, bool, float or "duration"
            print_at = col_starts[colno] + len(header[colno][1]) - len(text)
        elif cols[colno].lower() == "__progress":
            # right justify in column if progress indicator
            print_at = col_starts[colno] + adj_colws[colno] - len(text)
        else:
            # left justify
            print_at = col_starts[colno]
        return CursesLinePart(column=print_at, string=str(text), color=color, decoration=decoration)
3f6e76d6a76b63178ccf3c60a3f13e2502ae04f0 | 25 | py | Python | src/app/__init__.py | glucn/tofino | 64c603b2356f22eecbf8fd592f3656a613646c53 | [
"MIT"
] | 1 | 2021-01-04T10:07:41.000Z | 2021-01-04T10:07:41.000Z | src/app/__init__.py | glucn/tofino | 64c603b2356f22eecbf8fd592f3656a613646c53 | [
"MIT"
] | 231 | 2020-11-05T06:37:11.000Z | 2022-03-28T03:02:49.000Z | src/app/__init__.py | glucn/tofino | 64c603b2356f22eecbf8fd592f3656a613646c53 | [
"MIT"
] | null | null | null | """ Application code """
| 12.5 | 24 | 0.6 |
97dd697567f46ae50818c7a4b280437d738b5223 | 3,636 | py | Python | test.py | Adwaver4157/WorldModel_for_FinRL | 0aa0a984aadffe0f6f2e83e55678c0e9304fba05 | [
"MIT"
] | 1 | 2021-05-15T13:13:25.000Z | 2021-05-15T13:13:25.000Z | test.py | Adwaver4157/WorldModel_for_FinRL | 0aa0a984aadffe0f6f2e83e55678c0e9304fba05 | [
"MIT"
] | null | null | null | test.py | Adwaver4157/WorldModel_for_FinRL | 0aa0a984aadffe0f6f2e83e55678c0e9304fba05 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
import datetime
from finrl.config import config
from finrl.marketdata.yahoodownloader import YahooDownloader
from finrl.preprocessing.preprocessors import FeatureEngineer
from finrl.preprocessing.data import data_split
from finrl.env.env_stocktrading import StockTradingEnv
from finrl.model.models import DRLAgent
from finrl.trade.backtest import BackTestStats, BaselineStats, BackTestPlot
import sys
sys.path.append("../FinRL-Library")
# Download and save the data in a pandas DataFrame:
data_df = YahooDownloader(start_date = '2009-01-01',
                          end_date = '2021-01-01',
                          ticker_list = ['AAPL']).fetch_data()

# base technical indicators from the FinRL config, extended with extras
# recognized by the stockstats-style names below
tech_indicator_list=config.TECHNICAL_INDICATORS_LIST
tech_indicator_list=tech_indicator_list+['kdjk','open_2_sma','boll','close_10.0_le_5_c','wr_10','dma','trix']

fe = FeatureEngineer(
                    use_technical_indicator=True,
                    tech_indicator_list = tech_indicator_list,
                    use_turbulence=False,
                    user_defined_feature = False)

data_df = fe.preprocess_data(data_df)

# earlier config-driven splits, kept for reference
#train = data_split(data_df, start = config.START_DATE, end = config.START_TRADE_DATE)
#trade = data_split(data_df, start = config.START_TRADE_DATE, end = config.END_DATE)
# train on 2009-2018, trade (out-of-sample) on 2019-2020
train = data_split(data_df, start = '2009-01-01', end = '2019-01-01')
trade = data_split(data_df, start = '2019-01-01', end = '2021-01-01')

stock_dimension = len(train.tic.unique())
# state: cash balance + (price, holdings) per stock + indicators per stock
# NOTE(review): uses config.TECHNICAL_INDICATORS_LIST, not the extended
# tech_indicator_list above — confirm this matches the env's indicator set
state_space = 1 + 2*stock_dimension + len(config.TECHNICAL_INDICATORS_LIST)*stock_dimension

env_kwargs = {
    "hmax": 100,
    "initial_amount": 100000,
    "transaction_cost_pct": 0.001,
    "state_space": state_space,
    "stock_dim": stock_dimension,
    "tech_indicator_list": config.TECHNICAL_INDICATORS_LIST,
    "action_space": stock_dimension,
    "reward_scaling": 1e-4
}

e_train_gym = StockTradingEnv(df = train, **env_kwargs)

env_train, _ = e_train_gym.get_sb_env()

agent = DRLAgent(env = env_train)
# "wm" selects the world-model agent provided by this fork
model_wm = agent.get_model("wm")

print('START TRAIN')
trained_wm = agent.train_model(model=model_wm,
                               tb_log_name='wm',
                               total_timesteps=30000)

# re-split for trading (identical to the 'trade' split above)
trade = data_split(data_df, start = '2019-01-01', end = '2021-01-01')
e_trade_gym = StockTradingEnv(df = trade, **env_kwargs)
env_trade, obs_trade = e_trade_gym.get_sb_env()

df_account_value, df_actions = DRLAgent.DRL_prediction(model=trained_wm,
                                                       test_data = trade,
                                                       test_env = env_trade,
                                                       test_obs = obs_trade)

print("==============Get Backtest Results===========")
now = datetime.datetime.now().strftime('%Y%m%d-%Hh%M')

perf_stats_all = BackTestStats(account_value=df_account_value)
perf_stats_all = pd.DataFrame(perf_stats_all)
perf_stats_all.to_csv("./"+config.RESULTS_DIR+"/perf_stats_all_"+now+'.csv')

print("==============Compare to AAPL itself buy-and-hold===========")
BackTestPlot(account_value=df_account_value,
             baseline_ticker = 'AAPL',
             baseline_start = '2019-01-01',
             baseline_end = '2021-01-01')

print("==============Get Baseline Stats===========")
# NOTE(review): variable name typo 'baesline' preserved as-is; result is unused
baesline_perf_stats=BaselineStats('AAPL')

print("==============Get Baseline Stats===========")
baesline_perf_stats=BaselineStats('^GSPC')

print("==============Compare to S&P 500===========")
# S&P 500: ^GSPC
# Dow Jones Index: ^DJI
# NASDAQ 100: ^NDX
BackTestPlot(df_account_value, baseline_ticker = '^GSPC')
b4392b141a1925f819efd3eb7943e20bba21ccb4 | 420 | py | Python | picdown/picdown/items.py | gamegrd/discuz_spider | 06f16083f40b129e4d80478d57963ccced1c833c | [
"MIT"
] | 3 | 2017-01-11T03:26:26.000Z | 2020-07-18T11:25:18.000Z | picdown/picdown/items.py | andyzhuangyy/discuz_spider | 06f16083f40b129e4d80478d57963ccced1c833c | [
"MIT"
] | null | null | null | picdown/picdown/items.py | andyzhuangyy/discuz_spider | 06f16083f40b129e4d80478d57963ccced1c833c | [
"MIT"
] | 3 | 2016-09-03T03:44:22.000Z | 2020-07-18T11:25:20.000Z | # Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class PicdownItem(Item):
    """Scrapy item holding one scraped picture post.

    Fields:
        image_urls:  source URLs of the images to download
                     (presumably consumed by scrapy's images pipeline —
                     confirm against the project's pipeline settings)
        image_paths: local paths of the downloaded images
        site_url:    URL of the page the images were scraped from
        time:        timestamp text of the post
        text:        text content accompanying the images
    """
    # Cleanup: removed the scaffold comments, commented-out fields, and the
    # redundant trailing `pass`; the field set is unchanged.
    image_urls = Field()
    image_paths = Field()
    site_url = Field()
    time = Field()
    text = Field()
7e149227e4c6b6a26287abb501ac1e66bf02ac97 | 12,278 | py | Python | PythonSolid.py | lehnertu/solidpython | b8dd8d1f01ac0994b686e8e9ceaeb3fb572bb64e | [
"CC0-1.0"
] | null | null | null | PythonSolid.py | lehnertu/solidpython | b8dd8d1f01ac0994b686e8e9ceaeb3fb572bb64e | [
"CC0-1.0"
] | null | null | null | PythonSolid.py | lehnertu/solidpython | b8dd8d1f01ac0994b686e8e9ceaeb3fb572bb64e | [
"CC0-1.0"
] | null | null | null | import numpy as np
from copy import *
import vtk
# the minimum square distance for two points to be considered distinct
tolerance = np.square(0.01)
# square distance of two points
def sqr_dist(p1, p2):
    """Return the squared Euclidean distance between two 3D points."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    dz = p1[2] - p2[2]
    return np.square(dx) + np.square(dy) + np.square(dz)
def angle(pts):
    """Cosine of the angle between two adjacent edges.

    ``pts`` is a sequence of exactly three points (numpy arrays).  For
    fully aligned edges the result is 1.0, decreasing towards -1.0 as
    the angle between the edges grows.
    """
    if len(pts) != 3:
        raise Exception("need 3 points to compute an angle.")
    edge_a = pts[1] - pts[0]
    edge_b = pts[2] - pts[1]
    unit_a = edge_a / np.linalg.norm(edge_a)
    unit_b = edge_b / np.linalg.norm(edge_b)
    return np.dot(unit_a, unit_b)
def VisualizePointCloud(points):
    """
    Display a set of points in 3D space

    Opens an interactive VTK render window (trackball camera) showing the
    points as yellow vertices on a slate-gray background.  Blocks until
    the window is closed.

    :param points: iterable of 3D point coordinates accepted by
                   vtkPoints.InsertNextPoint (e.g. (x, y, z) tuples)
    """
    # build a polydata with one vertex cell per point
    pts = vtk.vtkPoints()
    vertices = vtk.vtkCellArray()
    for p in points:
        # NOTE(review): 'id' shadows the builtin; kept as-is (doc-only pass)
        id = pts.InsertNextPoint(p)
        vertices.InsertNextCell(1)
        vertices.InsertCellPoint(id)
    meshData = vtk.vtkPolyData()
    meshData.SetPoints(pts)
    meshData.SetVerts(vertices)
    # map the point data into the scene
    meshMapper = vtk.vtkPolyDataMapper()
    meshMapper.SetInputData(meshData)
    # add the actors to the scene
    meshActor = vtk.vtkActor()
    meshActor.SetMapper(meshMapper)
    meshActor.GetProperty().SetColor(vtk.vtkNamedColors().GetColor3d("Yellow"))
    # create a render window
    renderer = vtk.vtkRenderer()
    renderer.SetBackground(vtk.vtkNamedColors().GetColor3d("SlateGray"))
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetSize(800,600)
    renderWindow.AddRenderer(renderer)
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)
    renderWindowInteractor.Initialize()
    style = vtk.vtkInteractorStyleTrackballCamera()
    style.SetDefaultRenderer(renderer)
    renderWindowInteractor.SetInteractorStyle(style)
    # add the actors to the scene
    renderer.AddActor(meshActor)
    # render and interact
    renderWindow.Render()
    # Start() blocks: the interaction runs until the window is closed
    renderWindowInteractor.Start()
    # cleanup after closing the window
    del renderWindow
    del renderWindowInteractor
# We define a class for a polyhedron.
# Its starts empty and we can successively add faces keeping track of the number of points
# and the correct indices for faces.
# Finally we can output a solid geometry.
class SolidPolyhedron():
def __init__(self):
    """
    Create an empty SolidPolyhedron object
    """
    # vertex list; duplicates within `tolerance` are merged on insertion
    self.points = []
    # faces, each a list of indices into self.points
    self.faces = []
    # number of points currently stored (kept equal to len(self.points))
    self.NP = 0
def replace_point(self,old,new):
    """
    Replace all references to the old point by a reference to the new point.

    :param old: point index to be replaced in every face
    :param new: point index that takes its place
    """
    for face in self.faces:
        for k,f in enumerate(face):
            if f==old:
                # face is a reference, so we can modify it in place
                face[k]=new
def remove_point(self,index):
"""
Remove one point with given index from the list.
This is only possible if there exist no references to it in the faces.
All face indexes are updated according to the shift in list positions.
"""
# first check if there are no references
for face in self.faces:
for f in face:
if f==index:
raise Exception("attempting to remove a point with existing references")
# delete the point from the list
del self.points[index]
self.NP = len(self.points)
# move the indexes of the faces
for i in range(index,self.NP):
self.replace_point(i+1,i)
def add_triangle(self,new_points):
"""
Add a triangle with given points to the faces of the solid.
Points are only added if they are not yet present.
"""
if len(new_points) != 3:
raise Exception("triangles should be given with 3 points.")
new_face = [0,0,0]
# append the new points to the list
for i,new in enumerate(new_points):
is_new = True
# check if this new point is already present
for k,p in enumerate(self.points):
if sqr_dist(p,new)<tolerance:
new_face[i] = k
is_new = False
# do not append points that are already present
if is_new:
new_face[i] = self.NP
self.points.append(new)
self.NP += 1
self.faces.append(new_face)
def add_polygon(self,new_points):
"""
Add a face defined by a polygon.
Degenerated edges are removed.
The polygon is recursively split into triangles, always cutting off
the triangle with the sharpest corner.
"""
new_NP = len(new_points)
# remove degenerate edges
i=1
# we have to use while loops as the end may change during the execution
while i<new_NP:
p1 = new_points[i-1]
p2 = new_points[i]
if sqr_dist(p1,p2)<tolerance:
del new_points[i]
new_NP -= 1
print('removed one degenerate edge')
# if the edge was degenerate we have to try te same index again
else:
i += 1
# add the face
if new_NP<3: raise Exception("too few points for a polygon.")
if new_NP==3: self.add_triangle(new_points)
else:
# find the sharpest corner
min_angle = 1.0
# i is the index of the corner under consideration
for i in range(new_NP):
ind = [i-1,i,i+1]
# the positive wrap-around has to be handled explicitely, the -1 index works as built-in
if i+1==new_NP: ind = [i-1,i,0]
points = [new_points[k] for k in ind]
a = angle(points)
if a<min_angle:
tri_ind = i
tri_points = points
min_angle = a
self.add_triangle(tri_points)
# the rest is the origonal polygon with the sharpest corner dropped
rest_ind = range(new_NP)
rest_ind.remove(tri_ind)
rest = [new_points[i] for i in rest_ind]
# recursively add the rest polygon
self.add_polygon(rest)
def add(self,new_points,new_faces):
"""
Add a collection of faces to the lists
"""
old_NP = self.NP
new_NP = len(new_points)
# the new points are appended after the existing ones
for p in new_points:
self.points.append(p)
# all indices have to be corrected for the number of already existing points
for f in new_faces:
new_face = [i+old_NP for i in f]
self.faces.append(new_face)
self.NP += new_NP
# now check if any of the new points were already present
# the code is the same as unify() except that the ranges of the test
# are limited to the old (NP) and new points, respectively
# we have to use while loops as the end may change during the execution
i=0
while i<old_NP:
# k starts with the first new point
k=old_NP
while k<self.NP:
if sqr_dist(self.points[i],self.points[k])<tolerance:
# replace the new point with the already present
self.replace_point(k,i)
self.remove_point(k)
k+=1
i+=1
def unify(self):
"""
Check for duplicated points with less than tolerance distance.
"""
# we have to use while loops as the end may change during the execution
i=0
while i<self.NP:
k=i+1
while k<self.NP:
if sqr_dist(self.points[i],self.points[k])<tolerance:
# replace the latter point with the former
self.replace_point(k,i)
self.remove_point(k)
k+=1
i+=1
def summary(self):
print("NP = %d" % self.NP)
print("faces %d" % len(self.faces))
def check(self,debug=False):
"""
We want to check the correctness and completeness of the solid.
It is a closed volume with all normals pointing to the same side if all edges
exit exactly twice with opposite directions.
The debug flag controls the generation of output about every flaw found.
"""
# make a list of all edges starting from the points
count = 0
edges = [[] for i in range(self.NP)]
for f in self.faces:
NF = len(f)
for i in range(NF-1):
edges[f[i]].append(f[i+1])
count += 1
edges[f[NF-1]].append(f[0])
count += 1
print('found %d edges' % count)
# check for duplicated edges
count = 0
for p1,e in enumerate(edges):
set_e = set()
for p2 in e:
if p2 in set_e:
if debug: print('found duplicated edge from %d to %d.' % (p1,p2))
count += 1
else:
set_e.add(p2)
print('found %d duplicated edges' % count)
# check for every edge if the opposite direction exists
count = 0
for p1 in range(self.NP):
for p2 in edges[p1]:
if not p1 in edges[p2]:
count = count+1
if debug: print('found free edge from %d to %d.' % (p1,p2))
print('found %d free edges' % count)
def getPolyData(self):
"""
Return a vtkPolyData object
"""
pts = vtk.vtkPoints()
for p in self.points: pts.InsertNextPoint(p)
cells = vtk.vtkCellArray()
for f in self.faces:
cells.InsertNextCell(len(f), f)
meshData = vtk.vtkPolyData()
meshData.SetPoints(pts)
meshData.SetPolys(cells)
return meshData
def writeSTL(self,filename):
pts = vtk.vtkPoints()
for p in self.points: pts.InsertNextPoint(p)
cells = vtk.vtkCellArray()
for f in self.faces:
cells.InsertNextCell(len(f), f)
meshData = vtk.vtkPolyData()
meshData.SetPoints(pts)
meshData.SetPolys(cells)
stlWriter = vtk.vtkSTLWriter()
stlWriter.SetFileTypeToASCII()
# stlWriter.SetFileTypeToBinary()
stlWriter.SetInputData(meshData)
stlWriter.SetFileName(filename)
stlWriter.Write()
def visualize(self, showEdges=True, Opacity=0.9):
meshData = self.getPolyData()
# map the triangle meshs into the scene
meshMapper = vtk.vtkPolyDataMapper()
meshMapper.SetInputData(meshData)
# add the actors to the scene
meshActor = vtk.vtkActor()
meshActor.SetMapper(meshMapper)
if showEdges:
meshActor.GetProperty().EdgeVisibilityOn()
else:
meshActor.GetProperty().EdgeVisibilityOff()
meshActor.GetProperty().SetColor(vtk.vtkNamedColors().GetColor3d("Yellow"))
meshActor.GetProperty().SetOpacity(Opacity)
# create a render window
renderer = vtk.vtkRenderer()
renderer.SetBackground(vtk.vtkNamedColors().GetColor3d("SlateGray"))
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetSize(800,600)
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.Initialize()
style = vtk.vtkInteractorStyleTrackballCamera()
style.SetDefaultRenderer(renderer)
renderWindowInteractor.SetInteractorStyle(style)
# add the actors to the scene
renderer.AddActor(meshActor)
# render and interact
renderWindow.Render()
renderWindowInteractor.Start()
# now the interaction is running until we close the window
# cleanup after closing the window
del renderWindow
del renderWindowInteractor | 38.977778 | 104 | 0.592116 |
5142b6d43b642f6ca682d05ab7982aba08421eb8 | 8,542 | py | Python | tensor2tensor/data_generators/algorithmic_math_deepmind.py | jonmcwong/tensor2tensor | 71f2c2d8624858e5e6a091f7d7c67c43f097ef19 | [
"Apache-2.0"
] | null | null | null | tensor2tensor/data_generators/algorithmic_math_deepmind.py | jonmcwong/tensor2tensor | 71f2c2d8624858e5e6a091f7d7c67c43f097ef19 | [
"Apache-2.0"
] | null | null | null | tensor2tensor/data_generators/algorithmic_math_deepmind.py | jonmcwong/tensor2tensor | 71f2c2d8624858e5e6a091f7d7c67c43f097ef19 | [
"Apache-2.0"
] | 1 | 2020-06-19T17:36:10.000Z | 2020-06-19T17:36:10.000Z | # coding=utf-8
# Copyright 2020 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Data generators for the DeepMind Mathematics Dataset.
See https://github.com/deepmind/mathematics_dataset for the original repository.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import pdb
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry
from tensor2tensor.utils import metrics
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
_URL = "https://storage.cloud.google.com/mathematics-dataset/mathematics_dataset-v1.0.tar.gz"
@registry.register_problem
class AlgorithmicMathDeepmindAll(text_problems.Text2TextProblem):
  """DeepMind Mathematics Problem, v1.0, all data.

  The subset of data generated is chosen by the --task_direction flag
  (see problem.TaskDirections).
  """
  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # which curriculum/evaluation subset to generate, taken from the CLI flag
    self.task_direction = FLAGS.task_direction
  @property
  def vocab_type(self):
    # questions/answers are short character strings; a char vocab suffices
    return text_problems.VocabType.CHARACTER
  @property
  def dataset_splits(self):
    """Return the split/shard layout for the configured task_direction."""
    if self.task_direction == problem.TaskDirections.NORMAL:
      return [{
          "split": problem.DatasetSplit.TRAIN,
          "shards": 128,
      }, {
          "split": problem.DatasetSplit.EVAL,
          "shards": 1,
      }]
    if self.task_direction == problem.TaskDirections.EASY:
      return [{"split": "train_easy", "shards": 64}]
    if self.task_direction == problem.TaskDirections.EASY_MEDIUM:
      return [{"split": "train_easy_medium", "shards": 64}]
    if self.task_direction == problem.TaskDirections.INTERPOLATE:
      return [{"split": "single_inter", "shards": 1}]
    if self.task_direction == problem.TaskDirections.EXTRAPOLATE:
      return [{"split": "single_extra", "shards": 1}]
    if self.task_direction == problem.TaskDirections.Q8:
      # arithmetic add_sub_multiple at all difficulties plus its
      # extrapolation set
      return [
          {"split": "train_easy_add_sub_multiple", "shards": 1},
          {"split": "train_medium_add_sub_multiple", "shards": 1},
          {"split": "train_hard_add_sub_multiple", "shards": 1},
          {"split": "extra_add_sub_multiple_longer", "shards": 1},
      ]
    if self.task_direction == problem.TaskDirections.Q12:
      # the full arithmetic interpolation/extrapolation evaluation suite
      return [
          {"split": "extra_add_or_sub_big", "shards": 1},
          {"split": "extra_add_sub_multiple_longer", "shards": 1},
          {"split": "extra_div_big", "shards": 1},
          {"split": "extra_mixed_longer", "shards": 1},
          {"split": "extra_mul_big", "shards": 1},
          {"split": "extra_mul_div_multiple_longer", "shards": 1},
          {"split": "inter_add_or_sub", "shards": 1},
          {"split": "inter_add_sub_multiple", "shards": 1},
          {"split": "inter_div", "shards": 1},
          {"split": "inter_mixed", "shards": 1},
          {"split": "inter_mul", "shards": 1},
          {"split": "inter_mul_div_multiple", "shards": 1},
      ]
    raise ValueError("Found unknown task_direction which is ", self.task_direction)
  # What evaluation metrics to use with this problem.
  def eval_metrics(self):
    return [metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
            metrics.Metrics.ACC_PER_SEQ]
  @property
  def is_generate_per_split(self):
    # train and eval come from different source files
    return True
  def generate_samples(self, data_dir, tmp_dir, dataset_split):
    """Reads the extracted dataset and generates examples.

    Expects the v1.0 archive to already be extracted under tmp_dir as
    mathematics_dataset-v1.0/ (the download/extract step was removed).

    Args:
      data_dir: The base directory where data and vocab files are stored.
      tmp_dir: temp directory holding the extracted dataset.
      dataset_split: split of the data-set.

    Yields:
      Dicts with "inputs" (question line) and "targets" (answer line).
    """
    def expand_split(split):
      """Map a split name like 'train_easy_mul' to its relative file path."""
      if split[:5] in ("inter", "extra"):
        return split[:5] + "polate/arithmetic__" + split[6:] + ".txt"
      if split[:5] == "train":
        items = split.split("_")
        return items[0] + "-" + items[1] + "/arithmetic__" + "_".join(items[2:]) + ".txt"
      raise ValueError(split)
    train_dirs = ["mathematics_dataset-v1.0/train-easy",
                  "mathematics_dataset-v1.0/train-medium",
                  "mathematics_dataset-v1.0/train-hard"]
    eval_dirs = ["mathematics_dataset-v1.0/interpolate",
                 "mathematics_dataset-v1.0/extrapolate"]
    direction = self.task_direction
    # Q8/Q12 splits name a single .txt file; every other mode names
    # directories whose .txt contents are globbed.
    single_file = direction in (problem.TaskDirections.Q8,
                                problem.TaskDirections.Q12)
    if direction == problem.TaskDirections.NORMAL:
      dirs = train_dirs if dataset_split == problem.DatasetSplit.TRAIN else eval_dirs
    elif direction == problem.TaskDirections.EASY:
      dirs = train_dirs[0:1]
    elif direction == problem.TaskDirections.EASY_MEDIUM:
      dirs = train_dirs[0:2]
    elif direction == problem.TaskDirections.INTERPOLATE:
      dirs = eval_dirs[0:1]
    elif direction == problem.TaskDirections.EXTRAPOLATE:
      dirs = eval_dirs[1:2]
    elif single_file:
      dirs = ["mathematics_dataset-v1.0/" + expand_split(dataset_split)]
    else:
      raise ValueError("Found unknown task_direction which is ", direction)
    dirs = [os.path.join(tmp_dir, d) for d in dirs]
    # Iterate over directories and files generating examples.
    for d in dirs:
      # bugfix: EASY/EASY_MEDIUM previously fell through to a ValueError in
      # the file-selection chain; they now glob their directories like NORMAL.
      files = [d] if single_file else tf.gfile.Glob(d + "/*.txt")
      for fname in files:
        # In each text file, the first line is the input, the next the answer,
        # and so on until the end of the file.
        cur_input = None
        with tf.gfile.Open(fname, "rb") as f:
          for line in f:
            if cur_input is None:
              # NOTE(review): the file is opened in binary mode, so str(line)
              # yields "b'...'" reprs — behaviour preserved from the original.
              cur_input = str(line)
            else:
              yield {"inputs": cur_input, "targets": str(line)}
              cur_input = None
| 35.297521 | 136 | 0.63697 |
8aa2e558e754dfd73dbd16865939a2e25cde89ea | 2,041 | py | Python | Python/Pac-Man version 1/Other/BFS.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | Python/Pac-Man version 1/Other/BFS.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | Python/Pac-Man version 1/Other/BFS.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null |
a= [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0 ,0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0 ,0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0 ,0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0 ,0, 0, 0, 1, 0, 1, 1, 1, 1],
[1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0 ,0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0 ,0, 0, 0, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0 ,0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0 ,0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
def find_path(maze, start, end):
    """
    Find a shortest path through a grid maze by breadth-first search.

    maze:  2D list; cells equal to 0 are passable, anything else is a wall.
    start: (row, col) of the start cell.
    end:   (row, col) of the target cell.

    Returns the list of (row, col) cells from *end* back to *start* (reverse
    walking order), matching the historical behaviour.  Raises ValueError if
    *end* is unreachable (the previous implementation looped forever, and its
    per-step full-grid rescan was O(rows*cols) per wavefront; this version is
    a standard O(rows*cols) BFS overall).
    """
    from collections import deque
    rows = len(maze)
    # distance field: m[i][j] == steps from start + 1; 0 == unvisited
    m = [[0] * len(row) for row in maze]
    si, sj = start
    m[si][sj] = 1
    queue = deque([(si, sj)])
    while queue:
        i, j = queue.popleft()
        d = m[i][j]
        # visit the four orthogonal neighbours that are in bounds, free, unseen
        for ni, nj in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
            if 0 <= ni < rows and 0 <= nj < len(m[ni]) \
                    and maze[ni][nj] == 0 and m[ni][nj] == 0:
                m[ni][nj] = d + 1
                queue.append((ni, nj))
    i, j = end
    k = m[i][j]
    if k == 0:
        raise ValueError("no path from %s to %s" % (start, end))
    # walk back from end to start, stepping onto a cell one step closer each
    # time; neighbour priority (up, left, down, right) matches the original
    the_path = [(i, j)]
    while k > 1:
        if i > 0 and m[i - 1][j] == k - 1:
            i, j = i - 1, j
        elif j > 0 and m[i][j - 1] == k - 1:
            i, j = i, j - 1
        elif i < len(m) - 1 and m[i + 1][j] == k - 1:
            i, j = i + 1, j
        elif j < len(m[i]) - 1 and m[i][j + 1] == k - 1:
            i, j = i, j + 1
        the_path.append((i, j))
        k -= 1
    return the_path
# Demo: print the end-to-start path through the module-level maze `a`.
print(find_path(a, (1,1), (5,19)))
02c3b6efaada83e808d1feb3b32650eb4dd6814c | 2,247 | py | Python | hero.py | PythonixCoders/PyWeek30 | 553dd5c5fbaaac7afb2f6c3c6b01073d1b0f38a9 | [
"MIT"
] | null | null | null | hero.py | PythonixCoders/PyWeek30 | 553dd5c5fbaaac7afb2f6c3c6b01073d1b0f38a9 | [
"MIT"
] | null | null | null | hero.py | PythonixCoders/PyWeek30 | 553dd5c5fbaaac7afb2f6c3c6b01073d1b0f38a9 | [
"MIT"
] | null | null | null | import pygame
from settings import *
from utils import image_load
class Hero(pygame.sprite.Sprite):
def __init__(self, pos=None):
super().__init__()
self._sheet = image_load(BASEDIR / "assets" / "images" / "characters.png").convert_alpha()
self.animation = {}
self._position = pos or [0, 0]
self.velocity = [0, 0]
self.anim_dir = "down"
self.frame = 0.0
self.frame_speed = 3.0
self.speed = 20
for i, anim in enumerate(["down", "left", "right", "up"]):
r = pygame.Rect((3 * 16, 16 * i), (16, 16))
self.animation.setdefault(anim, []).append(self._sheet.subsurface(r))
r.left += 16
self.animation.setdefault(anim, []).append(self._sheet.subsurface(r))
r.left += 16
self.animation.setdefault(anim, []).append(self._sheet.subsurface(r))
self.image = self.animation[self.anim_dir][int(self.frame)]
self.rect = self.image.get_rect()
self.feet = pygame.Rect(0, 0, self.rect.width + 4, 4)
self._old_position = self.position
@property
def position(self):
return list(self._position)
@position.setter
def position(self, value):
self._position = list(value)
def update(self, dt):
self._old_position = self._position[:]
if self.velocity == [0, 0]:
self.frame = 1.0
self.frame_speed = 3.0
else:
# Make this to ping pong
self.frame += self.frame_speed * dt
if self.frame < 0.0:
self.frame_speed = -self.frame_speed
self.frame = 0.0
if self.frame >= 3.0:
self.frame_speed = -self.frame_speed
self.frame = 2.99
self._position[0] += self.velocity[0] * self.speed * dt
self._position[1] += self.velocity[1] * self.speed * dt
self.rect.topleft = self._position
self.feet.midbottom = self.rect.midbottom
self.image = self.animation[self.anim_dir][int(self.frame)]
def move_back(self):
self._position = self._old_position
self.rect.topleft = self._position
self.feet.midbottom = self.rect.midbottom
| 34.569231 | 98 | 0.574099 |
9faf7a42bc6cc4f849f94ccb346a97174b16b4d8 | 45,465 | py | Python | data/coco.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | 1 | 2018-12-09T06:09:29.000Z | 2018-12-09T06:09:29.000Z | data/coco.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | null | null | null | data/coco.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import copy
from colorama import Fore
import matplotlib.pyplot as plt
import skimage.io as io
import pylab
from pandas.plotting import table
from collections import OrderedDict
import matplotlib.pyplot as plt
import pandas as pd
from collections import Iterable
import re
import pandas as pd
from abc import ABCMeta, abstractmethod
import random
import json
from skimage import io
import cv2
import time
from enum import Enum
from PIL import Image
import os
import json
import numpy as np
from pycocotools.coco import COCO
import Putil.base.logger as plog
# Module logger plus per-class child loggers so the two data classes can be
# filtered independently; everything defaults to DEBUG.
logger = plog.PutilLogConfig('coco').logger()
logger.setLevel(plog.DEBUG)
COCODataLogger = logger.getChild('COCOData')
COCODataLogger.setLevel(plog.DEBUG)
COCOBaseLogger = logger.getChild('COCOBase')
COCOBaseLogger.setLevel(plog.DEBUG)
from Putil.base import putil_status
from Putil.data import cocoeval
import Putil.data.vision_common_convert.bbox_convertor as bbox_convertor
from Putil.data.util.vision_util import detection_util
import Putil.data.common_data as pcd
class COCOBase(pcd.CommonDataForTrainEvalTest):
    '''
    @brief
    @note
        About the COCO annotations: there are four major task families —
        object detection, panoptic segmentation, image captioning and human
        keypoint detection.
        getImgIds without catIds returns the ids of *all* images; use
        [v['image_id'] for k, v in coco.anns.items()] to obtain only the ids
        of images that actually carry annotations for the task.
        coco_basical_statistic shows the three annotation files are nested:
        captions contains instances, which contains person_keypoints.
        A "represent" is the contiguous 0-79 training label; a "cat_id" is the
        sparse COCO category id (some ids are unused).
    '''
    # represent->cat_id->cat_name->represent
    represent_to_cat_id = OrderedDict({0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 11, 11: 13, 12: 14, 13: 15, 14: 16, 15: 17, 16: 18, 17: 19, 18: 20, 19: 21, 20: 22, 21: 23, 22: 24, 23: 25, 24: 27, 25: 28, 26: 31, 27: 32, 28: 33, 29: 34, 30: 35, 31: 36, 32: 37, 33: 38, 34: 39, 35: 40, 36: 41, 37: 42, 38: 43, 39: 44, 40: 46, 41: 47, 42: 48, 43: 49, 44: 50, 45: 51, 46: 52, 47: 53, 48: 54, 49: 55, 50: 56, 51: 57, 52: 58, 53: 59, 54: 60, 55: 61, 56: 62, 57: 63, 58: 64, 59: 65, 60: 67, 61: 70, 62: 72, 63: 73, 64: 74, 65: 75, 66: 76, 67: 77, 68: 78, 69: 79, 70: 80, 71: 81, 72: 82, 73: 84, 74: 85, 75: 86, 76: 87, 77: 88, 78: 89, 79: 90})
    cat_id_to_represent = OrderedDict()
    for represent, cat_id in represent_to_cat_id.items():
        cat_id_to_represent[cat_id] = represent
    #cat_id_to_represent = {cat_id: represent for represent, cat_id in represent_to_cat_id.items()}
    cat_id_to_cat_name = OrderedDict({1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle', 5: 'airplane', 6: 'bus', 7: 'train', 8: 'truck', 9: 'boat', 10: 'traffic light', 11: 'fire hydrant', 13: 'stop sign', 14: 'parking meter', 15: 'bench', 16: 'bird', 17: 'cat', 18: 'dog', 19: 'horse', 20: 'sheep', 21: 'cow', 22: 'elephant', 23: 'bear', 24: 'zebra', 25: 'giraffe', 27: 'backpack', 28: 'umbrella', 31: 'handbag', 32: 'tie', 33: 'suitcase', 34: 'frisbee', 35: 'skis', 36: 'snowboard', 37: 'sports ball', 38: 'kite', 39: 'baseball bat', 40: 'baseball glove', 41: 'skateboard', 42: 'surfboard', 43: 'tennis racket', 44: 'bottle', 46: 'wine glass', 47: 'cup', 48: 'fork', 49: 'knife', 50: 'spoon', 51: 'bowl', 52: 'banana', 53: 'apple', 54: 'sandwich', 55: 'orange', 56: 'broccoli', 57: 'carrot', 58: 'hot dog', 59: 'pizza', 60: 'donut', 61: 'cake', 62: 'chair', 63: 'couch', 64: 'potted plant', 65: 'bed', 67: 'dining table', 70: 'toilet', 72: 'tv', 73: 'laptop', 74: 'mouse', 75: 'remote', 76: 'keyboard', 77: 'cell phone', 78: 'microwave', 79: 'oven', 80: 'toaster', 81: 'sink', 82: 'refrigerator', 84: 'book', 85: 'clock', 86: 'vase', 87: 'scissors', 88: 'teddy bear', 89: 'hair drier', 90: 'toothbrush'})
    cat_name_to_represent = OrderedDict()
    for cat_id, cat_name in cat_id_to_cat_name.items():
        cat_name_to_represent[cat_name] = cat_id_to_represent[cat_id]
    # TODO: when cat_name_to_represent is built with the comprehension below,
    # cat_id_to_represent is reported as undefined — class bodies do not form
    # an enclosing scope for comprehensions, hence the explicit loop above.
    #cat_name_to_represent = {cat_name: cat_id_to_represent[cat_id] for cat_id, cat_name in cat_id_to_cat_name.items()}
    # datas field
    ## base information
    base_information_length = 3
    image_height_index_in_base_information = 0
    image_width_index_in_base_information = 1
    image_id_index_in_base_information = 2
    ## datas
    data_length = 4
    image_index = 0
    detection_box_index = 1
    base_information_index = 2
    detection_class_index = 3
    ## result
    result_length = 5 # format: list
    result_base_information_index = 0
    result_image_index = 1 # the current image, shape [Height, Width, Channel], RGB order (note: cv2 reads/writes BGR by default)
    result_detection_box_index = 2 # format: [[top_x, top_y, width, height], ...]
    result_detection_class_index = 3 # format: [class_represent]; class_represent is the contiguous class index, not the COCO cat_id
    result_detection_score_index = 4
@staticmethod
def generate_base_information(image_ann):
base_information = [None] * COCOBase.base_information_length
base_information[COCOBase.image_height_index_in_base_information] = image_ann[0]['height']
base_information[COCOBase.image_width_index_in_base_information] = image_ann[0]['width']
base_information[COCOBase.image_id_index_in_base_information] = image_ann[0]['id']
return base_information
@staticmethod
def generate_default_datas():
return [None] * COCOBase.data_length
@staticmethod
def generate_default_result():
return [None] * COCOBase.result_length
@staticmethod
def detection_get_cat_id(cat_name=None, represent_value=None):
assert False in [t is None for t in [cat_name, represent_value]]
return COCOBase.represent_to_cat_id[COCOBase.cat_name_to_represent[cat_name]] if cat_name is not None else COCOBase.represent_to_cat_id[represent_value]
@staticmethod
def detection_get_cat_name(cat_id=None, represent_value=None):
assert False in [t is None for t in [cat_id, represent_value]]
return COCOBase.cat_id_to_cat_name[cat_id] if cat_id is not None else COCOBase.cat_id_to_cat_name[COCOBase.represent_to_cat_id[represent_value]]
    @staticmethod
    def coco_basical_statistic(coco_root_dir, save_to):
        '''
        @brief
            Count, per annotation task (instances, person keypoints, captions)
            and per train/eval split, how many images carry annotations and
            how much the image sets overlap, then save the resulting table as
            basical_statistic.png under save_to.
        '''
        instances_file_train = os.path.join(coco_root_dir, 'annotations/instances_train2017.json')
        instances_file_eval = os.path.join(coco_root_dir, 'annotations/instances_val2017.json')
        person_keypoints_train = os.path.join(coco_root_dir, 'annotations/person_keypoints_train2017.json')
        person_keypoints_eval = os.path.join(coco_root_dir, 'annotations/person_keypoints_val2017.json')
        captions_train = os.path.join(coco_root_dir, 'annotations/captions_train2017.json')
        captions_eval = os.path.join(coco_root_dir, 'annotations/captions_val2017.json')
        image_info_test = os.path.join(coco_root_dir, 'annotations/image_info_test2017.json')
        # img_amount
        result = list()
        itcoco = COCO(instances_file_train)
        # only images that actually carry annotations for the task
        i_train_img_ids = set([v['image_id'] for k, v in itcoco.anns.items()])
        result.append({'name': 'train_instance', 'img_amount': len(i_train_img_ids)})
        ptcoco = COCO(person_keypoints_train)
        p_train_img_ids = set([v['image_id'] for k, v in ptcoco.anns.items()])
        result.append({'name': 'train_person_keypoint', 'img_amount': len(p_train_img_ids)})
        ctcoco = COCO(captions_train)
        c_train_img_ids = set([v['image_id'] for k, v in ctcoco.anns.items()])
        result.append({'name': 'train_caption', 'img_amount': len(c_train_img_ids)})
        # pairwise and triple intersections of the train image sets
        img_ids_in_instance_and_person_keypoint = [i for i in p_train_img_ids if i in i_train_img_ids]
        result.append({'name': 'train_instance_person_keypoint', 'img_amount': len(img_ids_in_instance_and_person_keypoint)})
        img_ids_in_instance_and_caption = [i for i in p_train_img_ids if i in c_train_img_ids]
        result.append({'name': 'train_instance_caption', 'img_amount': len(img_ids_in_instance_and_caption)})
        img_ids_in_instance_and_person_keypoint_and_caption = [i for i in img_ids_in_instance_and_person_keypoint if i in img_ids_in_instance_and_caption]
        result.append({'name': 'instance_person_keypoint_caption', 'img_amount': len(img_ids_in_instance_and_person_keypoint_and_caption)})
        # same statistics on the eval split
        iecoco = COCO(instances_file_eval)
        i_eval_img_ids = set([v['image_id'] for k, v in iecoco.anns.items()])
        result.append({'name': 'eval_instance', 'img_amount': len(i_eval_img_ids)})
        pecoco = COCO(person_keypoints_eval)
        p_eval_img_ids = set([v['image_id'] for k, v in pecoco.anns.items()])
        result.append({'name': 'eval_person_keypoint', 'img_amount': len(p_eval_img_ids)})
        cecoco = COCO(captions_eval)
        c_eval_img_ids = set([v['image_id'] for k, v in cecoco.anns.items()])
        result.append({'name': 'eval_caption', 'img_amount': len(c_eval_img_ids)})
        img_ids_in_instance_and_person_keypoint = [i for i in p_eval_img_ids if i in i_eval_img_ids]
        result.append({'name': 'eval_instance_person_keypoint', 'img_amount': len(img_ids_in_instance_and_person_keypoint)})
        img_ids_in_instance_and_caption = [i for i in p_eval_img_ids if i in c_eval_img_ids]
        result.append({'name': 'eval_instance_caption', 'img_amount': len(img_ids_in_instance_and_caption)})
        img_ids_in_instance_and_person_keypoint_and_caption = [i for i in img_ids_in_instance_and_person_keypoint if i in img_ids_in_instance_and_caption]
        result.append({'name': 'instance_person_keypoint_caption', 'img_amount': len(img_ids_in_instance_and_person_keypoint_and_caption)})
        # render the collected rows as an image-only table
        result_df = pd.DataFrame(result)
        plt.rcParams['savefig.dpi'] = 300
        fig = plt.figure(figsize=(5, 4))
        ax = fig.add_subplot(111, frame_on=False) # no visible frame
        ax.xaxis.set_visible(False) # hide the x axis
        ax.yaxis.set_visible(False) # hide the y axis
        table(ax, result_df, loc='center')
        plt.savefig(os.path.join(save_to, 'basical_statistic.png'))
        pass
    @staticmethod
    def detection_statistic_obj_size_follow_cat(cat_names, ann_file, save_to):
        """
        For every given category, histogram the bbox areas (scaled by 1/100)
        of its annotations in ann_file and save one png per category under
        save_to.  cat_names may be a single name or a list of names.
        """
        cat_ids = [COCOBase.represent_to_cat_id[COCOBase.cat_name_to_represent[cat_name]] for cat_name in cat_names] if type(cat_names).__name__ == 'list'\
            else [COCOBase.represent_to_cat_id[COCOBase.cat_name_to_represent[cat_names]]]
        coco = COCO(ann_file)
        #row_amount = np.floor(np.sqrt(len(cat_ids)))
        #col_amount = row_amount
        #plt.figure(figsize=(row_amount, col_amount))
        #fig = plt.figure()
        #fig.suptitle('y: counts, x: bbox area/1000')
        for index, cat_id in enumerate(cat_ids):
            #plt.subplot(row_amount, col_amount, index + 1)
            ann_ids = coco.getAnnIds(catIds=[cat_id])
            anns = coco.loadAnns(ann_ids)
            anns_df = pd.DataFrame(anns)
            # bbox format is [top_x, top_y, width, height] -> area = w * h
            bbox_area = anns_df['bbox'].apply(lambda x: x[2] * x[3])
            plt.rcParams['savefig.dpi'] = 300
            (bbox_area/100).plot.hist(grid=True, bins=500, rwidth=0.9, color='#607c8e')
            plt.title(COCOBase.cat_id_to_cat_name[cat_id])
            plt.ylabel('Counts')
            plt.xlabel('bbox area/100')
            plt.savefig(os.path.join(save_to, 'box_area_histogram_{}.png'.format(COCOBase.cat_id_to_cat_name[cat_id])))
            plt.close()
            #plt.show()
            #hist, xedges, yedges = np.histogram2d(anns_df['bbox'].apply(lambda x: x[2]), anns_df['bbox'].apply(lambda x: x[3]), bins=1000)
            pass
        pass
@staticmethod
def detection_statistic_img_amount_obj_amount(ann_file, save_to, cat_name=None):
coco = COCO(ann_file)
if cat_name is not None:
cat_ids = [COCOBase.represent_to_cat_id[COCOBase.cat_name_to_represent[cat_name]] for cat_name in cat_names] if type(cat_names).__name__ == 'list'\
else [COCOBase.represent_to_cat_id[COCOBase.cat_name_to_represent[cat_names]]]
pass
else:
cat_ids = coco.getCatIds()
pass
result = list()
for cat_id in cat_ids:
img_id = coco.getImgIds(catIds=[cat_id])
ann_id = coco.getAnnIds(catIds=[cat_id])
result.append({'category': COCOBase.cat_id_to_cat_name[cat_id], 'img_amount': len(img_id), \
'cat_id': cat_id, 'obj_amount': len(ann_id)})
pass
result.append({'category': 'all', 'img_amount': len(set([v['image_id'] for k, v in coco.anns.items()])), 'cat_id': 'all', \
'obj_amount': len(coco.anns)})
result_df = pd.DataFrame(result)
plt.rcParams['savefig.dpi'] = 300
fig = plt.figure(figsize=(5, 15))
ax = fig.add_subplot(111, frame_on=False) # no visible frame
ax.xaxis.set_visible(False) # hide the x axis
ax.yaxis.set_visible(False) # hide the y axis
table(ax, result_df, loc='center')
plt.savefig(os.path.join(save_to, 'category_img_amount.png'))
pass
    @staticmethod
    def detection_statistic_obj_size_follow_img(img_id, ann_file):
        # TODO: not implemented yet — presumably intended to plot object-size
        # statistics for a single image (compare
        # detection_statistic_obj_size_follow_cat); confirm before relying on it.
        pass
def __init__(
self,
coco_root_dir,
stage,
information_save_to_path,
detection,
key_points,
stuff,
panoptic,
dense_pose,
captions,
use_rate,
remain_strategy,
cat_ids=None,
):
pcd.CommonDataWithAug.__init__(self, use_rate=use_rate, sub_data=cat_ids, remain_strategy=remain_strategy)
self._information_save_to_path = information_save_to_path
self._coco_root_dir = coco_root_dir
self._stage = stage
self._img_root_name = 'train2017' if self._stage == COCOData.Stage.Train else \
('val2017' if self._stage == COCOData.Stage.Evaluate else 'test2017')
self._img_root_dir = os.path.join(self._coco_root_dir, self._img_root_name)
self._detection = detection
self._key_points = key_points
self._stuff = stuff
self._panoptic = panoptic
self._dense_pose = dense_pose
self._captions = captions
assert True in [self._detection, self._key_points, self._stuff, self._panoptic, self._dense_pose, self._captions]
self._cat_ids = cat_ids
COCOBaseLogger.info('specified cat_ids: {}'.format(self._cat_ids)) if self._cat_ids is not None else None
self._instances_file_train = os.path.join(self._coco_root_dir, 'annotations/instances_train2017.json')
self._instances_file_eval = os.path.join(self._coco_root_dir, 'annotations/instances_val2017.json')
self._person_keypoints_file_train = os.path.join(self._coco_root_dir, 'annotations/person_keypoints_train2017.json')
self._person_keypoints_file_eval = os.path.join(self._coco_root_dir, 'annotations/person_keypoints_val2017.json')
self._captions_file_train = os.path.join(self._coco_root_dir, 'annotations/captions_train2017.json')
self._captions_file_eval = os.path.join(self._coco_root_dir, 'annotations/captions_val2017.json')
self._image_info_test = os.path.join(self._coco_root_dir, 'annotations/image_info_test2017.json')
belong_instances = [self._detection, self._stuff, self._panoptic]
belong_person_keypoints = [self._key_points]
belong_captions = [self._captions]
with_label = [COCOBase.Stage.Train, COCOBase.Stage.Evaluate]
without_label = [COCOBase.Stage.Test]
self._captions_coco, captions_load = (COCO(self._captions_file_train \
if self._stage == COCOBase.Stage.Train else self._captions_file_eval), True) \
if ((self._stage in with_label) and (self._captions)) else (None, False)
self._captions_img_ids = list(set([v['image_id'] for k, v in self._captions_coco.anns.items()])) if captions_load else list()
self._instances_coco, instances_load = (COCO(self._instances_file_train \
if self._stage == COCOBase.Stage.Train else self._instances_file_eval), True) \
if ((self._stage in with_label) and (True in [self._detection, self._stuff, self._panoptic])) else (None, False)
self._instances_img_ids = list(set([v['image_id'] for k, v in self._instances_coco.anns.items()])) if instances_load else list()
self._person_keypoints_coco, key_point_load = (COCO(self._person_keypoint_file_train \
if self._stage == COCOBase.Stage.Train else self._person_keypoints_file_eval), True) \
if ((self._stage in with_label) and (self._key_points)) else (None, False)
self._person_keypoints_img_ids = list(set([v['image_id'] for k, v in self._preson_keypoints_coco.anns.items()])) if key_point_load else list()
self._image_test, image_test_load = (COCO(self._image_info_test), True) if self._stage in without_label else (None, False)
self._image_test_img_ids = self._image_test.getImgIds() if image_test_load else list()
assert [instances_load, key_point_load, captions_load, image_test_load].count(True) == 1, 'only support one type'
#COCOBaseLogger.warning('') if self._cat_id != COCOBase.
# we know use the detectio only
#self._data_field = COCOBase.__get_common_id([self._instances_img_ids, self._persion_keypoints_img_ids, \
# self._captions_img_ids, self._image_test_img_ids])
# TODO:record
if self._stage in [COCOBase.Stage.Train, COCOBase.Stage.Evaluate]:
if key_point_load:
assert not instances_load and not captions_load, 'should not use person_keypoint with caption and instance'
COCOBaseLogger.warning(Fore.RED + 'cat_id is invalid in person_keypoint' + Fore.RESET) if self._cat_ids is not None else None
self._data_field = self._person_keypoints_img_ids
if instances_load:
COCOBaseLogger.info('use instance{}'.format(' and caption' if captions_load else ''))
self._data_field = self._instances_coco.getImgIds(catIds=self._cat_ids) if self._cat_ids is not None and self._remain_strategy == COCOBase.RemainStrategy.Drop else self._instances_img_ids
self._cat_id_to_represent = copy.deepcopy(COCOBase.cat_id_to_represent) if self._cat_ids is None else {cat_id: index for index, cat_id in enumerate(self._cat_ids)}
self._represent_to_cat_id = {v: k for k, v in self._cat_id_to_represent.items()}
if self._information_save_to_path is not None:
with open(os.path.join(self._information_save_to_path, 'detection_cat_id_to_represent.json'), 'w') as fp:
json.dump(self._cat_id_to_represent, fp, indent=4)
if captions_load and not instances_load:
COCOBaseLogger.info('use caption')
self._data_field = self._captions_img_ids
elif self._stage == COCOBase.Stage.Test:
COCOBaseLogger.warning(Fore.RED + 'cat_ids is invalid in Test' + Fore.RESET) if self._cat_ids is not None else None
self._data_field = self._image_test_img_ids
else:
raise NotImplementedError(Fore.RED + 'Stage: {} is not Implemented'.format(stage) + Fore.RESET)
self._fix_field()
## check the ann
#if self._stage in with_label:
# image_without_ann = dict()
# for index in self._data_field:
# image_ann = self._instances_coco.loadImgs(index)
# ann_ids = self._instances_coco.getAnnIds(index)
# if len(ann_ids) == 0:
# image_without_ann[index] = image_ann
# for index_out in list(image_without_ann.keys()):
# self._data_field.remove(index_out)
# with open('./image_without_ann.json', 'w') as fp:
# str_ = json.dumps(image_without_ann, indent=4)
# fp.write(str_)
# pass
# result
self._detection_result = None
self._detection_result_file_name = 'detection_result'
# detection indicator result
self._detection_indicator_result = None
self._detection_indicator_result_file_name = 'detection_indicator_result'
pass
def read_image(self, file_name):
image = cv2.imread(os.path.join(self._img_root_dir, file_name)).astype(np.uint8)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
assert(image is not None)
return image
    def represent_value_to_category_id(self, represent_value):
        # NOTE(review): unimplemented stub — presumably meant to map a model
        # "represent" index back to a COCO category id via
        # self._represent_to_cat_id; TODO confirm and implement.
        pass
    def save_result(self, result=None, save=False, prefix=None):
        '''
        @brief collect one image's inference result and optionally flush to file
        @note
            Results are keyed on a single image; the image and base_information
            entries are shared by every task. Layout of a detection result:
                bboxes, ndarray, boxes in the coordinate system of the original
                    image size; a single bbox is [top_x, top_y, width, height] and
                    one image may carry several bboxes collected in a list
                classes, ndarray, the classes emitted by the model. COCO keeps a
                    cat_id <--> represent mapping and the result stores the
                    *represent* values, which keeps the COCO mapping closed and
                    complete; each class is an int, one per bbox, in a list
                scores, ndarray, the model's score, float, one per bbox, in a list
        @param[in] result defaults to None; when None no result data is added but
            the save/flush logic still runs
        @param[in] save bool, whether the accumulated result is written to file
        '''
        if result is None:
            self.add_detection_result(save=save, prefix=prefix)
            return
        if self._detection:
            base_information = result[COCOBase.result_base_information_index]
            image = result[COCOBase.result_image_index]
            image_id = base_information[COCOBase.image_id_index_in_base_information]
            image_width = base_information[COCOBase.image_width_index_in_base_information]
            image_height = base_information[COCOBase.image_height_index_in_base_information]
            # true_image_size / resized_image_size = true_box_size / resized_box_size
            bboxes = result[COCOBase.result_detection_box_index] * ([image_width / image.shape[1], image_height / image.shape[0]] * 2)
            bboxes = bboxes if type(bboxes).__name__ == 'list' else bboxes.tolist()
            classes = result[COCOBase.result_detection_class_index]
            # translate model "represent" indices back to COCO category ids
            classes = [self._represent_to_cat_id[_class] for _class in classes]
            classes = classes if type(classes).__name__ == 'list' else classes.tolist()
            scores = result[COCOBase.result_detection_score_index]
            scores = scores if type(scores).__name__ == 'list' else scores.tolist()
            self.add_detection_result(
                image=image,
                image_id=image_id,
                category_ids=classes, bboxes=bboxes, scores=scores, save=save, prefix=prefix)
            return 0
            pass
        elif self._stuff:
            raise NotImplementedError('save the detection result is not implemented')
            pass
        else:
            raise NotImplementedError('save the detection result is not implemented')
            pass
        pass
@staticmethod
def generate_result_file_name(prefix, common_name):
return '{}{}.csv'.format('{}_'.format(prefix) if prefix is not None else '', common_name)
def add_detection_result(self, image=None, image_id=None, category_ids=None, bboxes=None, scores=None, save=False, prefix=None):
'''
@brief save the detection result base on one image
@note
@param[in] image ndarray the image
@param[in] image_id int the id of the image
@param[in] category_ids list|[int|category_id, ...] the category of the bboxes
@param[in] bboxes list|[list|[float|top_left_x, float|top_left_y, float|width, float|height], ...]
@param[in] scores list|[float|score, ...]
@param[in] save
bool
save the result to the file or not, if True the _detection_result would be saved to _detection_result file,
_detection_result would be set as None, _detection_result_file would be changed
@param[in] prefix str the prefix of the file name to save the result
'''
sync_status = [image is None, image_id is None, category_ids is None, bboxes is None, scores is None]
if True in sync_status:
COCODataLogger.warning(Fore.RED + 'None found in the result data, nothing would be add to result' + Fore.RESET)
else:
used_wh = image.shape[0: 2][::-1]
self._detection_result = pd.DataFrame() if self._detection_result is None else self._detection_result
result_temp = list()
for category_id, bbox, score in zip(category_ids, bboxes, scores):
result_temp.append({'image_id': image_id, 'category_id': category_id, 'bbox': bbox, 'score': score})
self._detection_result = self._detection_result.append(result_temp, ignore_index=True)
if save:
if self._detection_result is not None:
# : save the _detection_result
self._detection_result.set_index(['image_id'], inplace=True)
detection_result_file_path = os.path.join(self._information_save_to_path, \
COCOBase.generate_result_file_name(prefix, self._detection_result_file_name))
self._detection_result.to_csv(detection_result_file_path)
pass
self._detection_result = None
#self._detection_result_file_name = \
#'{}_{}-{}.csv'.format(prefix, self._detection_result_file_name.split('.')[0], \
# 1 if len(self._detection_result_file_name.split('.')[0].split('-')) == 1 else int(self._detection_result_file_name.split('.')[0].split('-')[-1]) + 1)
#self._detection_result_file = os.path.join(self._information_save_to_path, self._detection_result_file_name)
pass
pass
    def evaluate(self, image_ids=None, cat_ids=None, prefix=None, use_visual=False):
        # NOTE(review): unimplemented stub — evaluate_detection below holds the
        # actual evaluation logic; presumably this should dispatch per task.
        pass
    ##@brief evaluate the performance
    # @note uses the result files in self._information_save_to_path: all result files are
    #  combined and saved to a json file, which is then used to evaluate the performance
    #  on the intersection of image_ids and cat_ids
    # @param[in] image_ids the images considered in the evaluation; when not specified, every image returned by the target coco's getImgIds is evaluated
    # @param[in] cat_ids the categories considered in the evaluation; when not specified, every category returned by the target coco's getCatIds is evaluated
    # @param[in] scores list of thresholds; only bboxes whose score reaches a threshold are considered
    # @param[in] ious list of thresholds; AP and AR are computed at these IoU values
def evaluate_detection(self, image_ids=None, cat_ids=None, scores=None, ious=None, prefix=None, use_visual=False):
assert type(prefix).__name__ == 'list' or prefix is None or type(prefix).__name__ == 'str'
target_files = [COCOBase.generate_result_file_name(prefix, self._detection_result_file_name) for _prefix in prefix] if type(prefix).__name__ == 'list' \
else [COCOBase.generate_result_file_name(prefix, self._detection_result_file_name)]
detection_result = None
for target_file in target_files:
detection_result_temp = pd.read_csv(os.path.join(self._information_save_to_path, target_file), \
converters={'bbox': lambda x: [float(t.strip('[').strip(']')) for t in x.split(',')]})
if detection_result is not None:
detection_result = detection_result.append(detection_result_temp)
else:
detection_result = detection_result_temp
pass
pass
cat_ids = cat_ids if cat_ids is not None else self._instances_coco.getCatIds()
if scores is not None:
t = lambda x, score: x >= score
else:
t = lambda x, score: x != None
scores = [None] if scores is None else scores
for score in scores:
COCODataLogger.info('evaluate with score: {}'.format(score))
json_file_path = os.path.join(self._information_save_to_path, '{}_score_{}_formated_sub_detection_result.json'.format(prefix, score))
evaluate_detection_result_file_path = os.path.join(self._information_save_to_path, '{}_score_{}_result.json'.format(prefix, score))
sub_detection_result = detection_result[t(detection_result['score'], score)]
if use_visual:
visual_save_to = os.path.join(self._information_save_to_path, '{}-{}'.format(prefix, score))
if os.path.exists(visual_save_to) and os.path.isdir(visual_save_to):
pass
else:
os.mkdir(visual_save_to)
pass
from Putil.trainer.visual.image_visual.point_visual import PointVisual
from Putil.trainer.visual.image_visual.rectangle_visual import RectangleVisual
pv = PointVisual(); rv = RectangleVisual(2)
image_ids = self._instances_coco.getImgids() if image_ids is None else image_ids
img_anns = self._instances_coco.loadImgs(image_ids)
for image_id in image_ids:
img_ann = self._instances_coco.loadImgs([image_id])[0]
img_numpy = self.read_image(img_ann['file_name'])
result_for_this_image = sub_detection_result[sub_detection_result['image_id']==img_ann['id']]
def return_center_xy(s):
'''提供生成中心点x,y的方法,用于DataFrame的apply'''
s['x'] = s['bbox'][0] + 0.5 * s['bbox'][2]
s['y'] = s['bbox'][1] + 0.5 * s['bbox'][3]
return s
def return_normal_rectangle(s):
'''提供分离bbox的方法,因为原本在DataFrame中存储bbox使用的是一个list,没法转化为[*, 4]格式的ndarry
目前没有找到其他方法,使用该函数分离[top_x, top_y, width, height]'''
s['top_x'], s['top_y'], s['w'], s['h'] = s['bbox'][0], s['bbox'][1], s['bbox'][2], s['bbox'][3]
return s
# : 获取当前coco的label
labels_for_this_image = self._instances_coco.loadAnns(self._instances_coco.getAnnIds(imgIds=[image_id], catIds=cat_ids))
if not result_for_this_image.empty:
# visual the pre
img_visual = pv.visual_index(img_numpy, result_for_this_image.apply(return_center_xy, axis=1)[['x', 'y']].values, [0, 255, 0])
img_visual = rv.rectangle_visual(img_visual, pd.DataFrame(result_for_this_image['bbox']).apply(return_normal_rectangle, axis=1)[['top_x', 'top_y', 'w', 'h']].values, \
scores=result_for_this_image['score'], fontScale=0.3)
else:
img_visual = img_numpy
if len(labels_for_this_image) != 0:
# visual the gt
gt_bboxes = np.array([label['bbox'] for label in labels_for_this_image])
gt_center_xy = gt_bboxes[:, 0: 2] + gt_bboxes[:, 2: 4] / 2.0
img_visual = pv.visual_index(img_visual, gt_center_xy, [255, 0, 0])
img_visual = rv.rectangle_visual(img_visual, gt_bboxes, fontScale=0.3, color_map=[[255, 0, 0]])
cv2.imwrite(os.path.join(visual_save_to, '{}.png'.format(img_ann['id'])), cv2.cvtColor(img_visual, cv2.COLOR_RGB2BGR))
pass
index_name = {index: name for index, name in enumerate(list(sub_detection_result.columns))}
sub_detection_result_formated = [{index_name[index]: tt for index, tt in enumerate(t)} for t in list(np.array(sub_detection_result))]
with open(json_file_path, 'w') as fp:
json.dump(sub_detection_result_formated, fp)
sub_detection_result_coco = self._instances_coco.loadRes(json_file_path)
#result_image_ids = sub_detection_result_coco.getImgIds()
cocoEval = cocoeval.CustomCOCOeval(self._instances_coco, sub_detection_result_coco, 'bbox')
cocoEval.params.imgIds = np.array(image_ids) if image_ids is not None else cocoEval.params.imgIds
cocoEval.params.catIds = np.array(cat_ids) if cat_ids is not None else cocoEval.params.catIds
cocoEval.params.iouThrs = np.array(ious) if ious is not None else cocoEval.params.iouThrs
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
pass
def evaluate_segmentation(self, image_ids, cat_ids, prefix):
raise NotImplementedError('evaluate segmentation is not implemented')
pass
def evaluate_keypoint(self, image_ids, cat_ids, prefix):
raise NotImplementedError('evaluate keypoint is not implemented')
pass
def evaluate_panoptic(self, image_ids, cat_ids, prefix):
raise NotImplementedError('evaluate panoptic is not implemented')
pass
    def get_detection_indicator(self, scores, ious, pre_file, image_ids=None, cat_ids=None):
        '''
        @brief
        @note calculate the map
        '''
        # NOTE(review): unimplemented stub — the locals below only select the
        # target ids; presumably meant to compute mAP from `pre_file`. TODO
        # confirm and implement (the locals are currently unused).
        target_image_ids = image_ids if image_ids is not None else self._data_field
        target_cat_ids = cat_ids if cat_ids is not None else self._cat_ids
        pass
pass
class COCOData(COCOBase):
    """Concrete COCO2017 dataset that yields resized images plus detection labels."""
    @staticmethod
    def set_seed(seed):
        """Forward the random seed to the common data framework."""
        pcd.CommonDataWithAug.set_seed(seed)

    def set_detection_label_generator(self, generator):
        # NOTE(review): the label-generator hooks below are unimplemented stubs.
        pass

    def set_key_points_label_generator(self, generator):
        pass

    def set_stuff_label_generator(self, generator):
        pass

    def set_panoptic_label_generator(self, generator):
        pass

    def set_dense_pose_label_generator(self, generator):
        pass

    def set_captions_label_generator(self, generator):
        pass

    @staticmethod
    def __get_common_id(id_lists):
        """Return the ids present in every list of *id_lists* (a None list matches all)."""
        if len(id_lists) > 1:
            common_list = list()
            for sample in id_lists[0]:
                view = [(sample in id_list) if id_list is not None else True for id_list in id_lists[1:]]
                if False not in view:
                    common_list.append(sample)
            return common_list
        else:
            return id_lists[0]

    def __init__(
            self,
            coco_root_dir,
            stage,
            information_save_to_path=None,
            detection=False,
            key_points=False,
            stuff=False,
            panoptic=False,
            dense_pose=False,
            captions=False,
            cat_ids=None,
            use_rate=1.0,
            image_width=128,
            image_height=128,
            remain_strategy=pcd.CommonData.RemainStrategy.Drop):
        '''
        @brief focus on coco2017
        @note
        @param[in] stage
        the stage of the dataset, Stage.STAGE_TRAIN,Stage.STAGE_EVAL or Stage.STAGE_TEST
        @param[in] coco_root_dir
        the root dir of the coco, the annotations is the path which contain the ann files
        @param[in] information_save_to_path
        the path to save the data information
        @param[in] detection
        read the detection label or not(in the file: instances)
        @param[in] stuff
        read the stuff label or not(in the file instances)
        @param[in] panoptic
        read the panoptic label or not(in the file)
        @param[in] dense_pose
        read the dense_pose label or not
        @param[in] captions
        read the captions label or not
        @param[in] cat_ids
        used as sub_data
        @param[in] use_rate
        data used rate
        @param[in] image_width/image_height target size every image is resized to
        '''
        self._image_width = image_width
        self._image_height = image_height
        COCOBase.__init__(self, coco_root_dir, stage, information_save_to_path, detection, \
            key_points, stuff, panoptic, dense_pose, captions, use_rate, remain_strategy, cat_ids)

    def _restart_process(self, restart_param):
        """Pick up new target image sizes from *restart_param* (a dict)."""
        # Fix: the original called restart_param('image_width', ...) — a dict is
        # not callable; use .get exactly like the image_height line below.
        self._image_width = restart_param.get('image_width', self._image_width)
        self._image_height = restart_param.get('image_height', self._image_height)

    def _inject_operation(self, inject_param):
        # NOTE(review): unimplemented hook of the CommonDataWithAug framework.
        pass

    def __generate_base_image_information(self, image_ann):
        """Pack [height, width, id] of the first image annotation."""
        return [image_ann[0]['height'], image_ann[0]['width'], image_ann[0]['id']]

    class BaseInformationIndex(Enum):
        # indices into the base_information list built above
        ImageHeightIndex = 0
        ImageWidthIndex = 1
        ImageIdIndex = -1

    @staticmethod
    def get_image_height(base_information):
        # Fix: index with the enum *value* — indexing a list with a plain Enum
        # member raises TypeError.
        return base_information[COCOData.BaseInformationIndex.ImageHeightIndex.value]

    @staticmethod
    def get_image_width(base_information):
        # Fix: the original lacked @staticmethod here, binding base_information
        # as `self`.
        return base_information[COCOData.BaseInformationIndex.ImageWidthIndex.value]

    @staticmethod
    def get_image_id(base_information):
        # Fix: the original lacked @staticmethod here as well.
        return base_information[COCOData.BaseInformationIndex.ImageIdIndex.value]

    def _generate_from_origin_index(self, index):
        '''
        @brief generate the image [detection_label ]
        @note
        @ret
        [0] image [height, width, channel] np.float32
        [1] bboxes list float [[top_x, top_y, width, height], ....(boxes_amount)]
        [2] base_information list|[list|[int|image_height, int|image_width, int|image_id], ...]
        [-1] classes list int [class_index, ....] 0 for the background class may not equal with the category_id
        '''
        if self._stage == COCOData.Stage.Test:
            return self.__generate_test_from_origin_index(index)
        elif True in [self._detection, self._stuff, self._panoptic]:
            datas = COCOBase.generate_default_datas()
            image_ann = self._instances_coco.loadImgs(self._data_field[index])
            base_information = COCOBase.generate_base_information(image_ann)
            ann_ids = self._instances_coco.getAnnIds(self._data_field[index])
            anns = self._instances_coco.loadAnns(ann_ids)
            image = self.read_image(image_ann[0]['file_name'])
            resize_width = self._image_width
            resize_height = self._image_height
            x_scale = float(resize_width) / image.shape[1]
            y_scale = float(resize_height) / image.shape[0]
            # Fix: cv2.resize expects a cv2 interpolation flag; the original passed
            # PIL's Image.BILINEAR (== 2), which cv2 interprets as INTER_CUBIC.
            image = cv2.resize(image, (resize_width, resize_height), interpolation=cv2.INTER_LINEAR)
            bboxes = list()
            classes = list()
            for ann in anns:
                # honor the optional category subset
                if self._cat_ids is not None and ann['category_id'] not in self._cat_ids:
                    continue
                box = ann['bbox']
                classes.append(self._cat_id_to_represent[ann['category_id']])
                # scale [top_x, top_y, width, height] into the resized image frame
                bboxes.append([box[0] * x_scale, box[1] * y_scale, box[2] * x_scale, box[3] * y_scale])
            if len(bboxes) != 0:
                bboxes = detection_util.clip_box_using_image(bboxes, image)
                # NOTE(review): np.argwhere on the 2-d bboxes array yields
                # (row, col) pairs but they are used as flat delete indices on
                # `classes`; looks suspicious — TODO confirm NaN handling.
                classes = np.delete(classes, np.argwhere(np.isnan(bboxes)), axis=0)
                bboxes = np.delete(bboxes, np.argwhere(np.isnan(bboxes)), axis=0)
            datas[COCOBase.base_information_index] = base_information
            datas[COCOBase.image_index] = image
            datas[COCOBase.detection_box_index] = bboxes
            datas[COCOBase.detection_class_index] = classes
            if len(datas[COCOBase.detection_box_index]) == 0 and putil_status.putil_is_debug():
                COCODataLogger.warning('original data generate no obj')
            return tuple(datas)
        else:
            raise NotImplementedError('unimplemented')

    def _aug_check(self, *args):
        """Sanity-check (augmented) detection data and return *args* unchanged."""
        if self._stage == COCOData.Stage.Train or (self._stage == COCOData.Stage.Evaluate):
            if True in [self._detection, self._stuff, self._panoptic]:
                bboxes = args[COCOBase.detection_box_index]
                classes = args[COCOBase.detection_class_index]
                assert len(bboxes) == len(classes)
                if len(bboxes) == 0 and putil_status.putil_is_debug():
                    COCODataLogger.warning('zero obj occur')
                assert np.argwhere(np.isnan(np.array(bboxes))).size == 0
            else:
                # TODO: other type
                pass
        elif self._stage == COCOData.Stage.Test:
            pass
        else:
            raise ValueError('stage: {} not supported'.format(self._stage))
        return args

    @staticmethod
    def statistic(coco_root='', year=''):
        '''
        generate a better statistic file for coco data, which should be easier to use
        '''
        train_root = os.path.join(coco_root, 'train{0}'.format(year))
        test_root = os.path.join(coco_root, 'test{0}'.format(year))
        val_root = os.path.join(coco_root, 'val{0}'.format(year))
        # TODO(review): unfinished — the annotation path below is hard-coded and
        # should be derived from coco_root/year; `instances` is never used yet.
        # get the label field, which data is unlabeled, which is labeled
        with open('/data/Public_Data/COCO/annotations/instances_train2017.json', 'r') as fp:
            instances = json.load(fp)
        # if the image does not exist, download the image
        # instances

    def __generate_test_from_origin_index(self, index):
        """Return (resized_image, image_id) for the Test stage (no labels)."""
        image_ann = self._image_test.loadImgs(self._image_test_img_ids[index])
        image = self.read_image(image_ann[0]['file_name'])
        resize_width = self._image_width
        resize_height = self._image_height
        x_scale = float(resize_width) / image.shape[1]
        y_scale = float(resize_height) / image.shape[0]
        # Fix: same interpolation-flag mix-up as in _generate_from_origin_index.
        image = cv2.resize(image, (resize_width, resize_height), interpolation=cv2.INTER_LINEAR)
        return image, image_ann[0]['id']

    def __generate_instance_from_origin_index(self, index):
        # NOTE(review): unimplemented stub.
        pass

    def __generate_keypoint_from_origin_index(self, index):
        pass

    def __generate_caption_from_origin_index(self, index):
        pass
# register COCOData under the name 'COCOData' with the CommonDataManager
pcd.CommonDataManager.register('COCOData', COCOData)
class SubCOCOData(COCOData):
    # NOTE(review): stub — __init__ never calls COCOData.__init__ and takes no
    # arguments, so instances are unusable as datasets; presumably a placeholder.
    # TODO confirm intent or complete the implementation.
    def __init__(self):
        pass
from torch.utils.data import Dataset
class COCODataWithTorch(COCOData, Dataset):
    """COCOData variant that also satisfies the torch ``Dataset`` interface."""
    def __init__(
            self,
            coco_root_dir,
            stage,
            information_save_to_path=None,
            detection=False,
            key_points=False,
            stuff=False,
            panoptic=False,
            dense_pose=False,
            captions=False,
            cat_ids=None,
            use_rate=1.0,
            image_width=128,
            image_height=128,
            remain_strategy=None):
        # Initialize the COCO side first, then the (stateless) torch Dataset side.
        coco_kwargs = dict(
            coco_root_dir=coco_root_dir,
            stage=stage,
            information_save_to_path=information_save_to_path,
            detection=detection,
            key_points=key_points,
            stuff=stuff,
            panoptic=panoptic,
            dense_pose=dense_pose,
            captions=captions,
            cat_ids=cat_ids,
            use_rate=use_rate,
            image_width=image_width,
            image_height=image_height,
            remain_strategy=remain_strategy)
        COCOData.__init__(self, **coco_kwargs)
        Dataset.__init__(self)
#class COCOCommonAugBase:
# instance_image_index = 0
# image_index = instance_image_index
# instance_bboxes_index = 1
# bboxes_index = instance_bboxes_index
# base_information_index = 2
# instance_classes_index= -1
# classes_index = instance_classes_index
#
# @staticmethod
# def _repack(*original_input, image=None, bboxes=None, classes=None):
# #import pdb; pdb.set_trace()
# image = image if image is not None else original_input[COCOCommonAugBase.image_index]
# bboxes = np.array(bboxes if bboxes is not None else original_input[COCOCommonAugBase.bboxes_index])
# classes = np.array(classes if classes is not None else original_input[COCOCommonAugBase.classes_index])
# base_information = np.array(original_input[COCOCommonAugBase.base_information_index])
# classes = np.delete(classes, np.argwhere(np.isnan(bboxes)), axis=0)
# bboxes = np.delete(bboxes, np.argwhere(np.isnan(bboxes)), axis=0)
# return image, bboxes.tolist(), base_information, classes.tolist()
#
# @staticmethod
# def image(*args):
# return args[COCOCommonAugBase.image_index]
#
# @staticmethod
# def bboxes(*args):
# return args[COCOCommonAugBase.bboxes_index]
#
# @staticmethod
# def classes(*args):
# return args[COCOCommonAugBase.classes_index]
#
# @staticmethod
# def base_information(*args):
# return args[COCOCommonAugBase.base_information_index]
# pass
#
##pcd.CommonDataManager.register('COCO', COCO) | 52.379032 | 1,210 | 0.656967 |
6d210eff071ee8e2d2f0f5eebf1778f130d94e7c | 8,146 | py | Python | python/cudf/setup.py | sleeepyjack/cudf | 908c13032800c7ae0d11fc30102f8d273a3ac1db | [
"Apache-2.0"
] | null | null | null | python/cudf/setup.py | sleeepyjack/cudf | 908c13032800c7ae0d11fc30102f8d273a3ac1db | [
"Apache-2.0"
] | null | null | null | python/cudf/setup.py | sleeepyjack/cudf | 908c13032800c7ae0d11fc30102f8d273a3ac1db | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018-2021, NVIDIA CORPORATION.
import os
import re
import shutil
import subprocess
import sys
import sysconfig
# Must import in this order:
# setuptools -> Cython.Distutils.build_ext -> setuptools.command.build_ext
# Otherwise, setuptools.command.build_ext ends up inheriting from
# Cython.Distutils.old_build_ext which we do not want
import setuptools
try:
from Cython.Distutils.build_ext import new_build_ext as _build_ext
except ImportError:
from setuptools.command.build_ext import build_ext as _build_ext
from distutils.spawn import find_executable
from distutils.sysconfig import get_python_lib
import numpy as np
import pyarrow as pa
import setuptools.command.build_ext
from setuptools import find_packages, setup
from setuptools.extension import Extension
import versioneer
# Runtime dependencies of the cudf package.
install_requires = [
    "numba>=0.53.1",
    "Cython>=0.29,<0.30",
    "fastavro>=0.22.9",
    "fsspec>=0.6.0",
    "numpy",
    "pandas>=1.0,<1.4.0dev0",
    "typing_extensions",
    "protobuf",
    "nvtx>=0.2.1",
    "cachetools",
    "packaging",
]

# Optional dependencies, installable via `pip install cudf[test]`.
extras_require = {
    "test": [
        "pytest",
        "pytest-benchmark",
        "pytest-xdist",
        # Fix: a missing comma made the adjacent string literals concatenate
        # into the bogus requirement "hypothesismimesis".
        "hypothesis",
        "mimesis",
        "pyorc",
        "msgpack",
        "transformers",
    ]
}

# Glob of Cython sources compiled into extension modules.
cython_files = ["cudf/**/*.pyx"]
def get_cuda_version_from_header(cuda_include_dir, delimeter=""):
    """Extract the CUDA version from ``<cuda_include_dir>/cuda.h``.

    Returns ``"<major><delimeter><minor>"`` parsed from the ``CUDA_VERSION``
    macro (encoded as major*1000 + minor*10). Raises TypeError when the macro
    cannot be found.
    """
    header_path = os.path.join(cuda_include_dir, "cuda.h")
    version_line = None
    with open(header_path, "r", encoding="utf-8") as header:
        for line in header:
            if re.search(r"#define CUDA_VERSION ", line) is not None:
                version_line = line
                break
    if version_line is None:
        raise TypeError("CUDA_VERSION not found in cuda.h")
    raw_version = int(version_line.split()[2])
    major, minor = raw_version // 1000, (raw_version % 1000) // 10
    return "%d%s%d" % (major, delimeter, minor)
# Locate the CUDA toolkit: honor $CUDA_HOME, otherwise derive the root from the
# location of the cuda-gdb executable on $PATH.
CUDA_HOME = os.environ.get("CUDA_HOME", False)
if not CUDA_HOME:
    path_to_cuda_gdb = shutil.which("cuda-gdb")
    if path_to_cuda_gdb is None:
        raise OSError(
            "Could not locate CUDA. "
            "Please set the environment variable "
            "CUDA_HOME to the path to the CUDA installation "
            "and try again."
        )
    # cuda-gdb lives in <CUDA_HOME>/bin, so strip two path components.
    CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))

if not os.path.isdir(CUDA_HOME):
    raise OSError(f"Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}")

cuda_include_dir = os.path.join(CUDA_HOME, "include")
cuda_lib_dir = os.path.join(CUDA_HOME, "lib64")

# Pin the cupy wheel that matches the detected CUDA version (e.g. cupy-cuda113).
install_requires.append(
    "cupy-cuda" + get_cuda_version_from_header(cuda_include_dir)
)

# Repository root and C++ build directory, overridable via the environment.
CUDF_HOME = os.environ.get(
    "CUDF_HOME",
    os.path.abspath(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../")
    ),
)
CUDF_ROOT = os.environ.get(
    "CUDF_ROOT",
    os.path.abspath(
        os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "../../cpp/build/"
        )
    ),
)
class build_ext_and_proto_no_debug(_build_ext):
    """Cython build_ext that strips debug flags and regenerates protobuf modules."""

    def build_extensions(self):
        """Compile extensions with -O3, without debug symbols or noisy warnings."""
        def remove_flags(compiler, *flags):
            # drop every occurrence of each flag from the compiler command line
            for flag in flags:
                try:
                    compiler.compiler_so = list(
                        filter((flag).__ne__, compiler.compiler_so)
                    )
                except Exception:
                    pass

        # Full optimization
        self.compiler.compiler_so.append("-O3")
        # Silence '-Wunknown-pragmas' warning
        self.compiler.compiler_so.append("-Wno-unknown-pragmas")
        # No debug symbols, full optimization, no '-Wstrict-prototypes' warning
        remove_flags(
            self.compiler, "-g", "-G", "-O1", "-O2", "-Wstrict-prototypes"
        )
        super().build_extensions()

    def finalize_options(self):
        """Cythonize the extension modules without debug info, then hand off to setuptools."""
        if self.distribution.ext_modules:
            # Delay import this to allow for Cython-less installs
            from Cython.Build.Dependencies import cythonize

            nthreads = getattr(self, "parallel", None)  # -j option in Py3.5+
            nthreads = int(nthreads) if nthreads else None
            self.distribution.ext_modules = cythonize(
                self.distribution.ext_modules,
                nthreads=nthreads,
                force=self.force,
                gdb_debug=False,
                compiler_directives=dict(
                    profile=False, language_level=3, embedsignature=True
                ),
            )
        # Skip calling super() and jump straight to setuptools
        setuptools.command.build_ext.build_ext.finalize_options(self)

    def run(self):
        """Regenerate *_pb2.py from .proto files (flake8/fmt markers added), then build."""
        # Get protoc
        protoc = None
        if "PROTOC" in os.environ and os.path.exists(os.environ["PROTOC"]):
            protoc = os.environ["PROTOC"]
        else:
            protoc = find_executable("protoc")
        if protoc is None:
            sys.stderr.write("protoc not found")
            sys.exit(1)

        # Build .proto file
        for source in ["cudf/utils/metadata/orc_column_statistics.proto"]:
            output = source.replace(".proto", "_pb2.py")

            # regenerate only when the .proto is newer than the generated module
            if not os.path.exists(output) or (
                os.path.getmtime(source) > os.path.getmtime(output)
            ):
                # NOTE(review): the append below is immediately overwritten when
                # protoc rewrites `output`; the markers are re-inserted by the
                # read/rewrite step that follows — presumably the append is a
                # leftover. TODO confirm.
                with open(output, "a") as src:
                    src.write("# flake8: noqa" + os.linesep)
                    src.write("# fmt: off" + os.linesep)
                subprocess.check_call([protoc, "--python_out=.", source])
                # wrap the generated module in flake8/black opt-out markers
                with open(output, "r+") as src:
                    new_src_content = (
                        "# flake8: noqa"
                        + os.linesep
                        + "# fmt: off"
                        + os.linesep
                        + src.read()
                        + "# fmt: on"
                        + os.linesep
                    )
                    src.seek(0)
                    src.write(new_src_content)

        # Run original Cython build_ext command
        _build_ext.run(self)
extensions = [
Extension(
"*",
sources=cython_files,
include_dirs=[
os.path.abspath(os.path.join(CUDF_HOME, "cpp/include/cudf")),
os.path.abspath(os.path.join(CUDF_HOME, "cpp/include")),
os.path.abspath(os.path.join(CUDF_ROOT, "include")),
os.path.join(CUDF_ROOT, "_deps/libcudacxx-src/include"),
os.path.join(CUDF_ROOT, "_deps/dlpack-src/include"),
os.path.join(
os.path.dirname(sysconfig.get_path("include")),
"libcudf/libcudacxx",
),
os.path.dirname(sysconfig.get_path("include")),
np.get_include(),
pa.get_include(),
cuda_include_dir,
],
library_dirs=(
pa.get_library_dirs()
+ [
get_python_lib(),
os.path.join(os.sys.prefix, "lib"),
cuda_lib_dir,
]
),
libraries=["cudart", "cudf"] + pa.get_libraries() + ["arrow_cuda"],
language="c++",
extra_compile_args=["-std=c++17"],
)
]
cmdclass = versioneer.get_cmdclass()
cmdclass["build_ext"] = build_ext_and_proto_no_debug
setup(
name="cudf",
version=versioneer.get_version(),
description="cuDF - GPU Dataframe",
url="https://github.com/rapidsai/cudf",
author="NVIDIA Corporation",
license="Apache 2.0",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Database",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
# Include the separately-compiled shared library
setup_requires=["cython", "protobuf"],
ext_modules=extensions,
packages=find_packages(include=["cudf", "cudf.*"]),
package_data=dict.fromkeys(
find_packages(include=["cudf._lib*"]), ["*.pxd"],
),
cmdclass=cmdclass,
install_requires=install_requires,
zip_safe=False,
extras_require=extras_require,
)
| 30.856061 | 79 | 0.581758 |
38b61b117c5200df6c403f6dd135e042bc75216e | 3,762 | py | Python | Demo/tkinter/matt/two-radio-groups.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | 1 | 2021-12-26T22:20:34.000Z | 2021-12-26T22:20:34.000Z | Demo/tkinter/matt/two-radio-groups.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | null | null | null | Demo/tkinter/matt/two-radio-groups.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | 2 | 2018-08-06T04:37:38.000Z | 2022-02-27T18:07:12.000Z | from tkinter import *
# The way to think about this is that each radio button menu
# controls a different variable -- clicking on one of the
# mutually exclusive choices in a radiobutton assigns some value
# to an application variable you provide. When you define a
# radiobutton menu choice, you have the option of specifying the
# name of a varaible and value to assign to that variable when
# that choice is selected. This clever mechanism relieves you,
# the programmer, from having to write a dumb callback that
# probably wouldn't have done anything more than an assignment
# anyway. The Tkinter options for this follow their Tk
# counterparts:
# {"variable" : my_flavor_variable, "value" : "strawberry"}
# where my_flavor_variable is an instance of one of the
# subclasses of Variable, provided in Tkinter.py (there is
# StringVar(), IntVar(), DoubleVar() and BooleanVar() to choose
# from)
def makePoliticalParties(var):
    """Build the 'Political Party' menubutton with a radiobutton menu.

    Selecting an entry stores 1 (Republican), 2 (Democrat) or 3
    (Libertarian) into the IntVar `var`; Democrat is preselected.
    Returns the Menubutton (already packed into the global `mBar`).
    """
    button = Menubutton(mBar, text='Political Party',
                        underline=0)
    button.pack(side=LEFT, padx='2m')
    # The pulldown menu itself; one radiobutton per party, all bound to
    # the same variable so the choices are mutually exclusive.
    button.menu = Menu(button)
    for code, party_name in enumerate(
            ('Republican', 'Democrat', 'Libertarian'), start=1):
        button.menu.add_radiobutton(label=party_name,
                                    variable=var, value=code)
    # Default selection: Democrat.
    var.set(2)
    # Wire the menubutton to its menu so clicking posts the pulldown.
    button['menu'] = button.menu
    return button
def makeFlavors(var):
    """Build the 'Flavors' menubutton with a radiobutton menu.

    Selecting an entry stores the flavor name string into the StringVar
    `var`; 'Chocolate' is preselected. Returns the Menubutton (already
    packed into the global `mBar`).
    """
    button = Menubutton(mBar, text='Flavors',
                        underline=0)
    button.pack(side=LEFT, padx='2m')
    # One radiobutton per flavor; the menu value is the flavor name itself.
    button.menu = Menu(button)
    for flavor_name in ('Strawberry', 'Chocolate', 'Rocky Road'):
        button.menu.add_radiobutton(label=flavor_name,
                                    variable=var, value=flavor_name)
    # Default selection.
    var.set("Chocolate")
    # Wire the menubutton to its menu so clicking posts the pulldown.
    button['menu'] = button.menu
    return button
def printStuff():
    """Echo the current party and flavor selections to stdout."""
    for label, current in (("party is", party.get()),
                           ("flavor is", flavor.get())):
        print(label, current)
    print()
#################################################
#### Main starts here ...
root = Tk()
# make a menu bar
mBar = Frame(root, relief=RAISED, borderwidth=2)
mBar.pack(fill=X)
# make two application variables,
# one to control each radio button set
# (an IntVar holds the party code 1-3; a StringVar holds the flavor name)
party = IntVar()
flavor = StringVar()
Radiobutton_button = makePoliticalParties(party)
Radiobutton_button2 = makeFlavors(flavor)
# finally, install the buttons in the menu bar.
# This allows for scanning from one menubutton to the next.
mBar.tk_menuBar(Radiobutton_button, Radiobutton_button2)
# A button that dumps both variables to stdout, demonstrating that the
# radiobutton menus updated them without any explicit callbacks.
b = Button(root, text="print party and flavor", foreground="red",
           command=printStuff)
b.pack(side=TOP)
root.title('menu demo')
root.iconname('menu demo')
root.mainloop()
| 33.891892 | 77 | 0.623339 |
a2721b26bb01173ae62233042661b3f309e05fe2 | 8,960 | py | Python | py_proto/modules/prediction/proto/prediction_conf_pb2.py | yujianyi/fusion_localization | c0057e29cbf690d6260f021080fd951c1a6b6baa | [
"Apache-2.0"
] | 2 | 2019-03-04T02:11:04.000Z | 2019-04-18T11:19:45.000Z | py_proto/modules/prediction/proto/prediction_conf_pb2.py | yujianyi/fusion_localization | c0057e29cbf690d6260f021080fd951c1a6b6baa | [
"Apache-2.0"
] | 1 | 2019-03-15T08:37:53.000Z | 2019-03-15T08:37:53.000Z | py_proto/modules/prediction/proto/prediction_conf_pb2.py | yujianyi/fusion_localization | c0057e29cbf690d6260f021080fd951c1a6b6baa | [
"Apache-2.0"
] | 1 | 2019-03-04T02:11:09.000Z | 2019-03-04T02:11:09.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/prediction/proto/prediction_conf.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from modules.perception.proto import perception_obstacle_pb2 as modules_dot_perception_dot_proto_dot_perception__obstacle__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/prediction/proto/prediction_conf.proto',
package='apollo.prediction',
syntax='proto2',
serialized_pb=_b('\n.modules/prediction/proto/prediction_conf.proto\x12\x11\x61pollo.prediction\x1a\x32modules/perception/proto/perception_obstacle.proto\"\xe9\x04\n\x0cObstacleConf\x12\x41\n\robstacle_type\x18\x01 \x01(\x0e\x32*.apollo.perception.PerceptionObstacle.Type\x12G\n\x0fobstacle_status\x18\x02 \x01(\x0e\x32..apollo.prediction.ObstacleConf.ObstacleStatus\x12\x45\n\x0e\x65valuator_type\x18\x03 \x01(\x0e\x32-.apollo.prediction.ObstacleConf.EvaluatorType\x12\x45\n\x0epredictor_type\x18\x04 \x01(\x0e\x32-.apollo.prediction.ObstacleConf.PredictorType\"G\n\x0eObstacleStatus\x12\x0b\n\x07ON_LANE\x10\x00\x12\x0c\n\x08OFF_LANE\x10\x01\x12\x0e\n\nSTATIONARY\x10\x03\x12\n\n\x06MOVING\x10\x04\"I\n\rEvaluatorType\x12\x11\n\rMLP_EVALUATOR\x10\x00\x12\x11\n\rRNN_EVALUATOR\x10\x01\x12\x12\n\x0e\x43OST_EVALUATOR\x10\x02\"\xaa\x01\n\rPredictorType\x12\x1b\n\x17LANE_SEQUENCE_PREDICTOR\x10\x00\x12\x17\n\x13\x46REE_MOVE_PREDICTOR\x10\x01\x12\x16\n\x12REGIONAL_PREDICTOR\x10\x02\x12\x1b\n\x17MOVE_SEQUENCE_PREDICTOR\x10\x03\x12\x13\n\x0f\x45MPTY_PREDICTOR\x10\x04\x12\x19\n\x15SINGLE_LANE_PREDICTOR\x10\x05\"H\n\x0ePredictionConf\x12\x36\n\robstacle_conf\x18\x01 \x03(\x0b\x32\x1f.apollo.prediction.ObstacleConf')
,
dependencies=[modules_dot_perception_dot_proto_dot_perception__obstacle__pb2.DESCRIPTOR,])
_OBSTACLECONF_OBSTACLESTATUS = _descriptor.EnumDescriptor(
name='ObstacleStatus',
full_name='apollo.prediction.ObstacleConf.ObstacleStatus',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ON_LANE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OFF_LANE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STATIONARY', index=2, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOVING', index=3, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=420,
serialized_end=491,
)
_sym_db.RegisterEnumDescriptor(_OBSTACLECONF_OBSTACLESTATUS)
_OBSTACLECONF_EVALUATORTYPE = _descriptor.EnumDescriptor(
name='EvaluatorType',
full_name='apollo.prediction.ObstacleConf.EvaluatorType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MLP_EVALUATOR', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RNN_EVALUATOR', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COST_EVALUATOR', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=493,
serialized_end=566,
)
_sym_db.RegisterEnumDescriptor(_OBSTACLECONF_EVALUATORTYPE)
_OBSTACLECONF_PREDICTORTYPE = _descriptor.EnumDescriptor(
name='PredictorType',
full_name='apollo.prediction.ObstacleConf.PredictorType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='LANE_SEQUENCE_PREDICTOR', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FREE_MOVE_PREDICTOR', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REGIONAL_PREDICTOR', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOVE_SEQUENCE_PREDICTOR', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EMPTY_PREDICTOR', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SINGLE_LANE_PREDICTOR', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=569,
serialized_end=739,
)
_sym_db.RegisterEnumDescriptor(_OBSTACLECONF_PREDICTORTYPE)
_OBSTACLECONF = _descriptor.Descriptor(
name='ObstacleConf',
full_name='apollo.prediction.ObstacleConf',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='obstacle_type', full_name='apollo.prediction.ObstacleConf.obstacle_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_status', full_name='apollo.prediction.ObstacleConf.obstacle_status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='evaluator_type', full_name='apollo.prediction.ObstacleConf.evaluator_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predictor_type', full_name='apollo.prediction.ObstacleConf.predictor_type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_OBSTACLECONF_OBSTACLESTATUS,
_OBSTACLECONF_EVALUATORTYPE,
_OBSTACLECONF_PREDICTORTYPE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=122,
serialized_end=739,
)
_PREDICTIONCONF = _descriptor.Descriptor(
name='PredictionConf',
full_name='apollo.prediction.PredictionConf',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='obstacle_conf', full_name='apollo.prediction.PredictionConf.obstacle_conf', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=741,
serialized_end=813,
)
_OBSTACLECONF.fields_by_name['obstacle_type'].enum_type = modules_dot_perception_dot_proto_dot_perception__obstacle__pb2._PERCEPTIONOBSTACLE_TYPE
_OBSTACLECONF.fields_by_name['obstacle_status'].enum_type = _OBSTACLECONF_OBSTACLESTATUS
_OBSTACLECONF.fields_by_name['evaluator_type'].enum_type = _OBSTACLECONF_EVALUATORTYPE
_OBSTACLECONF.fields_by_name['predictor_type'].enum_type = _OBSTACLECONF_PREDICTORTYPE
_OBSTACLECONF_OBSTACLESTATUS.containing_type = _OBSTACLECONF
_OBSTACLECONF_EVALUATORTYPE.containing_type = _OBSTACLECONF
_OBSTACLECONF_PREDICTORTYPE.containing_type = _OBSTACLECONF
_PREDICTIONCONF.fields_by_name['obstacle_conf'].message_type = _OBSTACLECONF
DESCRIPTOR.message_types_by_name['ObstacleConf'] = _OBSTACLECONF
DESCRIPTOR.message_types_by_name['PredictionConf'] = _PREDICTIONCONF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ObstacleConf = _reflection.GeneratedProtocolMessageType('ObstacleConf', (_message.Message,), dict(
DESCRIPTOR = _OBSTACLECONF,
__module__ = 'modules.prediction.proto.prediction_conf_pb2'
# @@protoc_insertion_point(class_scope:apollo.prediction.ObstacleConf)
))
_sym_db.RegisterMessage(ObstacleConf)
PredictionConf = _reflection.GeneratedProtocolMessageType('PredictionConf', (_message.Message,), dict(
DESCRIPTOR = _PREDICTIONCONF,
__module__ = 'modules.prediction.proto.prediction_conf_pb2'
# @@protoc_insertion_point(class_scope:apollo.prediction.PredictionConf)
))
_sym_db.RegisterMessage(PredictionConf)
# @@protoc_insertion_point(module_scope)
| 37.805907 | 1,217 | 0.766853 |
2c3574bb9035bfcc0ab41225201833715c4ec8be | 6,030 | py | Python | configs/representation/uvc_fpn_moco2/uvc-moco2_r18-fpn_center_pms0820rc_it_l1_mt02_video_2x8x1_sgd_cosine_30e_k400_rgb.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | configs/representation/uvc_fpn_moco2/uvc-moco2_r18-fpn_center_pms0820rc_it_l1_mt02_video_2x8x1_sgd_cosine_30e_k400_rgb.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | configs/representation/uvc_fpn_moco2/uvc-moco2_r18-fpn_center_pms0820rc_it_l1_mt02_video_2x8x1_sgd_cosine_30e_k400_rgb.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | # model settings
# Softmax temperature shared by the tracker head and test-time propagation.
temperature = 0.01
# Whether features are L2-normalized before similarity computation.
with_norm = True
# Dimensionality of the MoCo query/key embeddings.
query_dim = 128
model = dict(
    type='UVCNeckMoCoTrackerV2',
    queue_dim=query_dim,
    # MoCo negative-queue capacity; presumably 256 x 144 x 5 entries --
    # TODO confirm against the model implementation.
    patch_queue_size=256 * 144 * 5,
    backbone=dict(
        type='ResNet',
        pretrained=None,
        depth=18,
        out_indices=(0, 1, 2, 3),
        # strides=(1, 2, 1, 1),
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        zero_init_residual=True),
    neck=dict(
        type='FPN',
        in_channels=[64, 128, 256, 512],
        out_channels=256,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        num_outs=4,
        out_index=1),
    cls_head=dict(
        type='UVCHead',
        loss_feat=None,
        loss_aff=dict(
            type='ConcentrateLoss',
            win_len=8,
            stride=8,
            temperature=temperature,
            with_norm=with_norm,
            loss_weight=1.),
        loss_bbox=dict(type='L1Loss', loss_weight=10.),
        in_channels=256,
        channels=128,
        temperature=temperature,
        with_norm=with_norm,
        init_std=0.01,
        track_type='center'),
    patch_head=dict(
        type='MoCoHead',
        loss_feat=dict(type='MultiPairNCE', loss_weight=1.),
        in_channels=512,
        # num_convs=2,
        # kernel_size=3,
        # norm_cfg=dict(type='BN'),
        # act_cfg=dict(type='ReLU'),
        channels=query_dim,
        temperature=0.2,
        with_norm=with_norm))
# model training and testing settings
# Augmentation/behavior switches consumed by the model's forward_train --
# exact semantics live in the model implementation; TODO confirm.
train_cfg = dict(
    patch_size=96,
    patch_moco_scale=(0.8, 2.0),
    img_as_ref=True,
    img_as_tar=False,
    img_as_embed=True,
    patch_geo_aug=True,
    patch_color_aug=True,
    patch_crop_aug=True,
    diff_crop=True,
    skip_cycle=True,
    center_ratio=0.,
    shuffle_bn=True)
# Test-time label-propagation settings (DAVIS-style evaluation).
test_cfg = dict(
    precede_frames=7,
    topk=5,
    temperature=temperature,
    # strides=(1, 2, 1, 1),
    out_indices=(0, ),
    neighbor_range=40,
    with_norm=with_norm,
    output_dir='eval_results')
# dataset settings
# Train on Kinetics-400 videos; validate/test on DAVIS 2017 frames.
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
# ImageNet mean/std in 0-255 range, RGB order (to_bgr=False).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=2, frame_interval=8, num_clips=1),
    dict(type='DuplicateFrames', times=2),
    dict(type='DecordDecode'),
    # dict(type='Resize', scale=(-1, 256)),
    # dict(type='RandomResizedCrop', area_range=(0.2, 1.)),
    dict(type='Resize', scale=(256, 256), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    # dict(
    #     type='ColorJitter',
    #     brightness=0.4,
    #     contrast=0.4,
    #     saturation=0.4,
    #     hue=0.1,
    #     p=0.8,
    #     same_across_clip=False),
    # dict(type='RandomGrayScale', p=0.2, same_across_clip=False),
    # dict(type='RandomGaussianBlur', p=0.5, same_across_clip=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
# Validation reads all frames sequentially and carries the reference
# segmentation map through for propagation-based evaluation.
val_pipeline = [
    dict(type='SequentialSampleFrames', frame_interval=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 480), keep_ratio=True),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(
        type='Collect',
        keys=['imgs', 'ref_seg_map'],
        meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
    dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
# Dataloader wiring: batch size per GPU and the three dataset splits.
data = dict(
    videos_per_gpu=48,
    workers_per_gpu=4,
    val_workers_per_gpu=1,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_prefix,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type_val,
        ann_file=ann_file_val,
        data_prefix=data_prefix_val,
        data_root=data_root_val,
        anno_prefix=anno_prefix_val,
        pipeline=val_pipeline,
        test_mode=True),
    test=dict(
        type=dataset_type_val,
        ann_file=ann_file_val,
        data_prefix=data_prefix_val,
        data_root=data_root_val,
        anno_prefix=anno_prefix_val,
        pipeline=val_pipeline,
        test_mode=True))
# optimizer
# optimizer = dict(type='Adam', lr=1e-4)
optimizer = dict(type='SGD', lr=1e-1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# Cosine decay to 0 computed per-iteration (by_epoch=False).
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
# lr_config = dict(policy='Fixed')
# lr_config = dict(
#     policy='step',
#     warmup='linear',
#     warmup_iters=100,
#     warmup_ratio=0.001,
#     step=[1, 2])
total_epochs = 30
checkpoint_config = dict(interval=1)
# Evaluate DAVIS J&F every epoch; higher is better.
evaluation = dict(
    interval=1, metrics='davis', key_indicator='J&F-Mean', rule='greater')
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project='mmaction2',
                name='{{fileBasenameNoExtension}}',
                resume=True,
                tags=['uvc-fpn-moco2'],
                dir='wandb/{{fileBasenameNoExtension}}',
                config=dict(
                    model=model,
                    train_cfg=train_cfg,
                    test_cfg=test_cfg,
                    data=data))),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
| 31.40625 | 78 | 0.621725 |
c6a6547b5ac104caf04f4bd80a0c849fe2c9b0b2 | 1,969 | py | Python | Operators/ExampleTextDetectOperator/PostProcess.py | Caius-Lu/Savior | 47c22e06c38cc9b5f7007d79f791015c8b2b76aa | [
"BSD-2-Clause"
] | 108 | 2021-03-19T03:45:48.000Z | 2022-03-29T12:19:38.000Z | Operators/ExampleTextDetectOperator/PostProcess.py | Caius-Lu/Savior | 47c22e06c38cc9b5f7007d79f791015c8b2b76aa | [
"BSD-2-Clause"
] | 2 | 2021-05-12T07:26:21.000Z | 2021-07-16T12:53:52.000Z | Operators/ExampleTextDetectOperator/PostProcess.py | Caius-Lu/Savior | 47c22e06c38cc9b5f7007d79f791015c8b2b76aa | [
"BSD-2-Clause"
] | 27 | 2021-03-19T05:50:26.000Z | 2021-12-28T07:13:09.000Z | import cv2
import numpy as np
from Utils.GeometryUtils import get_min_area_bbox
def db_post_process(_predict_score, _thresh, _bbox_scale_ratio, _min_size=5):
    """Convert a DB-style text-detection score map into rotated boxes.

    Args:
        _predict_score: model probability map; squeezed to 2-D (H, W).
        _thresh: binarization threshold applied to the score map.
        _bbox_scale_ratio: box expansion ratio forwarded to get_min_area_bbox.
        _min_size: minimum box side length in pixels; smaller boxes dropped.

    Returns:
        (boxes, scores): parallel lists. Each box is the dict produced by
        get_min_area_bbox; each score is the mean thresholded score inside
        the corresponding contour.
    """
    instance_score = _predict_score.squeeze()
    h, w = instance_score.shape[:2]
    # Keep the raw scores only where they exceed the threshold; used later
    # to compute the mean confidence of each region.
    available_region = np.zeros_like(instance_score, dtype=np.float32)
    np.putmask(available_region, instance_score > _thresh, instance_score)
    to_return_boxes = []
    to_return_scores = []
    mask_region = (available_region > 0).astype(np.uint8) * 255
    # Morphological closing bridges small gaps inside a text region before
    # contour extraction.
    structure_element = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
    refined_mask_region = cv2.morphologyEx(mask_region, cv2.MORPH_CLOSE, structure_element)
    # findContours' return signature changed between OpenCV 3.x and 4.x.
    if cv2.__version__.startswith('3'):
        _, contours, _ = cv2.findContours(refined_mask_region, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    elif cv2.__version__.startswith('4'):
        contours, _ = cv2.findContours(refined_mask_region, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    else:
        raise NotImplementedError(f'opencv {cv2.__version__} not support')
    for m_contour in contours:
        # NOTE(review): skips contours that are BOTH short (<4 points) and
        # tiny (<16 px area); if either degenerate case alone should be
        # skipped, this should be `or` -- TODO confirm.
        if len(m_contour) < 4 and cv2.contourArea(m_contour) < 16:
            continue
        m_rotated_box = get_min_area_bbox(refined_mask_region, m_contour, _bbox_scale_ratio)
        if m_rotated_box is None:
            continue
        # box_width/box_height appear normalized to image size (they are
        # rescaled by w/h before the min-size check) -- TODO confirm against
        # get_min_area_bbox.
        m_box_width = m_rotated_box['box_width']
        m_box_height = m_rotated_box['box_height']
        if min(m_box_width * w, m_box_height * h) < _min_size:
            continue
        to_return_boxes.append(m_rotated_box)
        # Score = mean thresholded score over the filled contour area.
        m_available_mask = np.zeros_like(available_region, dtype=np.uint8)
        cv2.drawContours(m_available_mask, [m_contour,],0, 255, thickness=-1)
        m_region_mask = cv2.bitwise_and(available_region, available_region, mask=m_available_mask)
        m_mask_count = np.count_nonzero(m_available_mask)
        to_return_scores.append(float(np.sum(m_region_mask) / m_mask_count))
    return to_return_boxes, to_return_scores
| 49.225 | 102 | 0.729812 |
28cc131f3d4aace2a9fad3178ff1894016ebad1b | 62,132 | py | Python | tensorflow/python/ops/array_ops.py | mfkasim1/tensorflow | bce8ad95f349372326413b6b1a2670062b749309 | [
"Apache-2.0"
] | 1 | 2022-01-29T23:03:59.000Z | 2022-01-29T23:03:59.000Z | tensorflow/python/ops/array_ops.py | mfkasim1/tensorflow | bce8ad95f349372326413b6b1a2670062b749309 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/array_ops.py | mfkasim1/tensorflow | bce8ad95f349372326413b6b1a2670062b749309 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Casting
TensorFlow provides several operations that you can use to cast tensor data
types in your graph.
@@string_to_number
@@to_double
@@to_float
@@to_bfloat16
@@to_int32
@@to_int64
@@cast
@@saturate_cast
## Shapes and Shaping
TensorFlow provides several operations that you can use to determine the shape
of a tensor and change the shape of a tensor.
@@shape
@@size
@@rank
@@reshape
@@squeeze
@@expand_dims
## Slicing and Joining
TensorFlow provides several operations to slice or extract parts of a tensor,
or join multiple tensors together.
@@slice
@@split
@@tile
@@pad
@@concat
@@pack
@@unpack
@@reverse_sequence
@@reverse
@@transpose
@@space_to_batch
@@batch_to_space
@@space_to_depth
@@depth_to_space
@@gather
@@gather_nd
@@dynamic_partition
@@dynamic_stitch
@@boolean_mask
@@one_hot
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import logging_ops
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.ops.constant_op import constant
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
# pylint: enable=wildcard-import
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_baseslice = slice
# Aliases for some automatically-generated names.
listdiff = gen_array_ops.list_diff
# DEPRECATED use init_ops.zeros_initializer
# TODO(irving) Move it to init_ops.py
def zeros_initializer(shape, dtype=dtypes.float32):
  """Initializer-style adaptor around `zeros`.

  Lets `zeros` be used wherever an Initializer callable taking
  `(shape, dtype)` is expected.
  """
  return zeros(shape, dtype=dtype)
# pylint: disable=undefined-variable,protected-access
def _SliceHelper(tensor, slice_spec):
  """Overload for Tensor.__getitem__.
  Currently the size of the slice must be statically known in each dimension,
  i.e. the "stop" of the slice must not be omitted.
  TODO(mrry): Support slices where the sizes are not specified.
  TODO(mrry): Support negative indices in slices with numpy/Python semantics.
  Args:
    tensor: An ops.Tensor object.
    slice_spec: The arguments to Tensor.__getitem__.
  Returns:
    The appropriate slice of "tensor", based on "slice_spec".
  Raises:
    ValueError: If a slice range is negative size.
    TypeError: If the slice indices aren't int, slice, or Ellipsis.
  """
  # A bare index/slice (e.g. t[3]) arrives unwrapped; normalize to a list.
  if not isinstance(slice_spec, (list, tuple)):
    slice_spec = [slice_spec]
  # Per-dimension begin offsets, slice lengths, and the dimensions that were
  # indexed with a scalar (squeezed out of the result afterwards).
  indices = []
  sizes = []
  squeeze_dims = []
  for dim, s in enumerate(slice_spec):
    if isinstance(s, _baseslice):
      if s.step not in (None, 1):
        raise NotImplementedError(
            "Steps other than 1 are not currently supported")
      start = s.start if s.start is not None else 0
      if start < 0:
        raise NotImplementedError(
            "Negative start indices are not currently supported")
      indices.append(start)
      if s.stop is not None and s.stop < 0:
        raise NotImplementedError(
            "Negative stop indices are not currently supported")
      # NOTE(mrry): If the stop is not specified, Python substitutes
      # sys.maxsize, which is typically (2 ** 63) - 1. Since Slice currently
      # supports signed DT_INT32 arguments, we use -1 to specify that all
      # elements should be captured.
      if s.stop is None or s.stop == sys.maxsize:
        sizes.append(-1)
      else:
        if start > s.stop:
          raise ValueError("Stop must be at least start")
        sizes.append(s.stop - start)
    elif s is Ellipsis:
      raise NotImplementedError("Ellipsis is not currently supported")
    else:
      # Scalar index: take a length-1 slice at that offset and remember to
      # squeeze this dimension out of the result.
      try:
        s = int(s)
      except TypeError:
        raise TypeError("Bad slice index %s of type %s" % (s, type(s)))
      if s < 0:
        raise NotImplementedError("Negative indices are currently unsupported")
      indices.append(s)
      sizes.append(1)
      squeeze_dims.append(dim)
  sliced = slice(tensor, indices, sizes)
  if squeeze_dims:
    return squeeze(sliced, squeeze_dims=squeeze_dims)
  else:
    return sliced
def slice(input_, begin, size, name=None):
  """Extracts a slice from a tensor.

  Returns the sub-tensor of `input_` starting at offset `begin` with extent
  `size`, both given per dimension: `begin[i]` is the zero-based starting
  offset in dimension i and `size[i]` is the number of elements taken from
  that dimension. A `size[i]` of -1 means "everything from `begin[i]` to the
  end of dimension i", i.e. it is equivalent to setting

      size[i] = input.dim_size(i) - begin[i]

  The operation requires, for every dimension i,

      0 <= begin[i] <= begin[i] + size[i] <= Di

  For example:

  ```
  # 'input' is [[[1, 1, 1], [2, 2, 2]],
  #             [[3, 3, 3], [4, 4, 4]],
  #             [[5, 5, 5], [6, 6, 6]]]
  tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]
  tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],
                                              [4, 4, 4]]]
  tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],
                                             [[5, 5, 5]]]
  ```

  Args:
    input_: A `Tensor`.
    begin: An `int32` or `int64` `Tensor` of per-dimension start offsets.
    size: An `int32` or `int64` `Tensor` of per-dimension extents.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `input_`.
  """
  # Thin wrapper over the generated op; the public name `slice` shadows the
  # builtin deliberately (the builtin is preserved above as `_baseslice`).
  return gen_array_ops._slice(input_, begin, size, name=name)
ops.Tensor._override_operator("__getitem__", _SliceHelper)
def pack(values, name="pack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.

  Given N tensors of identical shape and dtype, produces a tensor of shape
  `[N] + values[0].shape` such that `output[i, ...] = values[i][...]`.
  This is the inverse of `unpack`, and matches the numpy idiom
  `np.asarray([x, y, z])`.

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    name: A name for this operation (optional).

  Returns:
    A packed `Tensor` with the same type as the elements of `values`.
  """
  return gen_array_ops._pack(values, name=name)
def unpack(value, num=None, name="unpack"):
  """Splits the first dimension of a rank-`R` tensor into rank-`(R-1)` parts.

  Returns the list `[value[0, ...], value[1, ...], ...]`; every element has
  shape `value.shape[1:]`. This is the inverse of `pack`, and matches the
  numpy idiom `list(x)`.

  Args:
    value: A rank `R > 0` `Tensor` to be unpacked.
    num: An `int`, the length of the first dimension. If `None` (the
      default), it is read from `value`'s static shape.
    name: A name for the operation (optional).

  Returns:
    The list of `Tensor` objects unpacked from `value`.

  Raises:
    ValueError: If `num` is unspecified and cannot be inferred.
  """
  if num is None:
    # The generated op needs a concrete count, so fall back to the static
    # shape; a statically-unknown first dimension is an error.
    value = ops.convert_to_tensor(value)
    value_shape = value.get_shape()
    num = value_shape[0].value
    if num is None:
      raise ValueError("Cannot infer num from shape %s" % value_shape)
  return gen_array_ops._unpack(value, num=num, name=name)
def concat(concat_dim, values, name="concat"):
  """Concatenates tensors along one dimension.

  Joins the list of tensors `values` along dimension `concat_dim`. If
  `values[i].shape = [D0, D1, ... Dconcat_dim(i), ...Dn]`, the result has
  shape

      [D0, D1, ... Rconcat_dim, ...Dn]

  where `Rconcat_dim = sum(Dconcat_dim(i))`. All inputs must have the same
  rank, and all dimensions except `concat_dim` must be equal.

  For example:

  ```python
  t1 = [[1, 2, 3], [4, 5, 6]]
  t2 = [[7, 8, 9], [10, 11, 12]]
  tf.concat(0, [t1, t2]) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
  tf.concat(1, [t1, t2]) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
  ```

  Args:
    concat_dim: 0-D `int32` `Tensor`. Dimension along which to concatenate.
    values: A list of `Tensor` objects or a single `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` resulting from concatenation of the input tensors.
  """
  if not isinstance(values, (list, tuple)):
    values = [values]
  # TODO(mrry): Change to return values?
  if len(values) == 1:  # Degenerate case of one tensor.
    # The convert_to_tensor call exists only for its checks: it validates
    # that concat_dim has the right dtype and is a scalar.
    # TODO(keveman): Implement a standalone type and shape checker.
    with ops.name_scope(name) as scope:
      dim_tensor = ops.convert_to_tensor(
          concat_dim, name="concat_dim", dtype=dtypes.int32)
      dim_tensor.get_shape().assert_is_compatible_with(tensor_shape.scalar())
      return identity(values[0], name=scope)
  return gen_array_ops._concat(
      concat_dim=concat_dim, values=values, name=name)
@ops.RegisterShape("Pack")
def _PackShape(op):
  """Shape function for Pack: prepend len(inputs) to the merged input shape."""
  merged = op.inputs[0].get_shape()
  for tensor in op.inputs[1:]:
    merged = merged.merge_with(tensor.get_shape())
  return [tensor_shape.TensorShape([len(op.inputs)]).concatenate(merged)]
@ops.RegisterShape("Unpack")
def _UnpackShape(op):
  """Shape function for Unpack: every output drops the leading dimension."""
  tail = op.inputs[0].get_shape()[1:]
  return [tail] * op.get_attr("num")
@ops.RegisterShape("Concat")
def _ConcatShape(op):
  """Shape function for the Concat op.

  Merges all non-concat dimensions of the value inputs and sums the
  concat dimension; falls back to an unknown shape (of known rank, if
  possible) when concat_dim is not a constant.
  """
  concat_dim = tensor_util.constant_value(op.inputs[0])
  if concat_dim is None:
    # Return an unknown shape with the same rank as the inputs, or an
    # unknown rank if no input's rank is known.
    rank = None
    for value in op.inputs[1:]:
      if rank is not None:
        value.get_shape().assert_has_rank(rank)
      else:
        rank = value.get_shape().ndims
    if rank == 0:
      raise ValueError("Can't concatenate scalars (use tf.pack instead)")
    return [tensor_shape.unknown_shape(ndims=rank)]
  else:
    # Merge all the non-concat dims, and sum the concat dim to make an
    # output shape.
    concat_dim = int(concat_dim)
    output_shape = op.inputs[1].get_shape()
    for value in op.inputs[2:]:
      value_shape = value.get_shape()
      if value_shape.ndims is not None and concat_dim >= value_shape.ndims:
        raise ValueError("concat_dim is out of range (values rank = %d)" %
                         value_shape.ndims)
      # Dimensions before and after concat_dim must agree; the concat_dim
      # sizes add up.
      before = output_shape[:concat_dim].merge_with(value_shape[:concat_dim])
      at = output_shape[concat_dim] + value_shape[concat_dim]
      after = output_shape[
          concat_dim + 1:].merge_with(value_shape[concat_dim + 1:])
      output_shape = before.concatenate(at).concatenate(after)
    return [output_shape]
@ops.RegisterShape("ConcatOffset")
def _ConcatOffsetShape(op):
  """Shape function for ConcatOffset: each output mirrors its shape input."""
  return [shape_input.get_shape() for shape_input in op.inputs[1:]]
def boolean_mask(tensor, mask, name="boolean_mask"):
  """Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.

  ```python
  # 1-D example
  tensor = [0, 1, 2, 3]
  mask = [True, False, True, False]
  boolean_mask(tensor, mask) ==> [0, 2]
  ```

  In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must
  match the first K dimensions of `tensor`'s shape. We then have:
  `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
  where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).

  Args:
    tensor: N-D tensor. First K dimensions can be None, which allows e.g.
      undefined batch size. Trailing dimensions must be specified.
    mask: K-D boolean tensor, K <= N.
    name: A name for this operation (optional).

  Returns:
    Tensor populated by entries in `tensor` corresponding to `True` values in
    `mask`.

  Raises:
    ValueError: If shapes do not conform.

  Examples:

  ```python
  # 2-D example
  tensor = [[1, 2], [3, 4], [5, 6]]
  mask = [True, False, True]
  boolean_mask(tensor, mask) ==> [[1, 2], [5, 6]]
  ```
  """
  def _gather_masked_rows(flat, mask_1d):
    """Keep rows of `flat` whose corresponding `mask_1d` entry is True."""
    keep = squeeze(where(mask_1d), squeeze_dims=[1])
    return gather(flat, keep)

  with ops.op_scope([tensor, mask], name):
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    mask = ops.convert_to_tensor(mask, name="mask")
    ndims_mask = mask.get_shape().ndims
    if ndims_mask == 0:
      raise ValueError("mask cannot be scalar.")
    if ndims_mask is None:
      raise ValueError(
          "mask dimensions must be specified, even if some dimensions are None"
          ". E.g. shape=[None] is ok, but shape=None is not.")
    shape_tensor = tensor.get_shape()
    shape_tensor[:ndims_mask].assert_is_compatible_with(mask.get_shape())
    # Flatten the masked leading dimensions into one so a 1-D mask applies.
    tensor = reshape(tensor, [-1] + shape_tensor.as_list()[ndims_mask:])
    mask = reshape(mask, [-1])
    return _gather_masked_rows(tensor, mask)
def sparse_mask(a, mask_indices, name=None):
  """Masks elements of `IndexedSlices`.

  Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
  contains a subset of the slices of `a`. Only the slices at indices NOT
  specified in `mask_indices` are returned.

  This is useful when you need to extract a subset of slices in an
  `IndexedSlices` object.

  For example:

  ```python
  # `a` contains slices at indices [12, 26, 37, 45] from a large tensor
  # with shape [1000, 10]
  a.indices => [12, 26, 37, 45]
  tf.shape(a.values) => [4, 10]
  # `b` will be the subset of `a` slices at its second and third indices, so
  # we want to mask of its first and last indices (which are at absolute
  # indices 12, 45)
  b = tf.sparse_mask(a, [12, 45])
  b.indices => [26, 37]
  tf.shape(b.values) => [2, 10]
  ```

  Args:
    a: An `IndexedSlices` instance.
    mask_indices: Indices of elements to mask.
    name: A name for the operation (optional).

  Returns:
    The masked `IndexedSlices` instance.
  """
  with ops.op_scope([a, mask_indices], name, "sparse_mask") as name:
    indices = a.indices
    # listdiff keeps the entries of `indices` that are NOT in mask_indices,
    # and the positions (to_gather) of the surviving rows within a.values.
    out_indices, to_gather = listdiff(indices, mask_indices)
    out_values = gather(a.values, to_gather, name=name)
    return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
def split(split_dim, num_split, value, name="split"):
  """Splits a tensor into `num_split` tensors along one dimension.

  Splits `value` along dimension `split_dim` into `num_split` equally-sized
  pieces; `num_split` must evenly divide `value.shape[split_dim]`.

  For example:

  ```python
  # 'value' is a tensor with shape [5, 30]
  # Split 'value' into 3 tensors along dimension 1
  split0, split1, split2 = tf.split(1, 3, value)
  tf.shape(split0) ==> [5, 10]
  ```

  Args:
    split_dim: A 0-D `int32` `Tensor`. The dimension along which to split.
      Must be in the range `[0, rank(value))`.
    num_split: A Python integer. The number of ways to split.
    value: The `Tensor` to split.
    name: A name for the operation (optional).

  Returns:
    `num_split` `Tensor` objects resulting from splitting `value`.
  """
  return gen_array_ops._split(
      split_dim=split_dim, num_split=num_split, value=value, name=name)
@ops.RegisterShape("Reverse")
def _ReverseShape(op):
  """Shape function for the Reverse op: output shape equals input shape."""
  dims_vector_shape = op.inputs[1].get_shape().with_rank(1)
  result = op.inputs[0].get_shape().with_rank(dims_vector_shape[0])
  # The kernel only supports up to 8 dimensions.
  if result.ndims is not None and result.ndims > 8:
    raise ValueError(
        "tf.reverse() does not work on tensors with more than 8 dimensions")
  return [result]
def transpose(a, perm=None, name="transpose"):
  """Transposes `a`. Permutes the dimensions according to `perm`.

  The returned tensor's dimension i will correspond to the input dimension
  `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is the
  rank of the input tensor, so by default this performs a regular matrix
  transpose on 2-D inputs.

  For example:

  ```python
  # 'x' is [[1 2 3]
  #         [4 5 6]]
  tf.transpose(x) ==> [[1 4]
                       [2 5]
                       [3 6]]

  # 'perm' is more useful for n-dimensional tensors, for n > 2:
  # transpose the matrices in dimension-0 of a rank-3 tensor with
  tf.transpose(x, perm=[0, 2, 1])
  ```

  Args:
    a: A `Tensor`.
    perm: A permutation of the dimensions of `a`.
    name: A name for the operation (optional).

  Returns:
    A transposed `Tensor`.
  """
  with ops.op_scope([a], name, "transpose") as name:
    if perm is not None:
      return gen_array_ops.transpose(a, perm, name=name)
    # Default permutation (n-1, ..., 0), built dynamically from the rank.
    rank = gen_array_ops.rank(a)
    perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
    ret = gen_array_ops.transpose(a, perm, name=name)
    # NOTE(mrry): Setting the shape explicitly because
    # reverse is not handled by the shape function.
    input_dims = ret.op.inputs[0].get_shape().dims
    if input_dims is not None:
      ret.set_shape(input_dims[::-1])
    return ret
def zeros(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to zero.

  Returns a tensor of type `dtype` with shape `shape` and all elements zero.

  For example:

  ```python
  tf.zeros([3, 4], int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  with ops.op_scope([shape], name, "zeros") as name:
    if isinstance(shape, (list, tuple)):
      # Static shape: a single constant node suffices.
      result = constant(0, shape=shape, dtype=dtype, name=name)
    else:
      # Dynamic shape: fill with zeros at runtime.
      shape_tensor = ops.convert_to_tensor(
          shape, dtype=dtypes.int32, name="shape")
      result = fill(shape_tensor, constant(0, dtype=dtype), name=name)
    assert result.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype
    return result
def zeros_like(tensor, dtype=None, name=None):
  """Creates a tensor with all elements set to zero.

  Given a single tensor (`tensor`), this operation returns a tensor of the
  same type and shape as `tensor` with all elements set to zero. Optionally,
  `dtype` specifies a new type for the returned tensor.

  For example:

  ```python
  # 'tensor' is [[1, 2, 3], [4, 5, 6]]
  tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
      `int8`, `int16`, `int32`, `int64`, `uint8`, or `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  with ops.op_scope([tensor], name, "zeros_like") as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    if dtype is None or tensor.dtype == dtype:
      # Same dtype: the dedicated kernel handles this directly.
      return gen_array_ops._zeros_like(tensor, name=name)
    # A different dtype was requested: build zeros of that type and carry
    # over the static shape information.
    result = zeros(shape(tensor), dtype, name=name)
    result.set_shape(tensor.get_shape())
    return result
def ones_like(tensor, dtype=None, name=None):
  """Creates a tensor with all elements set to 1.

  Given a single tensor (`tensor`), this operation returns a tensor of the
  same type and shape as `tensor` with all elements set to 1. Optionally,
  `dtype` specifies a new type for the returned tensor.

  For example:

  ```python
  # 'tensor' is [[1, 2, 3], [4, 5, 6]]
  tf.ones_like(tensor) ==> [[1, 1, 1], [1, 1, 1]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
      `int8`, `int16`, `int32`, `int64`, `uint8`, or `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to 1.
  """
  with ops.op_scope([tensor], name, "ones_like") as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    result = ones(shape(tensor),
                  dtype=tensor.dtype if dtype is None else dtype,
                  name=name)
    # Carry over the static shape, which `shape(tensor)` alone would lose.
    result.set_shape(tensor.get_shape())
    return result
def ones(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to 1.

  Returns a tensor of type `dtype` with shape `shape` and all elements one.

  For example:

  ```python
  tf.ones([2, 3], int32) ==> [[1, 1, 1], [1, 1, 1]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to 1.
  """
  with ops.op_scope([shape], name, "ones") as name:
    if isinstance(shape, (list, tuple)):
      # Static shape: a single constant node suffices.
      result = constant(1, shape=shape, dtype=dtype, name=name)
    else:
      # Dynamic shape: fill with ones at runtime.
      shape_tensor = ops.convert_to_tensor(
          shape, dtype=dtypes.int32, name="shape")
      result = fill(shape_tensor, constant(1, dtype=dtype), name=name)
    assert result.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype
    return result
def placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a tensor that will be always fed.

  **Important**: This tensor will produce an error if evaluated. Its value
  must be fed using the `feed_dict` optional argument to `Session.run()`,
  `Tensor.eval()`, or `Operation.run()`.

  For example:

  ```python
  x = tf.placeholder(tf.float32, shape=(1024, 1024))
  y = tf.matmul(x, x)

  with tf.Session() as sess:
    print(sess.run(y))  # ERROR: will fail because x was not fed.

    rand_array = np.random.rand(1024, 1024)
    print(sess.run(y, feed_dict={x: rand_array}))  # Will succeed.
  ```

  Args:
    dtype: The type of elements in the tensor to be fed.
    shape: The shape of the tensor to be fed (optional). If the shape is not
      specified, you can feed a tensor of any shape.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that may be used as a handle for feeding a value, but not
    evaluated directly.
  """
  shape = tensor_shape.as_shape(shape)
  # Only a fully-defined shape is recorded in the op's attr; otherwise the
  # (possibly partial) shape is tracked purely via set_shape below.
  dim_list = shape.as_list() if shape.is_fully_defined() else []
  ret = gen_array_ops._placeholder(dtype=dtype, shape=dim_list, name=name)
  ret.set_shape(shape)
  return ret
def pad(tensor, paddings, mode="CONSTANT", name=None):  # pylint: disable=invalid-name
  """Pads a tensor.

  This operation pads a `tensor` according to the `paddings` you specify.
  `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
  `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
  many values to add before the contents of `tensor` in that dimension, and
  `paddings[D, 1]` indicates how many values to add after the contents of
  `tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
  and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
  `mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
  no greater than `tensor.dim_size(D)`.

  The padded size of each dimension D of the output is:

  `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`

  For example:

  ```python
  # 't' is [[1, 2, 3], [4, 5, 6]].
  # 'paddings' is [[1, 1,], [2, 2]].
  pad(t, paddings, "CONSTANT") ==> [[0, 0, 0, 0, 0, 0, 0],
                                    [0, 0, 1, 2, 3, 0, 0],
                                    [0, 0, 4, 5, 6, 0, 0],
                                    [0, 0, 0, 0, 0, 0, 0]]
  pad(t, paddings, "REFLECT") ==> [[6, 5, 4, 5, 6, 5, 4],
                                   [3, 2, 1, 2, 3, 2, 1],
                                   [6, 5, 4, 5, 6, 5, 4],
                                   [3, 2, 1, 2, 3, 2, 1]]
  pad(t, paddings, "SYMMETRIC") ==> [[2, 1, 1, 2, 3, 3, 2],
                                     [2, 1, 1, 2, 3, 3, 2],
                                     [5, 4, 4, 5, 6, 6, 5],
                                     [5, 4, 4, 5, 6, 6, 5]]
  ```

  Args:
    tensor: A `Tensor`.
    paddings: A `Tensor` of type `int32`.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.

  Raises:
    ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
  """
  if mode == "CONSTANT":
    return gen_array_ops._pad(tensor, paddings, name=name)
  if mode in ("REFLECT", "SYMMETRIC"):
    # Both mirror modes share one kernel, selected via the mode attr.
    return gen_array_ops._mirror_pad(tensor, paddings, mode=mode, name=name)
  raise ValueError("Unknown padding mode: %s" % mode)
@ops.RegisterShape("Placeholder")
def _PlaceholderShape(op):
  """Shape function for the Placeholder op."""
  given_shape = tensor_util.TensorShapeProtoToList(op.get_attr("shape"))
  # An empty shape attr means the placeholder accepts any shape.
  if not given_shape:
    return [tensor_shape.unknown_shape()]
  return [tensor_shape.TensorShape(given_shape)]
@ops.RegisterShape("CheckNumerics")
@ops.RegisterShape("Identity")
@ops.RegisterShape("RefIdentity")
@ops.RegisterShape("StopGradient")
@ops.RegisterShape("BatchMatrixBandPart")
def _UnchangedShape(op):
  """Shape function for ops whose output shape matches their input's."""
  return [op.inputs[0].get_shape()]
@ops.RegisterShape("Rank")
@ops.RegisterShape("Size")
def _ScalarShape(unused_op):
  """Shape function for ops that always produce a scalar."""
  return [tensor_shape.scalar()]
@ops.RegisterShape("Slice")
def _SliceShape(op):
  """Shape function for array_ops.slice."""
  input_shape = op.inputs[0].get_shape()
  begin_shape = op.inputs[1].get_shape().with_rank(1)
  sizes_shape = op.inputs[2].get_shape().with_rank(1)
  # begin and size must be vectors of the same length; that common length
  # is the rank of the input.
  ndims = begin_shape.merge_with(sizes_shape)[0].value
  if ndims is not None:
    input_shape.assert_has_rank(ndims)
  begin_value = tensor_util.constant_value(op.inputs[1])
  sizes_value = tensor_util.constant_value(op.inputs[2])
  if sizes_value is not None:
    returned_dims = []
    for i, slice_size in enumerate(sizes_value.ravel()):
      if slice_size != -1:
        # An explicit size fixes this output dimension directly.
        returned_dims.append(slice_size)
      elif begin_value is not None:
        # Size -1 means "to the end": output dim = input dim - begin.
        returned_dims.append(input_shape[i] - begin_value[i])
      else:
        returned_dims.append(None)
    return [tensor_shape.TensorShape(returned_dims)]
  else:
    # Sizes are not constant: at best we can report the output rank.
    if input_shape.ndims is not None:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    elif ndims is not None:
      return [tensor_shape.unknown_shape(ndims=ndims)]
    else:
      return [tensor_shape.unknown_shape()]
@ops.RegisterShape("Gather")
def _GatherShape(op):
  """Shape function for array_ops.gather: indices shape + params shape[1:]."""
  params_tail = op.inputs[0].get_shape()[1:]
  return [op.inputs[1].get_shape().concatenate(params_tail)]
@ops.RegisterShape("GatherNd")
def _GatherNdShape(op):
  """Shape function for array_ops.gather_nd."""
  params_shape = op.inputs[0].get_shape()
  indices_shape = op.inputs[1].get_shape().with_rank_at_least(2)
  if indices_shape.ndims is not None:
    # The innermost indices dimension indexes into params, so its size must
    # agree with the rank of params.
    indices_shape[-1].merge_with(params_shape.ndims)
  return [indices_shape[:-1]]
@ops.RegisterShape("Unique")
def _UniqueShape(op):
  """Shape function for array_ops.Unique."""
  input_shape = op.inputs[0].get_shape()
  input_shape.assert_has_rank(1)
  # The unique values form a vector of data-dependent length; the index
  # output has the same shape as the input.
  return [tensor_shape.vector(None), input_shape]
@ops.RegisterShape("UniqueWithCounts")
def _UniqueWithCountsShape(op):
  """Shape function for array_ops.UniqueWithCounts."""
  input_shape = op.inputs[0].get_shape()
  input_shape.assert_has_rank(1)
  # Unique values and counts are vectors of data-dependent length; the
  # index output matches the input shape.
  return [tensor_shape.vector(None), input_shape, tensor_shape.vector(None)]
@ops.RegisterShape("BatchMatrixDiag")
def _BatchMatrixDiagShape(op):
  """Shape function for array_ops.batch_matrix_diag."""
  diag_shape = op.inputs[0].get_shape().with_rank_at_least(1)
  # Appending the last dimension once more turns [..., k] into [..., k, k].
  return [diag_shape.concatenate(diag_shape[-1])]
@ops.RegisterShape("BatchMatrixDiagPart")
def _BatchMatrixDiagPartShape(op):
  """Shape function for array_ops.batch_matrix_diag_part."""
  input_shape = op.inputs[0].get_shape().with_rank_at_least(2)
  # The two innermost dimensions must agree (square matrices).
  input_shape[-1].assert_is_compatible_with(input_shape[-2])
  return [input_shape[:-1]]
@ops.RegisterShape("Diag")
def _DiagShape(op):
  """Shape function for array_ops.diag.

  The input has rank k <= 3 and the output has rank 2k: the output shape is
  the input shape concatenated with itself.

  Args:
    op: A Diag Operation.

  Returns:
    A single-element list containing the shape of the output.
  """
  input_shape = op.inputs[0].get_shape().with_rank_at_most(3)
  return [input_shape.concatenate(input_shape)]
@ops.RegisterShape("DiagPart")
def _DiagPartShape(op):
  """Shape function for array_ops.diag_part.

  The input has even rank k (2, 4, or 6) and the output has rank k/2, formed
  by merging the two halves of the input shape.

  Args:
    op: A DiagPart Operation.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If input has odd rank or greater than 6, or the first and
      second halves of the shape are incompatible.
  """
  input_shape = op.inputs[0].get_shape().with_rank_at_most(6)
  rank = input_shape.ndims
  if rank is None:
    return [tensor_shape.unknown_shape()]
  if rank % 2 != 0:
    raise ValueError("Input must be even rank, got rank = " + str(rank) + ".")
  half = rank // 2
  return [input_shape[:half].merge_with(input_shape[half:])]
@ops.RegisterShape("ExpandDims")
def _ExpandDimsShape(op):
  """Determine shape for expand op's output tensor.

  Args:
    op: Operation for which to determine shape.
      op.inputs[0] is the input tensor.
      op.inputs[1] is the dimension in which to expand.

  Returns:
    Shape of op's output tensor.

  Raises:
    ValueError: If dim is outside of [-rank - 1, rank], where rank is the
      number of dimensions in the input tensor.
  """
  input_shape = op.inputs[0].get_shape()
  if input_shape.dims is None:
    return [tensor_shape.unknown_shape()]
  input_ndims = input_shape.ndims
  dim = tensor_util.constant_value(op.inputs[1])
  if dim is None:
    # Robustness fix: a non-constant dim previously crashed the range check
    # below; the output rank is still statically known to be input_ndims + 1.
    return [tensor_shape.unknown_shape(ndims=input_ndims + 1)]
  if dim < -input_ndims - 1 or dim > input_ndims:
    # Bug fix: the message used to report the lower bound as -input_ndims,
    # but the check accepts [-input_ndims - 1, input_ndims].
    raise ValueError(
        "dim %d not in [%d, %d]." % (dim, -input_ndims - 1, input_ndims))
  if dim < 0:
    # Normalize a negative dim to its positive insertion position.
    dim += (input_ndims + 1)
  result_shape = list(input_shape.dims)
  result_shape.insert(dim, 1)
  return [tensor_shape.TensorShape(result_shape)]
@ops.RegisterShape("Squeeze")
def _SqueezeShape(op):
  """Determine shape for squeeze op's output tensor.

  Args:
    op: Operation for which to determine shape.

  Returns:
    Shape of op's output tensor.

  Raises:
    ValueError: if squeeze_dims includes a dimension outside of [-rank, rank),
      where rank is the number of dimensions in the input tensor. Or, if
      squeeze_dims includes a dimension for which input shape has a value
      not equal to 1.
  """
  input_shape = op.inputs[0].get_shape()
  if input_shape.dims is None:
    # Unknown input rank: nothing can be said about the output shape.
    return [tensor_shape.unknown_shape()]
  squeeze_dims = op.get_attr("squeeze_dims") or []
  wrapped_squeeze_dims = []
  input_ndims = input_shape.ndims
  # Normalize negative dims to their positive equivalents, validating range.
  for i, squeeze_dim in enumerate(squeeze_dims):
    if squeeze_dim < -input_ndims or squeeze_dim >= input_ndims:
      raise ValueError(
          "squeeze_dims[%d]=%d not in [%d, %d)." % (
              i, squeeze_dim, -input_ndims, input_ndims))
    if squeeze_dim < 0:
      squeeze_dim += input_ndims
    wrapped_squeeze_dims.append(squeeze_dim)
  result_shape = []
  for i, dim in enumerate([d.value for d in input_shape.dims]):
    is_explicit_match = i in wrapped_squeeze_dims
    if dim is None:
      if is_explicit_match:
        # Assume that the squeezed dimension will be 1 at runtime.
        continue
      if not wrapped_squeeze_dims:
        # If squeezing all 1 dimensions and we see a None, give up.
        return [tensor_shape.unknown_shape()]
    elif dim == 1:
      # A size-1 dim is dropped if named explicitly, or if no squeeze_dims
      # were given (squeeze-all mode).
      if is_explicit_match or not wrapped_squeeze_dims:
        continue
    elif is_explicit_match:
      raise ValueError(
          "Can not squeeze dim[%d], expected a dimension of 1, got %d." % (
              i, dim))
    result_shape.append(dim)
  return [tensor_shape.TensorShape(result_shape)]
@ops.RegisterShape("Bitcast")
def _BitcastShape(op):
  """Shape function for Bitcast op.

  Bitcasting to a narrower type appends a dimension equal to the ratio of
  element sizes; bitcasting to a wider type consumes the last dimension,
  which must equal that ratio.

  Args:
    op: A Bitcast Operation.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the last input dimension does not match the required
      element-size ratio when casting to a wider type.
  """
  input_shape = op.inputs[0].get_shape()
  size_of_input = op.inputs[0].dtype.size
  size_of_output = dtypes.as_dtype(op.get_attr("type")).size
  if size_of_input == size_of_output:
    return [tensor_shape.TensorShape(input_shape)]
  if size_of_output > size_of_input:
    # Widening cast: the last dimension is consumed and must equal the
    # ratio of the element sizes.
    new_shape = input_shape.as_list()
    ratio = size_of_output // size_of_input
    if new_shape[-1] != ratio:
      # Bug fix: the message previously printed size_of_input //
      # size_of_output, which is always 0 in this branch.
      raise ValueError(
          "Cannot bitcast due to shape. %d is not evenly divisible by %d." %
          (new_shape[-1], ratio))
    return [tensor_shape.TensorShape(new_shape[:-1])]
  # Narrowing cast: append a dimension holding the inverse ratio.
  new_shape = input_shape.concatenate([size_of_input // size_of_output])
  return [tensor_shape.TensorShape(new_shape)]
@ops.RegisterShape("Reshape")
def _ReshapeShape(op):
  """Shape function for Reshape op."""
  input_shape = op.inputs[0].get_shape()
  if input_shape.ndims is not None:
    # Total element count of the input, used below to resolve a -1 wildcard
    # in the requested shape.
    num_elements = tensor_shape.Dimension(1)
    for dim in input_shape.dims:
      num_elements *= dim
  else:
    num_elements = tensor_shape.Dimension(None)
  new_shape_shape = op.inputs[1].get_shape().with_rank(1)
  new_shape = tensor_util.constant_value(op.inputs[1])
  if new_shape is None:
    # Attempt to infer the rank of the output from the length of
    # new_shape.
    return [tensor_shape.unknown_shape(ndims=new_shape_shape[0].value)]
  new_shape = np.reshape(new_shape, -1).tolist()
  if -1 not in new_shape:
    # The new shape is fully defined.
    if (num_elements.value is not None
        and num_elements.value != np.prod(new_shape)):
      raise ValueError(
          "Cannot reshape a tensor with %d elements to shape %s (%d elements)"
          % (num_elements.value, new_shape, np.prod(new_shape)))
    return [tensor_shape.TensorShape(new_shape)]
  elif num_elements.value is not None:
    # We know the number of elements, so we can calculate the missing
    # dimension in the new_shape.
    known_elements = 1
    unknown_index = None
    for i, dim in enumerate(new_shape):
      if dim == -1:
        unknown_index = i
      else:
        known_elements *= dim
    if known_elements == 0:
      raise ValueError("cannot infer the missing input size for "
                       "an empty tensor unless all specified "
                       "input sizes are non-zero")
    if num_elements % known_elements != 0:
      raise ValueError("input has %s elements, which isn't divisible by %d" %
                       (num_elements, known_elements))
    new_shape[unknown_index] = num_elements // known_elements
    return [tensor_shape.TensorShape(new_shape)]
  else:
    # We don't know the input shape, but we know n-1 of the dimensions
    # in the new shape.
    new_shape[new_shape.index(-1)] = None
    return [tensor_shape.TensorShape(new_shape)]
@ops.RegisterShape("BroadcastGradientArgs")
def _BroadcastGradientArgsShape(op):
  """Shape function for the BroadcastGradientArgs op."""
  # TODO(mrry): Implement constant_value for BroadcastGradientArgs?
  for inp in (op.inputs[0], op.inputs[1]):
    inp.get_shape().assert_has_rank(1)
  # Both reduction-index outputs are vectors of data-dependent length.
  return [tensor_shape.vector(None), tensor_shape.vector(None)]
@ops.RegisterShape("Fill")
def _FillShape(op):
  """Shape function for the Fill op.

  Fill takes a vector of dimensions and a scalar value, and produces a
  tensor with the given dimensions.

  Args:
    op: A Fill Operation.

  Returns:
    A single-element list containing the shape of the output.
  """
  dims_vector_shape = op.inputs[0].get_shape().with_rank(1)
  op.inputs[1].get_shape().assert_is_compatible_with(tensor_shape.scalar())
  dims = tensor_util.constant_value(op.inputs[0])
  if dims is not None:
    return [tensor_shape.TensorShape(dims.tolist())]
  # Dimensions are not statically known; at best the output rank follows
  # from the length of the dimensions vector.
  return [tensor_shape.unknown_shape(ndims=dims_vector_shape[0].value)]
@ops.RegisterShape("InvertPermutation")
def _InvertPermutationShape(op):
  """Shape function for InvertPermutation: a vector maps to a vector."""
  return [op.inputs[0].get_shape().with_rank(1)]
@ops.RegisterShape("ListDiff")
def _ListDiffShape(op):
  """Shape function for the ListDiff op."""
  for inp in (op.inputs[0], op.inputs[1]):
    inp.get_shape().assert_has_rank(1)
  # Both outputs (values and indices) are vectors of data-dependent length.
  # TODO(mrry): Indicate that the length falls within an interval?
  return [tensor_shape.vector(None)] * 2
@ops.RegisterShape("Pad")
@ops.RegisterShape("MirrorPad")
def _PadShape(op):
  """Shape function for the Pad/MirrorPad ops.

  The inputs are a rank-N tensor and an N-by-2 paddings matrix whose i-th
  row gives the number of elements to add before and after dimension i. The
  single output has rank N with each dimension grown accordingly.

  Args:
    op: A Pad Operation.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the input shapes are incompatible.
  """
  paddings_shape = op.inputs[1].get_shape().with_rank(2)
  input_shape = op.inputs[0].get_shape().with_rank(paddings_shape[0].value)
  paddings_shape = paddings_shape.merge_with(
      tensor_shape.matrix(input_shape.ndims, 2))
  paddings = tensor_util.constant_value(op.inputs[1])
  if paddings is None:
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
  grown_dims = []
  for i, dim in enumerate(input_shape.dims):
    if paddings[i, 0] < 0 or paddings[i, 1] < 0:
      raise ValueError("paddings must be non-negative")
    grown_dims.append(dim + paddings[i, 0] + paddings[i, 1])
  return [tensor_shape.TensorShape(grown_dims)]
@ops.RegisterShape("MirrorPadGrad")
def _MirrorPadGradShape(op):
  """Shape function for the MirrorPadGrad op: shrinks each padded dimension."""
  paddings_shape = op.inputs[1].get_shape().with_rank(2)
  input_shape = op.inputs[0].get_shape().with_rank(paddings_shape[0].value)
  paddings_shape = paddings_shape.merge_with(tensor_shape.matrix(
      input_shape.ndims, 2))
  paddings = tensor_util.constant_value(op.inputs[1])
  if paddings is None:
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
  shrunk_dims = []
  for i, dim in enumerate(input_shape.dims):
    before, after = paddings[i, 0], paddings[i, 1]
    if before < 0 or after < 0:
      raise ValueError("Paddings must be non-negative.")
    # The un-padded dimension must remain strictly positive.
    if dim <= before + after:
      raise ValueError("Output dimension is not positive.")
    shrunk_dims.append(dim - before - after)
  return [tensor_shape.TensorShape(shrunk_dims)]
@ops.RegisterShape("ReverseSequence")
def _ReverseSequenceShape(op):
  """Shape function for the ReverseSequence op.

  The inputs are a rank-N tensor (with size B in the `batch_dim` dimension)
  and a length-B vector of sequence lengths. The single output has the same
  shape as the input.

  Args:
    op: A ReverseSequence Operation.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the input shapes are incompatible or the dim attrs are
      out of range.
  """
  input_shape = op.inputs[0].get_shape()
  seq_lens_shape = op.inputs[1].get_shape().with_rank(1)
  if input_shape.ndims is None:
    return [None]
  seq_dim = op.get_attr("seq_dim")
  batch_dim = op.get_attr("batch_dim")
  if batch_dim >= input_shape.ndims:
    raise ValueError("batch_dim must be < input.dims() (%d vs %d)" %
                     (batch_dim, input_shape.ndims))
  if seq_dim >= input_shape.ndims:
    raise ValueError("seq_dim must be < input.dims() (%d vs %d)" %
                     (seq_dim, input_shape.ndims))
  # Merge the batch dimension with the seq_lens vector length.
  batch_size = input_shape[batch_dim].merge_with(seq_lens_shape[0])
  merged_dims = [batch_size if ix == batch_dim else value
                 for ix, value in enumerate(input_shape)]
  return [tensor_shape.TensorShape(merged_dims)]
@ops.RegisterShape("Shape")
@ops.RegisterShape("ShapeN")
def _ShapeNShape(op):
  """Shape function for Shape/ShapeN: one rank-length vector per input."""
  return [tensor_shape.vector(inp.get_shape().ndims) for inp in op.inputs]
@ops.RegisterShape("Transpose")
def _TransposeShape(op):
  """Shape function for the Transpose op.

  The inputs are a rank-N tensor and a length-N permutation vector; the
  output is the input shape permuted accordingly.

  Args:
    op: A Transpose op.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the shapes of input and shuffle are incompatible.
    IndexError: If shuffle contains an index that is >= the rank of input.
  """
  input_shape = op.inputs[0].get_shape()
  perm_shape = op.inputs[1].get_shape().merge_with(
      tensor_shape.vector(input_shape.ndims))
  perm = tensor_util.constant_value(op.inputs[1])
  if perm is None:
    # Without a constant permutation only the rank is known.
    return [tensor_shape.unknown_shape(ndims=perm_shape[0].value)]
  permuted_dims = [input_shape[i] for i in perm.tolist()]
  return [tensor_shape.TensorShape(permuted_dims)]
@ops.RegisterShape("Split")
def _SplitShape(op):
  """Shape function for the Split op."""
  split_dim = tensor_util.constant_value(op.inputs[0])
  num_split = len(op.outputs)
  input_shape = op.inputs[1].get_shape()
  if split_dim is None:
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims)] * num_split
  split_dim = int(split_dim)
  input_shape = input_shape.with_rank_at_least(split_dim + 1)
  if not (input_shape[split_dim] % num_split).is_compatible_with(0):
    raise ValueError(
        "Number of ways to split should evenly divide the split "
        "dimension but got split_dim %d (size = %d) and num_split %d" %
        (split_dim, input_shape[split_dim].value, num_split))
  piece = input_shape[split_dim] // num_split
  out_shape = (input_shape[:split_dim]
               .concatenate(piece)
               .concatenate(input_shape[split_dim + 1:]))
  return [out_shape] * num_split
@ops.RegisterShape("Tile")
def _TileShape(op):
    """Shape function for the Tile op.

    The output has the same rank as the first input; dimension i of the
    output is the input dimension multiplied by entry i of the `multiples`
    vector (the second input).

    Args:
      op: A Tile Operation.

    Returns:
      A single-element list containing the shape of the output.
    """
    multiples_shape = op.inputs[1].get_shape().with_rank(1)
    input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0].value)
    multiples = tensor_util.constant_value(op.inputs[1])
    if multiples is None:
        return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    factors = multiples.ravel()
    return [tensor_shape.TensorShape(
        [dim * factors[i] for i, dim in enumerate(input_shape.dims)])]
@ops.RegisterShape("TileGrad")
def _TileGradShape(op):
    """Shape function for the TileGrad op.

    TileGrad reverses Tile: each output dimension is the corresponding
    input dimension divided by the matching entry of the `multiples`
    vector.

    Args:
      op: A TileGrad Operation.

    Returns:
      A single-element list containing the shape of the output.
    """
    multiples_shape = op.inputs[1].get_shape().with_rank(1)
    # Fix: pass the integer rank (.value, an int or None) to with_rank
    # instead of the Dimension object itself, consistent with _TileShape.
    input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0].value)
    multiples = tensor_util.constant_value(op.inputs[1])
    if multiples is None:
        return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    # Flatten like _TileShape so a [1, N]-shaped constant also works.
    multiples = multiples.ravel()
    output_dims = [dim // multiples[i]
                   for i, dim in enumerate(input_shape.dims)]
    return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("Where")
def _WhereShape(op):
    """Shape function for Where: [num_true, input_rank] with unknown rows."""
    input_rank = op.inputs[0].get_shape().ndims
    return [tensor_shape.matrix(None, input_rank)]
@ops.RegisterShape("ZerosLike")
def _ZerosLikeShape(op):
    """Shape function for ZerosLike: the output mirrors the input shape."""
    input_shape = op.inputs[0].get_shape()
    return [input_shape]
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
    """Computes the Levenshtein distance between sequences.

    This operation takes variable-length sequences (`hypothesis` and `truth`),
    each provided as a `SparseTensor`, and computes the Levenshtein distance.
    You can normalize the edit distance by length of `truth` by setting
    `normalize` to true.

    For example, given the following input:

    ```python
    # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
    #   (0,0) = ["a"]
    #   (1,0) = ["b"]
    hypothesis = tf.SparseTensor(
        [[0, 0, 0],
         [1, 0, 0]],
        ["a", "b"],
        (2, 1, 1))

    # 'truth' is a tensor of shape `[2, 2]` with variable-length values:
    #   (0,0) = []
    #   (0,1) = ["a"]
    #   (1,0) = ["b", "c"]
    #   (1,1) = ["a"]
    truth = tf.SparseTensor(
        [[0, 1, 0],
         [1, 0, 0],
         [1, 0, 1],
         [1, 1, 0]],
        ["a", "b", "c", "a"],
        (2, 2, 2))

    normalize = True
    ```

    This operation would return the following:

    ```python
    # 'output' is a tensor of shape `[2, 2]` with edit distances normalized
    # by 'truth' lengths.
    output ==> [[inf, 1.0],  # (0,0): no truth, (0,1): no hypothesis
                [0.5, 1.0]]  # (1,0): addition, (1,1): no hypothesis
    ```

    Args:
      hypothesis: A `SparseTensor` containing hypothesis sequences.
      truth: A `SparseTensor` containing truth sequences.
      normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
        length of `truth.`
      name: A name for the operation (optional).

    Returns:
      A dense `Tensor` with rank `R - 1`, where R is the rank of the
      `SparseTensor` inputs `hypothesis` and `truth`.

    Raises:
      TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
    """
    # Validate the argument types up front so callers get a clear TypeError
    # instead of an attribute error from the generated op below.
    if not isinstance(hypothesis, ops.SparseTensor):
        raise TypeError("Hypothesis must be a SparseTensor")
    if not isinstance(truth, ops.SparseTensor):
        raise TypeError("Truth must be a SparseTensor")

    return gen_array_ops._edit_distance(hypothesis.indices,
                                        hypothesis.values,
                                        hypothesis.shape,
                                        truth.indices,
                                        truth.values,
                                        truth.shape,
                                        normalize=normalize,
                                        name=name)
@ops.RegisterShape("EditDistance")
def _EditDistanceShape(op):
    """Shape function for the EditDistance op."""
    hyp_shape = tensor_util.constant_value(op.inputs[2])
    truth_shape = tensor_util.constant_value(op.inputs[5])
    if hyp_shape is None or truth_shape is None:
        # Either dense shape is not a graph-time constant.
        return [tensor_shape.unknown_shape()]
    if len(hyp_shape) != len(truth_shape):
        raise ValueError(
            "Inconsistent ranks in hypothesis and truth. Saw shapes: %s and %s" %
            (str(hyp_shape), str(truth_shape)))
    # Output drops the innermost (sequence) dimension.
    merged = [max(h, t) for h, t in zip(hyp_shape[:-1], truth_shape[:-1])]
    return [tensor_shape.TensorShape(merged)]
# The remaining ops do not change the shape of their inputs.
@ops.RegisterShape("Quantize")
@ops.RegisterShape("Dequantize")
def _QuantizeDequantizeShape(op):
    """Shape function for Quantize/Dequantize: validates the scalar ranges,
    then passes the input shape through unchanged."""
    # merge_with raises if either range input is not a scalar.
    op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
    op.inputs[2].get_shape().merge_with(tensor_shape.scalar())
    return common_shapes.unchanged_shape(op)
@ops.RegisterShape("SpaceToBatch")
def _SpaceToBatchShape(op):
    """Shape function for the SpaceToBatch op.

    The output shape is determined by the following inputs/attributes:

    * input: A rank-4 tensor with shape [B, H, W, D]
    * paddings: A 2-by-2 matrix, specified as follows:
          paddings = [[pad_top, pad_bottom], [pad_left, pad_right]],
      implying effective padded spatial dimensions:
          Hp = pad_top + H + pad_bottom
          Wp = pad_left + W + pad_right
      Both Hp and Wp must be multiples of block_size.
    * block_size: an int.

    Its output is also a rank-4 tensor with shape:
        [B*block_size*block_size, Hp/block_size, Wp/block_size, D]

    Args:
      op: A SpaceToBatch op.

    Returns:
      A single-element list containing the shape of the output.

    Raises:
      ValueError: If the shapes of inputs are not as expected.
      IndexError: If block_size does not divide Wp or Hp.
    """
    # Check that the input tensor is 4-D.
    try:
        input_shape = op.inputs[0].get_shape().with_rank(4)
    except ValueError:
        raise ValueError("tf.space_to_batch() requires 4-D input tensor.")

    # Check that the paddings tensor is a matrix with shape [2, 2].
    try:
        paddings_shape = op.inputs[1].get_shape().with_rank(2)
    except ValueError:
        raise ValueError("tf.space_to_batch() requires 2-D paddings tensor.")
    if paddings_shape[0] != 2 or paddings_shape[1] != 2:
        raise ValueError(
            "tf.space_to_batch() requires input paddings with shape [2, 2].")

    block_size = op.get_attr("block_size")
    if block_size <= 1:
        raise ValueError("Attribute block_size has to be > 1.")

    batch = input_shape[0] * block_size * block_size
    depth = input_shape[3]

    paddings = tensor_util.constant_value(op.inputs[1])
    if paddings is None:
        # Fix: paddings may not be a graph-time constant; the original code
        # crashed here with a TypeError when indexing None.  Batch and depth
        # are still known, only the spatial dimensions are not.
        return [tensor_shape.TensorShape([batch, None, None, depth])]

    if (paddings[0, 0] < 0 or paddings[0, 1] < 0 or
            paddings[1, 0] < 0 or paddings[1, 1] < 0):
        raise ValueError("paddings cannot be negative.")

    input_height = input_shape[1] + paddings[0, 0] + paddings[0, 1]
    input_width = input_shape[2] + paddings[1, 0] + paddings[1, 1]
    if input_height % block_size > 0 or input_width % block_size > 0:
        raise IndexError("block_size needs to divide both width and height.")

    height = input_height // block_size
    width = input_width // block_size
    return [tensor_shape.TensorShape([batch, height, width, depth])]
@ops.RegisterShape("BatchToSpace")
def _BatchToSpaceShape(op):
    """Shape function for the BatchToSpace op.

    The output shape is determined by the following inputs/attributes:

    * input: A rank-4 tensor with shape
          [B*block_size*block_size, Hp/block_size, Wp/block_size, D]
      Note that the batch size of the input tensor must be divisible by
      `block_size * block_size`.
    * crops: A 2-by-2 matrix, specified as follows:
          crops = [[crop_top, crop_bottom], [crop_left, crop_right]].
    * block_size: an int.

    Its output is also a rank-4 tensor with shape [B, H, W, D], where:
        H = Hp - crop_top - crop_bottom
        W = Wp - crop_left - crop_right

    Args:
      op: A BatchToSpace op.

    Returns:
      A single-element list containing the shape of the output.

    Raises:
      ValueError: If the shapes of the inputs are not as expected.
      IndexError: If block_size*block_size does not divide the input batch
        size.
    """
    # Check that the input tensor is 4-D.
    try:
        input_shape = op.inputs[0].get_shape().with_rank(4)
    except ValueError:
        raise ValueError("tf.batch_to_space() requires 4-D input tensor.")

    # Check that the crops tensor is a matrix with shape [2, 2].
    try:
        crops_shape = op.inputs[1].get_shape().with_rank(2)
    except ValueError:
        raise ValueError("tf.space_to_batch() requires 2-D crops tensor.")
    if crops_shape[0] != 2 or crops_shape[1] != 2:
        raise ValueError(
            "tf.space_to_batch() requires input crops with shape [2, 2].")

    crops = tensor_util.constant_value(op.inputs[1])
    # Fix: crops may not be a graph-time constant; the original code crashed
    # here with a TypeError when indexing None.
    if crops is not None and (crops[0, 0] < 0 or crops[0, 1] < 0 or
                              crops[1, 0] < 0 or crops[1, 1] < 0):
        raise ValueError("crops cannot be negative.")

    block_size = op.get_attr("block_size")
    if block_size <= 1:
        raise ValueError("Attribute block_size has to be > 1.")

    input_batch = input_shape[0]
    if input_batch % (block_size * block_size) > 0:
        raise IndexError("input batch must be divisible by block_size*block_size.")
    batch = input_batch // (block_size * block_size)
    depth = input_shape[3]

    if crops is None:
        # Spatial dimensions depend on the unknown crop amounts.
        return [tensor_shape.TensorShape([batch, None, None, depth])]

    height = input_shape[1] * block_size - crops[0, 0] - crops[0, 1]
    width = input_shape[2] * block_size - crops[1, 0] - crops[1, 1]
    if height <= 0 or width <= 0:
        raise ValueError("Output height or width is not positive.")
    return [tensor_shape.TensorShape([batch, height, width, depth])]
@ops.RegisterShape("SpaceToDepth")
def _SpaceToDepthShape(op):
    """Shape function for the SpaceToDepth op.

    Maps a rank-4 input [B, H, W, D] to
    [B, H/block_size, W/block_size, D*block_size*block_size].

    Args:
      op: A SpaceToDepth op.

    Returns:
      A single-element list containing the shape of the output.

    Raises:
      ValueError: If the shapes of input are not as expected.
      IndexError: If block_size does not divide W or H.
    """
    try:
        input_shape = op.inputs[0].get_shape().with_rank(4)
    except ValueError:
        raise ValueError(
            "tf.space_to_depth() requires tensors with exactly 4 dimensions.")

    block_size = op.get_attr("block_size")
    if block_size <= 1:
        raise ValueError("Attribute block_size has to be > 1.")

    height, width = input_shape[1], input_shape[2]
    if (width % block_size > 0) or (height % block_size > 0):
        raise IndexError("block_size needs to divide both width and height.")

    return [tensor_shape.TensorShape([
        input_shape[0],
        height // block_size,
        width // block_size,
        input_shape[3] * block_size * block_size,
    ])]
@ops.RegisterShape("DepthToSpace")
def _DepthToSpaceShape(op):
    """Shape function for the DepthToSpace op.

    Maps a rank-4 input [B, H, W, D] to
    [B, H*block_size, W*block_size, D/(block_size*block_size)].

    Args:
      op: A DepthToSpace op.

    Returns:
      A single-element list containing the shape of the output.

    Raises:
      ValueError: If the shapes of input are not as expected.
      IndexError: If block_size*block_size does not divide D.
    """
    try:
        input_shape = op.inputs[0].get_shape().with_rank(4)
    except ValueError:
        raise ValueError(
            "tf.depth_to_space() requires tensors with exactly 4 dimensions.")

    block_size = op.get_attr("block_size")
    if block_size <= 1:
        raise ValueError("Attribute block_size has to be > 1.")

    squared_block = block_size * block_size
    if input_shape[3] % squared_block > 0:
        raise IndexError(
            "block_size*block_size needs to divide the input depth.")

    return [tensor_shape.TensorShape([
        input_shape[0],
        input_shape[1] * block_size,
        input_shape[2] * block_size,
        input_shape[3] // squared_block,
    ])]
def one_hot(indices, depth, on_value=1, off_value=0,
            axis=None, dtype=dtypes.float32, name=None):
    """Returns a one-hot tensor.

    The locations represented by indices in `indices` take value `on_value`,
    while all other locations take value `off_value`. By default, `on_value`
    is 1, and `off_value` is 0. The type of the output tensor is specified by
    `dtype`, which defaults to `tf.float32`.

    If the input `indices` is rank `N`, the output will have rank `N+1`. The
    new axis is created at dimension `axis` (default: the new axis is appended
    at the end).

    If `indices` is a scalar the output shape will be a vector of length
    `depth`.

    If `indices` is a vector of length `features`, the output shape will be:

    ```
      features x depth if axis == -1
      depth x features if axis == 0
    ```

    If `indices` is a matrix (batch) with shape `[batch, features]`, the
    output shape will be:

    ```
      batch x features x depth if axis == -1
      batch x depth x features if axis == 1
      depth x batch x features if axis == 0
    ```

    For example, with `indices = [0, 2, -1, 1]`, `depth = 3`,
    `on_value = 5.0`, `off_value = 0.0` and `axis = -1`, the output is the
    `[4 x 3]` tensor

    ```
      [[5.0, 0.0, 0.0],   # one_hot(0)
       [0.0, 0.0, 5.0],   # one_hot(2)
       [0.0, 0.0, 0.0],   # one_hot(-1): out-of-range rows are all off_value
       [0.0, 5.0, 0.0]]   # one_hot(1)
    ```

    Args:
      indices: A `Tensor` of indices.
      depth: A scalar defining the depth of the one hot dimension.
      on_value: A scalar defining the value to fill in output when
        `indices[j] = i`. (default: 1)
      off_value: A scalar defining the value to fill in output when
        `indices[j] != i`. (default: 0)
      axis: The axis to fill (default: -1, a new inner-most axis).
      dtype: The data type of the output tensor.
      name: A name for the operation (optional).

    Returns:
      output: The one-hot tensor.

    Raises:
      TypeError: If dtype is `tf.string`
    """
    # Check for bad dtype specification
    if dtype == dtypes.string:
        raise TypeError("dtype must be a numeric type")

    with ops.op_scope([indices, depth, on_value, off_value,
                       axis, dtype], name, "one_hot") as name:
        # on/off values are converted with the requested output dtype; the
        # indices and depth have fixed integer dtypes required by the kernel.
        on_value = ops.convert_to_tensor(on_value, dtype=dtype, name="on_value")
        off_value = ops.convert_to_tensor(off_value, dtype=dtype, name="off_value")
        indices = ops.convert_to_tensor(indices, dtype=dtypes.int64, name="indices")
        depth = ops.convert_to_tensor(depth, dtype=dtypes.int32, name="depth")
        return gen_array_ops._one_hot(indices, depth, on_value,
                                      off_value, axis, name)
@ops.RegisterShape("OneHot")
def _OneHotShape(op):
    """Shape function for the OneHot op.

    Inserts the depth dimension at position `axis` of the indices shape,
    closely following the code in the .cc implementation.

    Args:
      op: A OneHot Operation.

    Returns:
      A single-element list containing the shape of the output.

    Raises:
      ValueError: if axis < -1.
    """
    indices_shape = op.inputs[0].get_shape()
    rank = indices_shape.ndims
    depth = tensor_util.constant_value(op.inputs[1])
    axis = op.get_attr("axis")
    if axis < -1:
        raise ValueError("axis must be >= -1")
    if rank is None:
        # Unknown indices rank: the output shape is fully unknown.
        return [tensor_shape.TensorShape(None)]
    output_dims = indices_shape.as_list()
    # axis == -1 maps to the appended position via the modulo below.
    output_dims.insert(axis % (rank + 1), depth)
    return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("PlaceholderWithDefault")
def _PlaceholderWithDefaultShape(op):
    """Shape function for the PlaceholderWithDefault op.

    This op acts as an identity when it is not fed (passing through a
    default value), but allows the user to feed it with tensors of a
    possibly less precise shape than its default value.

    Args:
      op: A PlaceholderWithDefault `Operation`.

    Returns:
      A single-element list containing the shape of the output.
    """
    output_shape = tensor_shape.TensorShape(op.get_attr("shape"))
    default_shape = op.inputs[0].get_shape()
    # The shapes are deliberately NOT merged: `output_shape` may be *less*
    # precise than the default value's shape.
    default_shape.assert_is_compatible_with(output_shape)
    return [output_shape]
| 32.411059 | 86 | 0.662058 |
f0188fa8f59fa2a85227b30c634c8d386807fe07 | 6,200 | py | Python | cinebot_mini/execution_routine/planner.py | cheng-chi/cinebot_mini | 708a7c80d2f203dfe3b52bf84d9cbafac7673d27 | [
"MIT"
] | null | null | null | cinebot_mini/execution_routine/planner.py | cheng-chi/cinebot_mini | 708a7c80d2f203dfe3b52bf84d9cbafac7673d27 | [
"MIT"
] | null | null | null | cinebot_mini/execution_routine/planner.py | cheng-chi/cinebot_mini | 708a7c80d2f203dfe3b52bf84d9cbafac7673d27 | [
"MIT"
] | null | null | null | from cinebot_mini import TRANSFORMS
from cinebot_mini.robot_abstraction.robot import Robot
from cinebot_mini.geometry_utils import (
ArcNDInterpolator,
Gaze3DInterpolator,
TransformationTree)
from cinebot_mini.web_utils.blender_client import *
import numpy as np
import pickle
def save_planner(planner, fname):
    """Pickle the planner's DATA_ATTRS attribute values to *fname*.

    Args:
        planner: Object exposing a DATA_ATTRS list of attribute names.
        fname: Path of the pickle file to write.
    """
    attr_dict = {attr: getattr(planner, attr) for attr in planner.DATA_ATTRS}
    # Fix: the original passed open(fname, 'wb') directly to pickle.dump,
    # leaking the file handle; the context manager guarantees it is closed.
    with open(fname, 'wb') as handle:
        pickle.dump(attr_dict, handle)
def load_planner(planner, fname):
    """Restore attributes previously written by save_planner onto *planner*.

    Args:
        planner: Object to receive the stored attributes.
        fname: Path of the pickle file written by save_planner.

    NOTE(review): pickle.load can execute arbitrary code from the file —
    only load planner files from trusted sources.
    """
    # Fix: the original passed open(fname, 'rb') directly to pickle.load,
    # leaking the file handle; the context manager guarantees it is closed.
    with open(fname, 'rb') as handle:
        attr_dict = pickle.load(handle)
    for key, val in attr_dict.items():
        setattr(planner, key, val)
class DirectPlanner:
    """Records joint configurations and resamples them into a timed trajectory."""

    # Attributes persisted by save_planner/load_planner.
    DATA_ATTRS = ["configuration_history"]

    def __init__(self, robot: Robot):
        self.robot = robot
        self.configuration_history = []
        # Torque off so the arm can be moved externally while recording.
        self.robot.disable_torque()

    def record(self):
        """Append the robot's current joint angles as a new keyframe."""
        angles = self.robot.get_joint_angles()
        self.configuration_history.append(angles)

    def plan(self, duration=5.0, fps=30.0):
        """Resample the recorded keyframes into fps*duration configurations."""
        keyframes = np.array(self.configuration_history)
        interpolator = ArcNDInterpolator(keyframes, num_cache=20, input_smoothing=0.0)
        samples = np.linspace(0, interpolator.length(), int(fps * duration))
        return interpolator.generate(samples)

    def clear(self):
        """Discard all recorded keyframes."""
        self.configuration_history = []

    def blender_visualize(self):
        """No visualization is implemented for the direct planner."""
        pass
class GazePlanner:
    """Plans a camera trajectory that keeps gazing at recorded target points.

    Camera positions and gaze points are recorded from the (torque-disabled)
    robot; plan() interpolates camera poses and solves IK through the
    transformation tree to obtain joint configurations.
    """

    # Attributes persisted by save_planner/load_planner.
    DATA_ATTRS = ["configuration_history", "camera_points", "gaze_points", "plan_cache"]

    def __init__(self, robot: Robot, tf_tree: TransformationTree, duration=5.0, fps=30.0):
        """Set up bookkeeping and disable torque so the arm can be hand-posed.

        Args:
            robot: Robot abstraction providing joint angles and torque control.
            tf_tree: Transformation tree containing the robot's kinematic chain.
            duration: Length of the planned trajectory in seconds.
            fps: Trajectory sampling rate (frames per second).
        """
        self.robot = robot
        self.tf_tree = tf_tree
        self.chain_name = robot.get_chain().name
        self.camera_name = TRANSFORMS["robot_camera_name"]
        self.end_effector_name = self._robot_chain().links[-1].name
        self.duration = duration
        self.fps = fps
        self.configuration_history = []
        self.camera_points = []
        self.gaze_points = []
        self.plan_cache = []
        # Any recording invalidates the cached plan; True forces a fresh plan().
        self.cache_dirty = True
        self.robot.disable_torque()

    def _robot_chain(self):
        # Kinematic chain object stored in the transformation tree.
        return self.tf_tree.chains[self.chain_name]

    def _current_camera_pose(self, joint_angles=None):
        """Return the camera pose (4x4 transform) for the given joint angles.

        Reads the robot's current angles when none are provided.  NOTE: this
        mutates the tf_tree chain state as a side effect.
        """
        if joint_angles is None:
            joint_angles = self.robot.get_joint_angles()
        self.tf_tree.set_chain_state(self.chain_name, joint_angles)
        camera_pose = self.tf_tree.get_transform(self.camera_name)
        return camera_pose

    def record_camera_pose(self):
        """Record the current joint angles and the resulting camera position."""
        joint_angles = self.robot.get_joint_angles()
        print(joint_angles)
        self.configuration_history.append(joint_angles)
        camera_pose = self._current_camera_pose(joint_angles)
        # Translation column of the homogeneous transform.
        camera_point = camera_pose[:3, 3]
        self.camera_points.append(camera_point)
        self.cache_dirty = True

    def add_gaze_point(self, gaze_point=None):
        """Add a 3-D gaze target; defaults to the current camera position."""
        if gaze_point is None:
            camera_pose = self._current_camera_pose()
            camera_point = camera_pose[:3, 3]
            gaze_point = camera_point
        self.gaze_points.append(gaze_point)
        self.cache_dirty = True

    def _interpolate_camera(self):
        """Interpolate recorded camera points into fps*duration camera poses."""
        # With 3 or fewer gaze points only the first one is used as a single
        # fixed target; with more, the full array is passed to the
        # interpolator — presumably to interpolate the gaze as well (TODO confirm).
        gaze_points_input = self.gaze_points[0]
        if len(self.gaze_points) > 3:
            gaze_points_input = np.array(self.gaze_points)
        interpolator = Gaze3DInterpolator(np.array(self.camera_points), gaze_points_input)
        arc_lengths = np.linspace(
            0, interpolator.length(), int(self.fps * self.duration))
        output_camera_poses = interpolator.generate(arc_lengths)
        return output_camera_poses

    def plan(self):
        """Solve IK for every interpolated camera pose; returns joint configs.

        Results are cached until a new pose or gaze point is recorded.
        Poses whose IK fails are skipped (logged to stdout), so the result
        may contain fewer configurations than camera poses.
        """
        if self.cache_dirty is False:
            return self.plan_cache
        # init to configuration history
        output_camera_poses = self._interpolate_camera()
        # Seed the IK from the first recorded configuration.
        self.tf_tree.set_chain_state(self.chain_name, self.configuration_history[0])
        output_configs = []
        for i in range(len(output_camera_poses)):
            camera_pose = output_camera_poses[i]
            # self.tf_tree.set_chain_state(self.chain_name, [0,0,0,0,0,0])
            try:
                # set_transform runs IK; the solved state is read back below.
                self.tf_tree.set_transform(self.camera_name, camera_pose)
                config = self.tf_tree.chain_states[self.chain_name]
                output_configs.append(config)
            except RuntimeError as e:
                # IK failure: report and continue from the previous state.
                config = self.tf_tree.chain_states[self.chain_name]
                print("IK Failed at i={}, previous configuration:". format(i), config)
                print("Camera pose:", camera_pose)
        self.plan_cache = output_configs
        self.cache_dirty = False
        return output_configs

    def blender_animate(self, axis_size=0.05):
        """Export the planned trajectory of every tf frame to Blender.

        Args:
            axis_size: Display size for the Blender EMPTY frame objects.
        """
        frame_transforms = dict()
        for frame_name in self.tf_tree.transforms.keys():
            frame_transforms[frame_name] = []
            # Create a Blender EMPTY placeholder per frame if missing.
            if not test_object_exist(frame_name):
                create_object(frame_name, type="EMPTY")
                set_property(frame_name, "empty_display_size", axis_size)
        output_configs = self.plan()
        for i in range(len(output_configs)):
            self.tf_tree.set_chain_state(self.chain_name, output_configs[i])
            for frame_name in self.tf_tree.transforms.keys():
                frame_transforms[frame_name].append(
                    self.tf_tree.get_transform(frame_name))
        for frame_name in self.tf_tree.transforms.keys():
            set_animation_matrix(frame_name, frame_transforms[frame_name])

    def blender_animate_input(self, axis_size=0.05):
        """Export debug animations of the interpolated camera and gaze point."""
        point_name = "DEBUG_gaze_point"
        camera_name = "DEBUG_camera_point"
        for frame_name in [point_name, camera_name]:
            if not test_object_exist(frame_name):
                create_object(frame_name, type="EMPTY")
                set_property(frame_name, "empty_display_size", axis_size)
        output_camera_poses = self._interpolate_camera()
        # The (single) first gaze point is shown as a static pose per frame.
        point_pose = np.eye(4)
        point_pose[:3, 3] = self.gaze_points[0]
        point_poses = [point_pose for _ in range(len(output_camera_poses))]
        set_animation_matrix(point_name, point_poses)
        set_animation_matrix(camera_name, output_camera_poses)
| 37.575758 | 90 | 0.666774 |
e5895c3fe1acf7e17ad5b9a68eee9ed3be76a341 | 2,097 | py | Python | setup.py | New-Math-Data/gytrash | 5ed28cc9bddf009caef347a883cb30b5e01cc8f9 | [
"Apache-2.0"
] | null | null | null | setup.py | New-Math-Data/gytrash | 5ed28cc9bddf009caef347a883cb30b5e01cc8f9 | [
"Apache-2.0"
] | 8 | 2021-07-03T22:23:05.000Z | 2021-07-06T01:06:20.000Z | setup.py | trejas/gytrash | 5ed28cc9bddf009caef347a883cb30b5e01cc8f9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
from codecs import open
from setuptools import find_packages, setup

# Make the repository checkout importable so the metadata module can be
# loaded before the package is installed.
repo_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, repo_path)

from gytrash import __about__ as about  # noqa

packages = find_packages(exclude=["examples",],)


def _pinned_requirements(path):
    """Return the '=='-pinned requirement lines from *path*."""
    with open(path) as req_file:
        return [entry for entry in req_file if "==" in entry]


install_requires = _pinned_requirements("requirements.txt")
tests_require = _pinned_requirements("requirements-test.txt")
dev_requires = _pinned_requirements("requirements-dev.txt")

# The README becomes the long description shown on PyPI.
with open("README.md", "r", "utf-8") as f:
    readme = f.read()

setup(
    name=about.__title__,
    version=about.__version__,
    description=about.__description__,
    long_description=readme,
    long_description_content_type="text/markdown",
    author=about.__author__,
    author_email=about.__author_email__,
    url=about.__url__,
    packages=packages,
    package_dir={"gytrash": "gytrash"},
    package_data={"": []},
    include_package_data=True,
    python_requires=">=3.7",
    install_requires=install_requires,
    setup_requires=install_requires,
    extras_require={"test": tests_require, "dev": dev_requires,},
    license=about.__license__,
    zip_safe=False,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    tests_require=tests_require,
    project_urls={"Source": "https://github.com/trejas/gytrash"},
)
| 33.285714 | 70 | 0.686218 |
e097e66d0aeb7437810df766241788908eee3d9f | 1,283 | py | Python | testing/scipy_distutils-0.3.3_34.586/laheyfcompiler.py | fireballpoint1/fortranTOpy | 55843a62c6f0a2f8e2a777ef70193940d3d2d141 | [
"Apache-2.0"
] | 1 | 2018-08-26T05:10:56.000Z | 2018-08-26T05:10:56.000Z | testing/scipy_distutils-0.3.3_34.586/laheyfcompiler.py | fireballpoint1/fortranTOpy | 55843a62c6f0a2f8e2a777ef70193940d3d2d141 | [
"Apache-2.0"
] | null | null | null | testing/scipy_distutils-0.3.3_34.586/laheyfcompiler.py | fireballpoint1/fortranTOpy | 55843a62c6f0a2f8e2a777ef70193940d3d2d141 | [
"Apache-2.0"
] | 1 | 2018-06-26T18:06:44.000Z | 2018-06-26T18:06:44.000Z | import os
import sys
from cpuinfo import cpu
from fcompiler import FCompiler
class LaheyFCompiler(FCompiler):
    """scipy_distutils Fortran compiler description for Lahey/Fujitsu lf95."""

    compiler_type = 'lahey'
    # Matched against `lf95 --version` output to extract the release number.
    version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'

    # Command lines per tool-chain role; `--fix` selects fixed-form source
    # for F77-style files.
    executables = {
        'version_cmd'  : ["lf95", "--version"],
        'compiler_f77' : ["lf95", "--fix"],
        'compiler_fix' : ["lf95", "--fix"],
        'compiler_f90' : ["lf95"],
        'linker_so'    : ["lf95","-shared"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    module_dir_switch = None  #XXX Fix me
    module_include_switch = None #XXX Fix me

    def get_flags_opt(self):
        """Optimization flags."""
        return ['-O']

    def get_flags_debug(self):
        """Debug flags; --chk/--chkglobal enable runtime checking."""
        return ['-g','--chk','--chkglobal']

    def get_library_dirs(self):
        """Library search paths, derived from the LAHEY environment variable."""
        opt = []
        d = os.environ.get('LAHEY')
        if d:
            opt.append(os.path.join(d,'lib'))
        return opt

    def get_libraries(self):
        """Runtime libraries to link against."""
        opt = []
        opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6'])
        return opt
if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    from fcompiler import new_fcompiler
    # Smoke test: build the Lahey compiler description and report its version.
    compiler = new_fcompiler(compiler='lahey')
    compiler.customize()
    # Fix: `print x` is a Python-2-only statement (SyntaxError on Python 3);
    # the call form below works on both Python 2 and 3.
    print(compiler.get_version())
| 27.297872 | 88 | 0.574435 |
8ec4538d4f4511f50472a745bc8763fc3df5831a | 44,808 | py | Python | applications/HDF5Application/tests/test_hdf5_core.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z | applications/HDF5Application/tests/test_hdf5_core.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z | applications/HDF5Application/tests/test_hdf5_core.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z | import KratosMultiphysics
import KratosMultiphysics.HDF5Application as KratosHDF5
from KratosMultiphysics.HDF5Application import core
from KratosMultiphysics.HDF5Application.core.utils import ParametersWrapper
from KratosMultiphysics.HDF5Application.core import controllers
from KratosMultiphysics.HDF5Application.core import operations
from KratosMultiphysics.HDF5Application.core import file_io
import KratosMultiphysics.KratosUnittest as KratosUnittest
import os
from unittest.mock import call, patch, MagicMock
def _SurrogateModelPart():
    """Build a MagicMock standing in for a Kratos ModelPart."""
    surrogate = MagicMock(spec=KratosMultiphysics.ModelPart)
    surrogate.Name = 'model_part'
    surrogate.ProcessInfo = {
        KratosMultiphysics.TIME: 1.23456789,
        KratosMultiphysics.DELTA_TIME: 0.1,
    }
    return surrogate
class TestFileIO(KratosUnittest.TestCase):
@staticmethod
def _BuildTestFileIOObject(obj):
class FilenameGetter(object):
def Get(self, identifier):
return identifier
obj.filename_getter = FilenameGetter()
obj.file_access_mode = 'exclusive'
obj.file_driver = 'core'
obj.echo_level = 0
@staticmethod
def _FilenameGetterSettings(**kwargs):
settings = ParametersWrapper()
if 'file_name' in kwargs:
settings['file_name'] = kwargs['file_name']
else:
settings['file_name'] = 'kratos.h5'
if 'time_format' in kwargs:
settings['time_format'] = kwargs['time_format']
return settings
def test_FileIO_FileSettings(self):
io = file_io._FileIO()
self._BuildTestFileIOObject(io)
settings = io._FileSettings('kratos.h5')
self.assertEqual(settings['file_name'], 'kratos.h5')
self.assertEqual(settings['file_access_mode'], 'exclusive')
self.assertEqual(settings['file_driver'], 'core')
self.assertEqual(settings['echo_level'], 0)
def test_HDF5SerialFileIO_Creation(self):
io = file_io._HDF5SerialFileIO()
self._BuildTestFileIOObject(io)
obj = io.Get('kratos.h5')
self.assertIsInstance(obj, KratosHDF5.HDF5FileSerial)
def test_SetDefaults(self):
settings = ParametersWrapper()
file_io._SetDefaults(settings)
self.assertTrue(settings.Has('io_type'))
self.assertTrue(settings.Has('file_name'))
self.assertFalse(settings.Has('time_format'))
self.assertTrue(settings.Has('file_access_mode'))
self.assertTrue(settings.Has('file_driver'))
self.assertTrue(settings.Has('echo_level'))
self.assertEqual(settings['io_type'], 'serial_hdf5_file_io')
self.assertEqual(settings['file_name'], 'kratos')
self.assertEqual(settings['file_access_mode'], 'exclusive')
if os.name == 'posix':
self.assertEqual(settings['file_driver'], 'sec2')
elif os.name == 'nt':
self.assertEqual(settings['file_driver'], 'windows')
self.assertEqual(settings['echo_level'], 0)
def test_SetDefaults_NonTerminalTime(self):
settings = ParametersWrapper()
settings['file_name'] = 'kratos-<time>.h5'
file_io._SetDefaults(settings)
self.assertTrue(settings.Has('time_format'))
self.assertEqual(settings['time_format'], '0.4f')
def test_SetDefaults_ParallelIO(self):
settings = ParametersWrapper()
settings.AddEmptyValue('io_type').SetString('parallel_hdf5_file_io')
file_io._SetDefaults(settings)
self.assertEqual(settings['file_driver'], 'mpio')
def test_GetIO_SerialIO(self):
io = file_io._GetIO('serial_hdf5_file_io')
self.assertIsInstance(io, file_io._HDF5SerialFileIO)
def test_GetIO_ParallelIO(self):
io = file_io._GetIO('parallel_hdf5_file_io')
self.assertIsInstance(io, file_io._HDF5ParallelFileIO)
def test_GetIO_MockIO(self):
io = file_io._GetIO('mock_hdf5_file_io')
self.assertIsInstance(io, file_io._HDF5MockFileIO)
def test_GetIO_GarbageInput(self):
with self.assertRaisesRegex(ValueError, r'"io_type" has invalid value "abcdefg"'):
file_io._GetIO('abcdefg')
@patch("KratosMultiphysics.FileNameDataCollector", autospec=True)
def test_FilenameGetter_WithFileExtension(self, mock_class):
settings = self._FilenameGetterSettings(file_name='kratos.h5')
obj = file_io._FilenameGetter(settings)
model_part = _SurrogateModelPart()
obj.Get(model_part)
mock_class.assert_called_once_with(model_part, 'kratos.h5', {'<time>':''})
@patch("KratosMultiphysics.FileNameDataCollector", autospec=True)
def test_FilenameGetter_WithoutFileExtension(self, mock_class):
settings = self._FilenameGetterSettings(file_name='kratos')
obj = file_io._FilenameGetter(settings)
model_part = _SurrogateModelPart()
obj.Get(model_part)
mock_class.assert_called_once_with(model_part, 'kratos.h5', {'<time>':''})
@patch("KratosMultiphysics.FileNameDataCollector", autospec=True)
def test_FilenameGetter_TimeFormat(self, mock_class):
settings = self._FilenameGetterSettings(time_format='0.4f')
obj = file_io._FilenameGetter(settings)
model_part = _SurrogateModelPart()
obj.Get(model_part)
mock_class.assert_called_once_with(model_part, 'kratos.h5', {'<time>':'0.4f'})
@patch("KratosMultiphysics.FileNameDataCollector", autospec=True)
def test_FilenameGetterWithDirectoryInitialization_WithoutDirectory(self, mock_class):
mock_instance = mock_class.return_value
mock_instance.GetFileName.return_value = 'kratos.h5'
settings = self._FilenameGetterSettings()
with patch('os.makedirs', autospec=True) as p:
data_comm = KratosMultiphysics.Testing.GetDefaultDataCommunicator()
obj = file_io._FilenameGetterWithDirectoryInitialization(settings, data_comm)
obj.Get(_SurrogateModelPart())
self.assertEqual(p.call_count, 0)
@patch("KratosMultiphysics.FileNameDataCollector", autospec=True)
def test_FilenameGetterWithDirectoryInitialization_DirectoryExists(self, mock_class):
mock_instance = mock_class.return_value
mock_instance.GetFileName.return_value = '/foo/kratos.h5'
settings = self._FilenameGetterSettings(file_name='/foo/kratos.h5')
patcher1 = patch('os.path.exists', autospec=True)
patcher2 = patch('os.makedirs', autospec=True)
pathexists = patcher1.start()
makedirs = patcher2.start()
pathexists.return_value = True
data_comm = KratosMultiphysics.Testing.GetDefaultDataCommunicator()
obj = file_io._FilenameGetterWithDirectoryInitialization(settings, data_comm)
obj.Get(_SurrogateModelPart())
pathexists.assert_called_once_with('/foo')
self.assertEqual(makedirs.call_count, 0)
patcher1.stop()
patcher2.stop()
@patch("KratosMultiphysics.FileNameDataCollector", autospec=True)
def test_FilenameGetterWithDirectoryInitialization_DirectoryDoesNotExist(self, mock_class):
mock_instance = mock_class.return_value
mock_instance.GetFileName.return_value = '/foo/kratos.h5'
settings = self._FilenameGetterSettings(file_name='/foo/kratos.h5')
patcher1 = patch('os.path.exists', autospec=True)
patcher2 = patch('os.makedirs', autospec=True)
pathexists = patcher1.start()
makedirs = patcher2.start()
pathexists.return_value = False
data_comm = KratosMultiphysics.Testing.GetDefaultDataCommunicator()
obj = file_io._FilenameGetterWithDirectoryInitialization(settings, data_comm)
obj.Get(_SurrogateModelPart())
pathexists.assert_called_once_with('/foo')
makedirs.assert_called_once_with('/foo')
patcher1.stop()
patcher2.stop()
@patch("KratosMultiphysics.FileNameDataCollector", autospec=True)
def test_FileIOMaxFilesToKeepExclusiveNoDeletion(self, mock_class):
mock_class_instance = mock_class.return_value
mock_class_instance.GetSortedFileNamesList.return_value = ['file_1', 'file_2', 'file_3', 'file_4', 'file_5']
settings = self._FilenameGetterSettings(file_name='/foo/kratos.h5')
settings['file_access_mode'] = 'exclusive'
settings['max_files_to_keep'] = 4
obj = file_io._FilenameGetter(settings)
with patch("os.path.isdir", autospec=True) as mock_dir:
mock_dir.return_value = True
with patch("KratosMultiphysics.kratos_utilities.DeleteFileIfExisting", autospec=True) as p:
obj.Get(_SurrogateModelPart())
self.assertEqual(p.call_count, 0)
self.assertEqual(mock_dir.call_args_list, [])
@patch("KratosMultiphysics.FileNameDataCollector", autospec=True)
def test_FileIOMaxFilesToKeepTruncateNoDeletion(self, mock_class):
mock_class_instance = mock_class.return_value
mock_class_instance.GetSortedFileNamesList.return_value = ['file_1', 'file_2', 'file_3']
settings = self._FilenameGetterSettings(file_name='/foo/kratos.h5')
settings['file_access_mode'] = 'truncate'
settings['max_files_to_keep'] = 4
obj = file_io._FilenameGetter(settings)
with patch("pathlib.Path.parents", autospec=True) as mock_path:
mock_is_dir = mock_path.__getitem__().is_dir
mock_is_dir.return_value = True
with patch("KratosMultiphysics.kratos_utilities.DeleteFileIfExisting", autospec=True) as p:
obj.Get(_SurrogateModelPart())
self.assertEqual(p.call_count, 0)
self.assertEqual(mock_is_dir.call_count, 1)
@patch("KratosMultiphysics.FileNameDataCollector", autospec=True)
def test_FileIOMaxFilesToKeepTruncateDeletion(self, mock_class):
mock_class_instance = mock_class.return_value
mock_class_instance.GetSortedFileNamesList.return_value = ['file_1', 'file_2', 'file_3', 'file_4', 'file_5']
settings = self._FilenameGetterSettings(file_name='/foo/kratos.h5')
settings['file_access_mode'] = 'truncate'
settings['max_files_to_keep'] = 4
obj = file_io._FilenameGetter(settings)
with patch("pathlib.Path.parents", autospec=True) as mock_path:
mock_is_dir = mock_path.__getitem__().is_dir
mock_is_dir.return_value = True
with patch("KratosMultiphysics.kratos_utilities.DeleteFileIfExisting", autospec=True) as p:
obj.Get(_SurrogateModelPart())
self.assertEqual(p.call_args_list, [call('file_2'), call('file_3')])
self.assertEqual(mock_is_dir.call_count, 1)
def test_Create_Settings(self):
settings = ParametersWrapper()
data_comm = KratosMultiphysics.Testing.GetDefaultDataCommunicator()
file_io.Create(settings, data_comm)
self.assertTrue(settings.Has('io_type'))
self.assertTrue(settings.Has('file_name'))
self.assertTrue(settings.Has('file_access_mode'))
self.assertTrue(settings.Has('file_driver'))
self.assertTrue(settings.Has('echo_level'))
self.assertEqual(settings['io_type'], 'serial_hdf5_file_io')
self.assertEqual(settings['file_access_mode'], 'exclusive')
if os.name == 'posix':
self.assertEqual(settings['file_driver'], 'sec2')
elif os.name == 'nt':
self.assertEqual(settings['file_driver'], 'windows')
self.assertEqual(settings['echo_level'], 0)
def test_Create_Attributes(self):
settings = ParametersWrapper()
data_comm = KratosMultiphysics.Testing.GetDefaultDataCommunicator()
io = file_io.Create(settings, data_comm)
self.assertIsInstance(io, file_io._HDF5SerialFileIO)
self.assertTrue(hasattr(io, 'filename_getter'))
self.assertEqual(io.file_access_mode, 'exclusive')
if os.name == 'posix':
self.assertEqual(io.file_driver, 'sec2')
elif os.name == 'nt':
self.assertEqual(io.file_driver, 'windows')
self.assertEqual(io.echo_level, 0)
class TestOperations(KratosUnittest.TestCase):
    """Tests for ``operations.Create`` and the concrete HDF5 I/O operations.

    Common pattern of the per-operation tests below: build settings with one
    ``operation_type``, let ``operations.Create`` fill in the defaults, then
    call the returned operation with a surrogate model part and a mocked
    HDF5 file while the corresponding ``KratosHDF5`` I/O class is patched
    out, and check that the I/O class was constructed once and its
    read/write method invoked once.
    """

    def test_CreateNonExistingOperation(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'abcdefg'
        # An unknown operation type must be rejected with a descriptive error.
        with self.assertRaisesRegex(ValueError, r'"operation_type" has invalid value "abcdefg"'):
            operations.Create(settings)

    def test_Prefix_Literal(self):
        # A prefix without placeholders is returned unchanged.
        model_part = _SurrogateModelPart()
        prefix = operations.model_part.Prefix('/ModelData', model_part)
        self.assertEqual(prefix, '/ModelData')

    def test_Prefix_NonTerminalTime(self):
        # '<time>' is substituted with the surrogate model part's time.
        model_part = _SurrogateModelPart()
        prefix = operations.model_part.Prefix('/ModelData-<time>', model_part)
        self.assertEqual(prefix, '/ModelData-1.23456789')

    def test_Prefix_FormattedNonTerminalTime(self):
        # The optional third argument formats the substituted time value.
        model_part = _SurrogateModelPart()
        prefix = operations.model_part.Prefix(
            '/ModelData-<time>', model_part, '0.2f')
        self.assertEqual(prefix, '/ModelData-1.23')

    def test_Prefix_NonTerminalIdentifier(self):
        # '<model_part_name>' is substituted with the model part's name.
        model_part = _SurrogateModelPart()
        prefix = operations.model_part.Prefix(
            '/<model_part_name>-<time>', model_part)
        self.assertEqual(prefix, '/model_part-1.23456789')

    def test_VariableIO_Settings(self):
        settings1 = ParametersWrapper()
        variable_io = operations.VariableIO(settings1)
        settings2 = variable_io.GetSettings(_SurrogateModelPart())
        # Both the input settings and the per-model-part settings get defaults.
        for settings in [settings1, settings2]:
            self.assertTrue(settings.Has('prefix'))
            self.assertTrue(settings.Has('list_of_variables'))
            self.assertEqual(settings['prefix'], '/ResultsData')
            self.assertTrue(settings['list_of_variables'].IsArray())
            self.assertEqual(len(settings['list_of_variables']), 0)

    def test_VariableIO_GetSettingsWithNonTerminalPrefix(self):
        input_settings = ParametersWrapper('''
            {
                "prefix": "/ModelData/<model_part_name>/<time>",
                "time_format": "0.2f"
            }
            ''')
        variable_io = operations.VariableIO(input_settings)
        settings = variable_io.GetSettings(_SurrogateModelPart())
        self.assertEqual(settings['prefix'], '/ModelData/model_part/1.23')

    def test_ModelPartOutput(self):
        settings = ParametersWrapper()
        model_part_output = operations.Create(settings)
        self.assertTrue(settings.Has('operation_type'))
        self.assertTrue(settings.Has('prefix'))
        # 'model_part_output' is the default operation type.
        self.assertEqual(settings['operation_type'], 'model_part_output')
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5ModelPartIO', autospec=True) as p:
            model_part_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            model_part_output(model_part, hdf5_file)
            p.assert_called_once_with(hdf5_file, '/ModelData')
            model_part_io.WriteModelPart.assert_called_once_with(model_part)

    def test_ModelPartOutput_NonTerminalPrefix(self):
        settings = ParametersWrapper('''
            {
                "operation_type": "model_part_output",
                "prefix": "/ModelData/<model_part_name>/<time>",
                "time_format": "0.2f"
            }
            ''')
        model_part_output = operations.Create(settings)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5ModelPartIO', autospec=True) as p:
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            model_part_output(model_part, hdf5_file)
            # Only the prefix (second positional argument) is checked here.
            args, _ = p.call_args
            self.assertEqual(args[1], '/ModelData/model_part/1.23')

    def test_NodalSolutionStepDataOutput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'nodal_solution_step_data_output'
        nodal_solution_step_data_output = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(
            nodal_solution_step_data_output, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5NodalSolutionStepDataIO', autospec=True) as p:
            nodal_solution_step_data_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            nodal_solution_step_data_output(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(nodal_solution_step_data_io.WriteNodalResults.call_count, 1)

    def test_NodalSolutionStepDataInput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'nodal_solution_step_data_input'
        nodal_solution_step_data_input = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(
            nodal_solution_step_data_input, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5NodalSolutionStepDataIO', autospec=True) as p:
            nodal_solution_step_data_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            nodal_solution_step_data_input(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(nodal_solution_step_data_io.ReadNodalResults.call_count, 1)

    def test_NodalDataValueOutput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'nodal_data_value_output'
        nodal_data_value_output = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(nodal_data_value_output, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5NodalDataValueIO', autospec=True) as p:
            nodal_data_value_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            nodal_data_value_output(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(nodal_data_value_io.WriteNodalResults.call_count, 1)

    def test_NodalFlagValueOutput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'nodal_flag_value_output'
        nodal_flag_value_output = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(nodal_flag_value_output, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5NodalFlagValueIO', autospec=True) as p:
            nodal_flag_value_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            nodal_flag_value_output(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(nodal_flag_value_io.WriteNodalFlags.call_count, 1)

    def test_NodalDataValueInput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'nodal_data_value_input'
        nodal_data_value_input = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(nodal_data_value_input, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5NodalDataValueIO', autospec=True) as p:
            nodal_data_value_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            nodal_data_value_input(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(nodal_data_value_io.ReadNodalResults.call_count, 1)

    def test_NodalFlagValueInput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'nodal_flag_value_input'
        nodal_flag_value_input = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(nodal_flag_value_input, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5NodalFlagValueIO', autospec=True) as p:
            nodal_flag_value_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            nodal_flag_value_input(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(nodal_flag_value_io.ReadNodalFlags.call_count, 1)

    def test_ElementDataValueOutput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'element_data_value_output'
        element_data_value_output = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(element_data_value_output, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5ElementDataValueIO', autospec=True) as p:
            element_data_value_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            element_data_value_output(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(element_data_value_io.WriteElementResults.call_count, 1)

    def test_ElementFlagValueOutput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'element_flag_value_output'
        element_flag_value_output = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(element_flag_value_output, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5ElementFlagValueIO', autospec=True) as p:
            element_flag_value_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            element_flag_value_output(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(element_flag_value_io.WriteElementFlags.call_count, 1)

    def test_ElementDataValueInput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'element_data_value_input'
        element_data_value_input = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(element_data_value_input, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5ElementDataValueIO', autospec=True) as p:
            element_data_value_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            element_data_value_input(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(element_data_value_io.ReadElementResults.call_count, 1)

    def test_ElementFlagValueInput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'element_flag_value_input'
        element_flag_value_input = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(element_flag_value_input, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5ElementFlagValueIO', autospec=True) as p:
            element_flag_value_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            element_flag_value_input(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(element_flag_value_io.ReadElementFlags.call_count, 1)

    def test_ConditionDataValueOutput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'condition_data_value_output'
        condition_data_value_output = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(condition_data_value_output, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5ConditionDataValueIO', autospec=True) as p:
            condition_data_value_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            condition_data_value_output(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(condition_data_value_io.WriteConditionResults.call_count, 1)

    def test_ConditionFlagValueOutput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'condition_flag_value_output'
        condition_flag_value_output = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(condition_flag_value_output, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5ConditionFlagValueIO', autospec=True) as p:
            condition_flag_value_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            condition_flag_value_output(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(condition_flag_value_io.WriteConditionFlags.call_count, 1)

    def test_ConditionDataValueInput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'condition_data_value_input'
        condition_data_value_input = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(condition_data_value_input, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5ConditionDataValueIO', autospec=True) as p:
            condition_data_value_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            condition_data_value_input(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(condition_data_value_io.ReadConditionResults.call_count, 1)

    def test_ConditionFlagValueInput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'condition_flag_value_input'
        condition_flag_value_input = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(condition_flag_value_input, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5ConditionFlagValueIO', autospec=True) as p:
            condition_flag_value_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            condition_flag_value_input(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(condition_flag_value_io.ReadConditionFlags.call_count, 1)

    def test_PrimalBossakOutput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'primal_bossak_output'
        primal_bossak_output = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(primal_bossak_output, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5NodalSolutionStepBossakIO', autospec=True) as p:
            nodal_solution_step_bossak_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            primal_bossak_output(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(nodal_solution_step_bossak_io.WriteNodalResults.call_count, 1)

    def test_PrimalBossakInput(self):
        settings = ParametersWrapper()
        settings['operation_type'] = 'primal_bossak_input'
        primal_bossak_input = operations.Create(settings)
        self.assertTrue(settings.Has('prefix'))
        self.assertTrue(settings.Has('list_of_variables'))
        self.assertTrue(settings['list_of_variables'].IsArray())
        self.assertIsInstance(primal_bossak_input, operations.VariableIO)
        with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5NodalSolutionStepBossakIO', autospec=True) as p:
            nodal_solution_step_bossak_io = p.return_value
            model_part = _SurrogateModelPart()
            hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileSerial)
            primal_bossak_input(model_part, hdf5_file)
            self.assertEqual(p.call_count, 1)
            self.assertEqual(nodal_solution_step_bossak_io.ReadNodalResults.call_count, 1)
class TestControllers(KratosUnittest.TestCase):
    """Tests for ``controllers.Create`` and the default/temporal controllers."""

    def test_CreateNonExistingController(self):
        settings = ParametersWrapper()
        settings['controller_type'] = 'abcdefg'
        # An unknown controller type must be rejected with a descriptive error.
        with self.assertRaisesRegex(ValueError, r'"controller_type" has invalid value "abcdefg"'):
            controllers.Create(MagicMock(), MagicMock(), settings)

    @patch('KratosMultiphysics.FileNameDataCollector', autospec=True)
    def test_DefaultController(self, mock_class):
        mock_instance = mock_class.return_value
        mock_instance.GetFileName.return_value = 'kratos.h5'
        model_part = _SurrogateModelPart()
        data_comm = model_part.GetCommunicator().GetDataCommunicator()
        io_settings = ParametersWrapper()
        controller_settings = ParametersWrapper()
        with patch('KratosMultiphysics.HDF5Application.core.file_io.KratosHDF5.HDF5FileSerial', autospec=True):
            io = file_io.Create(io_settings, data_comm)
            controller = controllers.Create(
                model_part, io, controller_settings)
            # 'default_controller' is filled in as the default type.
            self.assertTrue(controller_settings.Has('controller_type'))
            self.assertEqual(
                controller_settings['controller_type'], 'default_controller')
            self.assertEqual(controller.model_part, model_part)
            self.assertEqual(controller.io, io)
            operation = MagicMock(spec=operations.ModelPartOutput)
            # Invoking the controller before any operation is added is a no-op.
            controller()
            controller.Add(operation)
            # The default controller runs every added operation on each call.
            for i in range(10):
                controller()
            self.assertEqual(operation.call_count, 10)

    def test_TemporalController_CreateWithDefaults(self):
        model_part = _SurrogateModelPart()
        data_comm = model_part.GetCommunicator().GetDataCommunicator()
        io = file_io.Create(ParametersWrapper(), data_comm)
        controller_settings = ParametersWrapper()
        controller_settings['controller_type'] = 'temporal_controller'
        controller = controllers.Create(model_part, io, controller_settings)
        self.assertEqual(controller.model_part, model_part)
        self.assertEqual(controller.io, io)
        # Default output cadence: every 1.0 time units / every step, from zero.
        self.assertEqual(controller.time_frequency, 1.0)
        self.assertEqual(controller.step_frequency, 1)
        self.assertEqual(controller.current_time, 0.0)
        self.assertEqual(controller.current_step, 0)

    def test_TemporalController_CreateWithParameters(self):
        model_part = _SurrogateModelPart()
        data_comm = model_part.GetCommunicator().GetDataCommunicator()
        io = file_io.Create(ParametersWrapper(), data_comm)
        controller_settings = ParametersWrapper()
        controller_settings['controller_type'] = 'temporal_controller'
        controller_settings['time_frequency'] = 2.0
        controller_settings['step_frequency'] = 3
        controller = controllers.Create(model_part, io, controller_settings)
        self.assertEqual(controller.time_frequency, 2.0)
        self.assertEqual(controller.step_frequency, 3)

    def test_TemporalController_StepFrequency(self):
        model_part = _SurrogateModelPart()
        data_comm = model_part.GetCommunicator().GetDataCommunicator()
        controller_settings = ParametersWrapper()
        controller_settings['step_frequency'] = 2
        controller_settings['controller_type'] = 'temporal_controller'
        with patch('KratosMultiphysics.HDF5Application.core.file_io._HDF5SerialFileIO', autospec=True):
            io = file_io.Create(ParametersWrapper(), data_comm)
            controller = controllers.Create(
                model_part, io, controller_settings)
            for i in range(10):
                controller()
            # step_frequency=2 over 10 calls -> exactly 5 I/O accesses.
            io.Get.assert_called_with(model_part)
            self.assertEqual(io.Get.call_count, 5)

    def test_TemporalController_TimeFrequency(self):
        model_part = _SurrogateModelPart()
        data_comm = model_part.GetCommunicator().GetDataCommunicator()
        controller_settings = ParametersWrapper()
        # Large step_frequency so the time criterion governs alone.
        controller_settings['step_frequency'] = 100
        controller_settings['time_frequency'] = 0.5
        controller_settings['controller_type'] = 'temporal_controller'
        with patch('KratosMultiphysics.HDF5Application.core.file_io._HDF5SerialFileIO', autospec=True):
            io = file_io.Create(ParametersWrapper(), data_comm)
            controller = controllers.Create(
                model_part, io, controller_settings)
            for i in range(10):
                controller()
            io.Get.assert_called_with(model_part)
            self.assertEqual(io.Get.call_count, 2)

    def test_TemporalController_NearlyTheSameTimeFrequency(self):
        model_part = _SurrogateModelPart()
        data_comm = model_part.GetCommunicator().GetDataCommunicator()
        controller_settings = ParametersWrapper()
        controller_settings['step_frequency'] = 100
        # NOTE(review): 0.2000001 with 5 of 10 outputs suggests the surrogate's
        # step time is ~0.2 and the controller compares within a tolerance —
        # confirm against _SurrogateModelPart and the temporal controller.
        controller_settings['time_frequency'] = 0.2000001
        controller_settings['controller_type'] = 'temporal_controller'
        with patch('KratosMultiphysics.HDF5Application.core.file_io._HDF5SerialFileIO', autospec=True):
            io = file_io.Create(ParametersWrapper(), data_comm)
            controller = controllers.Create(
                model_part, io, controller_settings)
            for _ in range(10):
                controller()
            io.Get.assert_called_with(model_part)
            self.assertEqual(io.Get.call_count, 5)

    @patch('KratosMultiphysics.FileNameDataCollector', autospec=True)
    def test_TemporalController_OperationCall(self, mock_class):
        mock_instance = mock_class.return_value
        mock_instance.GetFileName.return_value = 'kratos.h5'
        model_part = _SurrogateModelPart()
        data_comm = model_part.GetCommunicator().GetDataCommunicator()
        controller_settings = ParametersWrapper()
        controller_settings['controller_type'] = 'temporal_controller'
        io = file_io.Create(ParametersWrapper(), data_comm)
        operation = MagicMock(spec=operations.ModelPartOutput)
        controller = controllers.Create(
            model_part, io, controller_settings)
        controller.Add(operation)
        with patch('KratosMultiphysics.HDF5Application.core.file_io.KratosHDF5.HDF5FileSerial', autospec=True):
            for _ in range(10):
                controller()
            self.assertEqual(operation.call_count, 10)
class TestFactory(KratosUnittest.TestCase):
    """Tests for ``core.Factory``, which builds the HDF5 process from settings."""

    def test_NonArraySettings(self):
        """Non-array settings must be rejected."""
        model = KratosMultiphysics.Model()
        settings = KratosMultiphysics.Parameters()
        with self.assertRaisesRegex(ValueError, r'Expected settings as an array'):
            core.Factory(settings, model)

    def test_EmptyArraySettings(self):
        """An empty controller list leaves the default (placeholder) model part name."""
        model = KratosMultiphysics.Model()
        settings = KratosMultiphysics.Parameters('''
            {
                "list_of_controllers" : []
            }
            ''')
        settings = ParametersWrapper(settings)
        with self.assertRaisesRegex(RuntimeError, '"PLEASE_SPECIFY_MODEL_PART_NAME" was not found'):
            core.Factory(settings['list_of_controllers'], model)

    def test_DefaultSettings(self):
        """Factory fills each controller entry with its default sub-settings."""
        model = KratosMultiphysics.Model()
        model.CreateModelPart('test')
        parent_settings = KratosMultiphysics.Parameters('''
            {
                "list_of_controllers" : [
                    {
                        "model_part_name" : "test"
                    }
                ]
            }
            ''')
        parent_settings = ParametersWrapper(parent_settings)
        core.Factory(parent_settings['list_of_controllers'], model)
        settings = parent_settings['list_of_controllers'][0]
        self.assertTrue(settings.Has('model_part_name'))
        self.assertTrue(settings.Has('process_step'))
        self.assertTrue(settings.Has('controller_settings'))
        self.assertTrue(settings.Has('io_settings'))
        self.assertTrue(settings.Has('list_of_operations'))
        self.assertTrue(settings['list_of_operations'].IsArray())
        self.assertEqual(settings['list_of_operations'].size(), 1)
        self.assertTrue(settings['list_of_operations']
                        [0].Has('operation_type'))
        self.assertTrue(settings['list_of_operations'][0].Has('prefix'))

    def test_DefaultProcess(self):
        """ExecuteInitialize on a default process writes the model part once.

        Fix: the original started the two patchers manually and only stopped
        them at the end of the method, so an assertion failure would have
        left the HDF5 classes patched for later tests. ``with`` blocks
        guarantee clean-up.
        """
        model = KratosMultiphysics.Model()
        model_part = model.CreateModelPart('test')
        parent_settings = KratosMultiphysics.Parameters('''
            {
                "list_of_controllers" : [
                    {
                        "model_part_name" : "test"
                    }
                ]
            }
            ''')
        parent_settings = ParametersWrapper(parent_settings)
        process = core.Factory(
            parent_settings['list_of_controllers'], model)
        with patch('KratosMultiphysics.HDF5Application.core.file_io.KratosHDF5.HDF5FileSerial', autospec=True), \
             patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5ModelPartIO', autospec=True) as MockHDF5ModelPartIO:
            process.ExecuteInitialize()
            model_part_io = MockHDF5ModelPartIO.return_value
            model_part_io.WriteModelPart.assert_called_once_with(model_part)
class TestParametersWrapper(KratosUnittest.TestCase):
    """Tests for ``core.ParametersWrapper``, a pythonic facade over Kratos Parameters.

    Fixes relative to the original:
    * boolean comparisons use ``assertEqual`` instead of ``assertAlmostEqual``
      (approximate comparison is meaningless for booleans and obscures intent);
    * ``test_array_keys`` uses ``enumerate`` instead of a manual counter.
    """

    def setUp(self):
        # Empty parameters for the setter tests; pre-populated for the getters.
        self.set_params = KratosMultiphysics.Parameters()
        self.get_params = KratosMultiphysics.Parameters(
            '''
            {
                "string_value" : "abc",
                "int_value": 1,
                "double_value": 1.5,
                "bool_value": true,
                "parameters" : {
                    "double_value": 3.1
                },
                "array_of_double_values": [1.1, 2.2, 3.3],
                "array_of_parameters": [{
                    "int_value": 2
                },{
                    "double_value": 2.7
                }]
            }
            '''
        )

    def test_get_string(self):
        settings = core.ParametersWrapper(self.get_params)
        self.assertEqual(settings['string_value'], 'abc')

    def test_get_int(self):
        settings = core.ParametersWrapper(self.get_params)
        self.assertEqual(settings['int_value'], 1)

    def test_get_double(self):
        settings = core.ParametersWrapper(self.get_params)
        self.assertAlmostEqual(settings['double_value'], 1.5)

    def test_get_bool(self):
        settings = core.ParametersWrapper(self.get_params)
        self.assertEqual(settings['bool_value'], True)

    def test_get_parameters(self):
        settings = core.ParametersWrapper(self.get_params)
        self.assertAlmostEqual(settings['parameters']['double_value'], 3.1)

    def test_get_array_of_values(self):
        settings = core.ParametersWrapper(self.get_params)
        self.assertAlmostEqual(settings['array_of_double_values'][0], 1.1)
        self.assertAlmostEqual(settings['array_of_double_values'][1], 2.2)
        self.assertAlmostEqual(settings['array_of_double_values'][2], 3.3)

    def test_get_array_of_parameters(self):
        settings = core.ParametersWrapper(self.get_params)
        self.assertEqual(settings['array_of_parameters'][0]['int_value'], 2)
        self.assertEqual(settings['array_of_parameters'][1]['double_value'], 2.7)

    def test_set_string(self):
        settings = core.ParametersWrapper(self.set_params)
        settings['string_value'] = 'abc'
        self.assertEqual(settings['string_value'], 'abc')

    def test_set_int(self):
        settings = core.ParametersWrapper(self.set_params)
        settings['int_value'] = 1
        self.assertEqual(settings['int_value'], 1)

    def test_set_double(self):
        settings = core.ParametersWrapper(self.set_params)
        settings['double_value'] = 1.5
        self.assertEqual(settings['double_value'], 1.5)

    def test_set_bool(self):
        settings = core.ParametersWrapper(self.set_params)
        settings['bool_value'] = True
        self.assertEqual(settings['bool_value'], True)

    def test_set_parameters(self):
        settings = core.ParametersWrapper(self.set_params)
        settings['parameters'] = KratosMultiphysics.Parameters(
            '''
            {
                "bool_value": false
            }
            '''
        )
        # Booleans round-trip exactly; exact equality is the right check.
        self.assertEqual(settings['parameters']['bool_value'], False)

    def test_set_array_of_values(self):
        settings = core.ParametersWrapper(self.set_params)
        settings['array_of_bool_values'] = [True, False]
        self.assertEqual(settings['array_of_bool_values'][0], True)
        self.assertEqual(settings['array_of_bool_values'][1], False)

    def test_set_array_of_parameters(self):
        settings = core.ParametersWrapper(self.set_params)
        settings['array_of_parameters'] = [
            self.get_params['array_of_parameters'][0],
            self.get_params['array_of_parameters'][1]
        ]
        self.assertEqual(settings['array_of_parameters'][0]['int_value'], 2)
        self.assertEqual(settings['array_of_parameters'][1]['double_value'], 2.7)

    def test_array_keys(self):
        settings = ParametersWrapper(self.get_params)
        # Iterating an array wrapper yields the integer indices in order.
        for expected_index, key in enumerate(settings['array_of_double_values']):
            self.assertEqual(key, expected_index)

    def test_nonarray_keys(self):
        settings = ParametersWrapper(self.get_params)
        # Iterating an object wrapper yields its field names.
        for k in settings['parameters']:
            self.assertEqual(k, 'double_value')
# Allow running this test module directly as a script.
if __name__ == "__main__":
    KratosUnittest.main()
| 49.23956 | 134 | 0.691863 |
170c6232c4ecbc489b80136dacb8f06cbc2da53b | 319 | py | Python | heroku_app/tests/conftest.py | DMD333/Testing_Websites | 132ed35409505a69a67f35bcc298ea8258c9a008 | [
"MIT"
] | null | null | null | heroku_app/tests/conftest.py | DMD333/Testing_Websites | 132ed35409505a69a67f35bcc298ea8258c9a008 | [
"MIT"
] | null | null | null | heroku_app/tests/conftest.py | DMD333/Testing_Websites | 132ed35409505a69a67f35bcc298ea8258c9a008 | [
"MIT"
] | null | null | null | import pytest
import selenium.webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
@pytest.fixture
def browser():
    """Yield a Chrome WebDriver (driver binary auto-installed) with a 3s
    implicit wait; the browser is quit during fixture teardown."""
    driver = selenium.webdriver.Chrome(service=Service(ChromeDriverManager().install()))
    driver.implicitly_wait(3)
    yield driver
    # Teardown: the original fixture leaked the browser process; pytest
    # resumes a yield fixture after the test, so quit the driver here.
    driver.quit()
| 29 | 88 | 0.811912 |
a296a5c0096b3cdf145e2d5e549f7154de269a32 | 177 | py | Python | doxybook/templates/pages.py | matusnovak/doxybook | 6d0e2b93caf3f35cf242cdc95c65230e89cb975b | [
"MIT"
] | 30 | 2018-09-06T11:26:02.000Z | 2022-03-24T11:30:50.000Z | doxybook/templates/pages.py | matusnovak/doxybook | 6d0e2b93caf3f35cf242cdc95c65230e89cb975b | [
"MIT"
] | 10 | 2018-06-20T11:29:59.000Z | 2021-07-31T11:20:04.000Z | doxybook/templates/pages.py | matusnovak/doxybook | 6d0e2b93caf3f35cf242cdc95c65230e89cb975b | [
"MIT"
] | 10 | 2018-04-12T11:27:51.000Z | 2021-12-20T12:17:03.000Z | TEMPLATE = """
# Related Pages
Here is a list of all related documentation pages:
{% for page in nodes -%}
* [*{{page.title}}*]({{page.url}}) {{page.brief}}
{% endfor -%}
"""
| 17.7 | 50 | 0.59887 |
4d268377636b97a94f891c5bad3338857484b1a5 | 46,345 | py | Python | src/bondora_api/models/second_market_request.py | parruc/bondora_api | f36ea8d7149d75a2e5f14a695e5a4e57f0a3518d | [
"Apache-2.0"
] | 8 | 2019-03-09T20:38:27.000Z | 2021-02-10T20:44:22.000Z | src/bondora_api/models/second_market_request.py | parruc/bondora_api | f36ea8d7149d75a2e5f14a695e5a4e57f0a3518d | [
"Apache-2.0"
] | 1 | 2018-03-06T09:44:21.000Z | 2018-03-06T09:44:21.000Z | src/bondora_api/models/second_market_request.py | parruc/bondora_api | f36ea8d7149d75a2e5f14a695e5a4e57f0a3518d | [
"Apache-2.0"
] | 3 | 2019-06-03T13:44:05.000Z | 2020-11-16T13:17:38.000Z | # coding: utf-8
"""
Bondora API V1
Bondora API version 1
OpenAPI spec version: v1
Contact: investor@bondora.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class SecondMarketRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
    def __init__(self, loan_issued_date_from=None, loan_issued_date_to=None, principal_min=None, principal_max=None, interest_min=None, interest_max=None, length_max=None, length_min=None, has_debt=None, loan_status_code=None, loan_debt_management_stage_type=None, loan_debt_management_date_active_from=None, loan_debt_management_date_active_to=None, late_principal_amount_min=None, late_principal_amount_max=None, price_min=None, price_max=None, use_of_loan=None, has_new_schedule=None, countries=None, ratings=None, credit_score_min=None, credit_score_max=None, user_name=None, gender=None, age_min=None, age_max=None, income_verification_status=None, show_my_items=None, auction_id=None, listed_on_date_from=None, listed_on_date_to=None, debt_occured_on_from=None, debt_occured_on_to=None, debt_occured_on_for_secondary_from=None, debt_occured_on_for_secondary_to=None, defaulted_date_from=None, defaulted_date_to=None, rescheduled_from=None, rescheduled_to=None, last_payment_date_from=None, last_payment_date_to=None, next_payment_date_from=None, next_payment_date_to=None, desired_discount_rate_min=None, desired_discount_rate_max=None, xirr_min=None, xirr_max=None, page_size=None, page_nr=None):
        """
        SecondMarketRequest - a model defined in Swagger

        All filter fields are optional and default to None (meaning "no
        constraint" for that criterion in the secondary-market search).

        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # Maps python attribute name -> swagger/python type, used by the
        # generated API client when (de)serializing this model.
        self.swagger_types = {
            'loan_issued_date_from': 'datetime',
            'loan_issued_date_to': 'datetime',
            'principal_min': 'float',
            'principal_max': 'float',
            'interest_min': 'float',
            'interest_max': 'float',
            'length_max': 'int',
            'length_min': 'int',
            'has_debt': 'bool',
            'loan_status_code': 'list[int]',
            'loan_debt_management_stage_type': 'int',
            'loan_debt_management_date_active_from': 'datetime',
            'loan_debt_management_date_active_to': 'datetime',
            'late_principal_amount_min': 'float',
            'late_principal_amount_max': 'float',
            'price_min': 'float',
            'price_max': 'float',
            'use_of_loan': 'int',
            'has_new_schedule': 'bool',
            'countries': 'list[str]',
            'ratings': 'list[str]',
            'credit_score_min': 'int',
            'credit_score_max': 'int',
            'user_name': 'str',
            'gender': 'int',
            'age_min': 'int',
            'age_max': 'int',
            'income_verification_status': 'int',
            'show_my_items': 'bool',
            'auction_id': 'str',
            'listed_on_date_from': 'datetime',
            'listed_on_date_to': 'datetime',
            'debt_occured_on_from': 'datetime',
            'debt_occured_on_to': 'datetime',
            'debt_occured_on_for_secondary_from': 'datetime',
            'debt_occured_on_for_secondary_to': 'datetime',
            'defaulted_date_from': 'datetime',
            'defaulted_date_to': 'datetime',
            'rescheduled_from': 'datetime',
            'rescheduled_to': 'datetime',
            'last_payment_date_from': 'datetime',
            'last_payment_date_to': 'datetime',
            'next_payment_date_from': 'datetime',
            'next_payment_date_to': 'datetime',
            'desired_discount_rate_min': 'float',
            'desired_discount_rate_max': 'float',
            'xirr_min': 'float',
            'xirr_max': 'float',
            'page_size': 'int',
            'page_nr': 'int'
        }

        # Maps python attribute name -> JSON key on the wire (PascalCase).
        self.attribute_map = {
            'loan_issued_date_from': 'LoanIssuedDateFrom',
            'loan_issued_date_to': 'LoanIssuedDateTo',
            'principal_min': 'PrincipalMin',
            'principal_max': 'PrincipalMax',
            'interest_min': 'InterestMin',
            'interest_max': 'InterestMax',
            'length_max': 'LengthMax',
            'length_min': 'LengthMin',
            'has_debt': 'HasDebt',
            'loan_status_code': 'LoanStatusCode',
            'loan_debt_management_stage_type': 'LoanDebtManagementStageType',
            'loan_debt_management_date_active_from': 'LoanDebtManagementDateActiveFrom',
            'loan_debt_management_date_active_to': 'LoanDebtManagementDateActiveTo',
            'late_principal_amount_min': 'LatePrincipalAmountMin',
            'late_principal_amount_max': 'LatePrincipalAmountMax',
            'price_min': 'PriceMin',
            'price_max': 'PriceMax',
            'use_of_loan': 'UseOfLoan',
            'has_new_schedule': 'HasNewSchedule',
            'countries': 'Countries',
            'ratings': 'Ratings',
            'credit_score_min': 'CreditScoreMin',
            'credit_score_max': 'CreditScoreMax',
            'user_name': 'UserName',
            'gender': 'Gender',
            'age_min': 'AgeMin',
            'age_max': 'AgeMax',
            'income_verification_status': 'IncomeVerificationStatus',
            'show_my_items': 'ShowMyItems',
            'auction_id': 'AuctionId',
            'listed_on_date_from': 'ListedOnDateFrom',
            'listed_on_date_to': 'ListedOnDateTo',
            'debt_occured_on_from': 'DebtOccuredOnFrom',
            'debt_occured_on_to': 'DebtOccuredOnTo',
            'debt_occured_on_for_secondary_from': 'DebtOccuredOnForSecondaryFrom',
            'debt_occured_on_for_secondary_to': 'DebtOccuredOnForSecondaryTo',
            'defaulted_date_from': 'DefaultedDateFrom',
            'defaulted_date_to': 'DefaultedDateTo',
            'rescheduled_from': 'RescheduledFrom',
            'rescheduled_to': 'RescheduledTo',
            'last_payment_date_from': 'LastPaymentDateFrom',
            'last_payment_date_to': 'LastPaymentDateTo',
            'next_payment_date_from': 'NextPaymentDateFrom',
            'next_payment_date_to': 'NextPaymentDateTo',
            'desired_discount_rate_min': 'DesiredDiscountRateMin',
            'desired_discount_rate_max': 'DesiredDiscountRateMax',
            'xirr_min': 'XirrMin',
            'xirr_max': 'XirrMax',
            'page_size': 'PageSize',
            'page_nr': 'PageNr'
        }

        # NOTE(review): the backing fields are assigned directly, so the
        # allowed_values validation performed by some property setters
        # (e.g. use_of_loan, income_verification_status) is NOT applied to
        # constructor arguments.
        self._loan_issued_date_from = loan_issued_date_from
        self._loan_issued_date_to = loan_issued_date_to
        self._principal_min = principal_min
        self._principal_max = principal_max
        self._interest_min = interest_min
        self._interest_max = interest_max
        self._length_max = length_max
        self._length_min = length_min
        self._has_debt = has_debt
        self._loan_status_code = loan_status_code
        self._loan_debt_management_stage_type = loan_debt_management_stage_type
        self._loan_debt_management_date_active_from = loan_debt_management_date_active_from
        self._loan_debt_management_date_active_to = loan_debt_management_date_active_to
        self._late_principal_amount_min = late_principal_amount_min
        self._late_principal_amount_max = late_principal_amount_max
        self._price_min = price_min
        self._price_max = price_max
        self._use_of_loan = use_of_loan
        self._has_new_schedule = has_new_schedule
        self._countries = countries
        self._ratings = ratings
        self._credit_score_min = credit_score_min
        self._credit_score_max = credit_score_max
        self._user_name = user_name
        self._gender = gender
        self._age_min = age_min
        self._age_max = age_max
        self._income_verification_status = income_verification_status
        self._show_my_items = show_my_items
        self._auction_id = auction_id
        self._listed_on_date_from = listed_on_date_from
        self._listed_on_date_to = listed_on_date_to
        self._debt_occured_on_from = debt_occured_on_from
        self._debt_occured_on_to = debt_occured_on_to
        self._debt_occured_on_for_secondary_from = debt_occured_on_for_secondary_from
        self._debt_occured_on_for_secondary_to = debt_occured_on_for_secondary_to
        self._defaulted_date_from = defaulted_date_from
        self._defaulted_date_to = defaulted_date_to
        self._rescheduled_from = rescheduled_from
        self._rescheduled_to = rescheduled_to
        self._last_payment_date_from = last_payment_date_from
        self._last_payment_date_to = last_payment_date_to
        self._next_payment_date_from = next_payment_date_from
        self._next_payment_date_to = next_payment_date_to
        self._desired_discount_rate_min = desired_discount_rate_min
        self._desired_discount_rate_max = desired_discount_rate_max
        self._xirr_min = xirr_min
        self._xirr_max = xirr_max
        self._page_size = page_size
        self._page_nr = page_nr
@property
def loan_issued_date_from(self):
"""
Gets the loan_issued_date_from of this SecondMarketRequest.
Loan issued start date from
:return: The loan_issued_date_from of this SecondMarketRequest.
:rtype: datetime
"""
return self._loan_issued_date_from
@loan_issued_date_from.setter
def loan_issued_date_from(self, loan_issued_date_from):
"""
Sets the loan_issued_date_from of this SecondMarketRequest.
Loan issued start date from
:param loan_issued_date_from: The loan_issued_date_from of this SecondMarketRequest.
:type: datetime
"""
self._loan_issued_date_from = loan_issued_date_from
@property
def loan_issued_date_to(self):
"""
Gets the loan_issued_date_to of this SecondMarketRequest.
Loan issued start date to
:return: The loan_issued_date_to of this SecondMarketRequest.
:rtype: datetime
"""
return self._loan_issued_date_to
@loan_issued_date_to.setter
def loan_issued_date_to(self, loan_issued_date_to):
"""
Sets the loan_issued_date_to of this SecondMarketRequest.
Loan issued start date to
:param loan_issued_date_to: The loan_issued_date_to of this SecondMarketRequest.
:type: datetime
"""
self._loan_issued_date_to = loan_issued_date_to
@property
def principal_min(self):
"""
Gets the principal_min of this SecondMarketRequest.
Remaining principal amount min
:return: The principal_min of this SecondMarketRequest.
:rtype: float
"""
return self._principal_min
@principal_min.setter
def principal_min(self, principal_min):
"""
Sets the principal_min of this SecondMarketRequest.
Remaining principal amount min
:param principal_min: The principal_min of this SecondMarketRequest.
:type: float
"""
self._principal_min = principal_min
@property
def principal_max(self):
"""
Gets the principal_max of this SecondMarketRequest.
Remaining principal amount max
:return: The principal_max of this SecondMarketRequest.
:rtype: float
"""
return self._principal_max
@principal_max.setter
def principal_max(self, principal_max):
"""
Sets the principal_max of this SecondMarketRequest.
Remaining principal amount max
:param principal_max: The principal_max of this SecondMarketRequest.
:type: float
"""
self._principal_max = principal_max
@property
def interest_min(self):
"""
Gets the interest_min of this SecondMarketRequest.
Interest rate min
:return: The interest_min of this SecondMarketRequest.
:rtype: float
"""
return self._interest_min
@interest_min.setter
def interest_min(self, interest_min):
"""
Sets the interest_min of this SecondMarketRequest.
Interest rate min
:param interest_min: The interest_min of this SecondMarketRequest.
:type: float
"""
self._interest_min = interest_min
@property
def interest_max(self):
"""
Gets the interest_max of this SecondMarketRequest.
Interest rate max
:return: The interest_max of this SecondMarketRequest.
:rtype: float
"""
return self._interest_max
@interest_max.setter
def interest_max(self, interest_max):
"""
Sets the interest_max of this SecondMarketRequest.
Interest rate max
:param interest_max: The interest_max of this SecondMarketRequest.
:type: float
"""
self._interest_max = interest_max
@property
def length_max(self):
"""
Gets the length_max of this SecondMarketRequest.
Loan lenght min
:return: The length_max of this SecondMarketRequest.
:rtype: int
"""
return self._length_max
@length_max.setter
def length_max(self, length_max):
"""
Sets the length_max of this SecondMarketRequest.
Loan lenght min
:param length_max: The length_max of this SecondMarketRequest.
:type: int
"""
self._length_max = length_max
@property
def length_min(self):
"""
Gets the length_min of this SecondMarketRequest.
Loan lenght max
:return: The length_min of this SecondMarketRequest.
:rtype: int
"""
return self._length_min
@length_min.setter
def length_min(self, length_min):
"""
Sets the length_min of this SecondMarketRequest.
Loan lenght max
:param length_min: The length_min of this SecondMarketRequest.
:type: int
"""
self._length_min = length_min
@property
def has_debt(self):
"""
Gets the has_debt of this SecondMarketRequest.
Is overdue
:return: The has_debt of this SecondMarketRequest.
:rtype: bool
"""
return self._has_debt
@has_debt.setter
def has_debt(self, has_debt):
"""
Sets the has_debt of this SecondMarketRequest.
Is overdue
:param has_debt: The has_debt of this SecondMarketRequest.
:type: bool
"""
self._has_debt = has_debt
@property
def loan_status_code(self):
"""
Gets the loan_status_code of this SecondMarketRequest.
Loan status code <para>2 Current</para><para>100 Overdue</para><para>5 60+ days overdue</para>
:return: The loan_status_code of this SecondMarketRequest.
:rtype: list[int]
"""
return self._loan_status_code
@loan_status_code.setter
def loan_status_code(self, loan_status_code):
"""
Sets the loan_status_code of this SecondMarketRequest.
Loan status code <para>2 Current</para><para>100 Overdue</para><para>5 60+ days overdue</para>
:param loan_status_code: The loan_status_code of this SecondMarketRequest.
:type: list[int]
"""
self._loan_status_code = loan_status_code
@property
def loan_debt_management_stage_type(self):
"""
Gets the loan_debt_management_stage_type of this SecondMarketRequest.
Latest debt management stage type
:return: The loan_debt_management_stage_type of this SecondMarketRequest.
:rtype: int
"""
return self._loan_debt_management_stage_type
@loan_debt_management_stage_type.setter
def loan_debt_management_stage_type(self, loan_debt_management_stage_type):
"""
Sets the loan_debt_management_stage_type of this SecondMarketRequest.
Latest debt management stage type
:param loan_debt_management_stage_type: The loan_debt_management_stage_type of this SecondMarketRequest.
:type: int
"""
allowed_values = [1, 2, 3, -1]
if loan_debt_management_stage_type not in allowed_values:
raise ValueError(
"Invalid value for `loan_debt_management_stage_type` ({0}), must be one of {1}"
.format(loan_debt_management_stage_type, allowed_values)
)
self._loan_debt_management_stage_type = loan_debt_management_stage_type
@property
def loan_debt_management_date_active_from(self):
"""
Gets the loan_debt_management_date_active_from of this SecondMarketRequest.
Latest debt management date active from
:return: The loan_debt_management_date_active_from of this SecondMarketRequest.
:rtype: datetime
"""
return self._loan_debt_management_date_active_from
@loan_debt_management_date_active_from.setter
def loan_debt_management_date_active_from(self, loan_debt_management_date_active_from):
"""
Sets the loan_debt_management_date_active_from of this SecondMarketRequest.
Latest debt management date active from
:param loan_debt_management_date_active_from: The loan_debt_management_date_active_from of this SecondMarketRequest.
:type: datetime
"""
self._loan_debt_management_date_active_from = loan_debt_management_date_active_from
@property
def loan_debt_management_date_active_to(self):
"""
Gets the loan_debt_management_date_active_to of this SecondMarketRequest.
Latest debt management date active to
:return: The loan_debt_management_date_active_to of this SecondMarketRequest.
:rtype: datetime
"""
return self._loan_debt_management_date_active_to
@loan_debt_management_date_active_to.setter
def loan_debt_management_date_active_to(self, loan_debt_management_date_active_to):
"""
Sets the loan_debt_management_date_active_to of this SecondMarketRequest.
Latest debt management date active to
:param loan_debt_management_date_active_to: The loan_debt_management_date_active_to of this SecondMarketRequest.
:type: datetime
"""
self._loan_debt_management_date_active_to = loan_debt_management_date_active_to
@property
def late_principal_amount_min(self):
"""
Gets the late_principal_amount_min of this SecondMarketRequest.
Principal debt amount min
:return: The late_principal_amount_min of this SecondMarketRequest.
:rtype: float
"""
return self._late_principal_amount_min
@late_principal_amount_min.setter
def late_principal_amount_min(self, late_principal_amount_min):
"""
Sets the late_principal_amount_min of this SecondMarketRequest.
Principal debt amount min
:param late_principal_amount_min: The late_principal_amount_min of this SecondMarketRequest.
:type: float
"""
self._late_principal_amount_min = late_principal_amount_min
@property
def late_principal_amount_max(self):
"""
Gets the late_principal_amount_max of this SecondMarketRequest.
Principal debt amount max
:return: The late_principal_amount_max of this SecondMarketRequest.
:rtype: float
"""
return self._late_principal_amount_max
@late_principal_amount_max.setter
def late_principal_amount_max(self, late_principal_amount_max):
"""
Sets the late_principal_amount_max of this SecondMarketRequest.
Principal debt amount max
:param late_principal_amount_max: The late_principal_amount_max of this SecondMarketRequest.
:type: float
"""
self._late_principal_amount_max = late_principal_amount_max
@property
def price_min(self):
"""
Gets the price_min of this SecondMarketRequest.
Price amount min
:return: The price_min of this SecondMarketRequest.
:rtype: float
"""
return self._price_min
@price_min.setter
def price_min(self, price_min):
"""
Sets the price_min of this SecondMarketRequest.
Price amount min
:param price_min: The price_min of this SecondMarketRequest.
:type: float
"""
self._price_min = price_min
@property
def price_max(self):
"""
Gets the price_max of this SecondMarketRequest.
Price amount max
:return: The price_max of this SecondMarketRequest.
:rtype: float
"""
return self._price_max
@price_max.setter
def price_max(self, price_max):
"""
Sets the price_max of this SecondMarketRequest.
Price amount max
:param price_max: The price_max of this SecondMarketRequest.
:type: float
"""
self._price_max = price_max
@property
def use_of_loan(self):
"""
Gets the use_of_loan of this SecondMarketRequest.
Use of loan
:return: The use_of_loan of this SecondMarketRequest.
:rtype: int
"""
return self._use_of_loan
@use_of_loan.setter
def use_of_loan(self, use_of_loan):
"""
Sets the use_of_loan of this SecondMarketRequest.
Use of loan
:param use_of_loan: The use_of_loan of this SecondMarketRequest.
:type: int
"""
allowed_values = [0, 1, 2, 3, 4, 5, 6, 7, 8, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, -1]
if use_of_loan not in allowed_values:
raise ValueError(
"Invalid value for `use_of_loan` ({0}), must be one of {1}"
.format(use_of_loan, allowed_values)
)
self._use_of_loan = use_of_loan
@property
def has_new_schedule(self):
"""
Gets the has_new_schedule of this SecondMarketRequest.
Has been rescheduled
:return: The has_new_schedule of this SecondMarketRequest.
:rtype: bool
"""
return self._has_new_schedule
@has_new_schedule.setter
def has_new_schedule(self, has_new_schedule):
"""
Sets the has_new_schedule of this SecondMarketRequest.
Has been rescheduled
:param has_new_schedule: The has_new_schedule of this SecondMarketRequest.
:type: bool
"""
self._has_new_schedule = has_new_schedule
@property
def countries(self):
"""
Gets the countries of this SecondMarketRequest.
Two letter iso code for country of origin: EE, ES, FI
:return: The countries of this SecondMarketRequest.
:rtype: list[str]
"""
return self._countries
@countries.setter
def countries(self, countries):
"""
Sets the countries of this SecondMarketRequest.
Two letter iso code for country of origin: EE, ES, FI
:param countries: The countries of this SecondMarketRequest.
:type: list[str]
"""
self._countries = countries
@property
def ratings(self):
"""
Gets the ratings of this SecondMarketRequest.
Bondora's rating: AA, A, B, C, D, E, F, HR
:return: The ratings of this SecondMarketRequest.
:rtype: list[str]
"""
return self._ratings
@ratings.setter
def ratings(self, ratings):
"""
Sets the ratings of this SecondMarketRequest.
Bondora's rating: AA, A, B, C, D, E, F, HR
:param ratings: The ratings of this SecondMarketRequest.
:type: list[str]
"""
self._ratings = ratings
@property
def credit_score_min(self):
"""
Gets the credit_score_min of this SecondMarketRequest.
Minimum credit score
:return: The credit_score_min of this SecondMarketRequest.
:rtype: int
"""
return self._credit_score_min
@credit_score_min.setter
def credit_score_min(self, credit_score_min):
"""
Sets the credit_score_min of this SecondMarketRequest.
Minimum credit score
:param credit_score_min: The credit_score_min of this SecondMarketRequest.
:type: int
"""
self._credit_score_min = credit_score_min
@property
def credit_score_max(self):
"""
Gets the credit_score_max of this SecondMarketRequest.
Maximum credit score
:return: The credit_score_max of this SecondMarketRequest.
:rtype: int
"""
return self._credit_score_max
@credit_score_max.setter
def credit_score_max(self, credit_score_max):
"""
Sets the credit_score_max of this SecondMarketRequest.
Maximum credit score
:param credit_score_max: The credit_score_max of this SecondMarketRequest.
:type: int
"""
self._credit_score_max = credit_score_max
@property
def user_name(self):
"""
Gets the user_name of this SecondMarketRequest.
Borrower's username
:return: The user_name of this SecondMarketRequest.
:rtype: str
"""
return self._user_name
@user_name.setter
def user_name(self, user_name):
"""
Sets the user_name of this SecondMarketRequest.
Borrower's username
:param user_name: The user_name of this SecondMarketRequest.
:type: str
"""
self._user_name = user_name
@property
def gender(self):
"""
Gets the gender of this SecondMarketRequest.
Borrower's gender: Male 0, Female 1, Unknown 2
:return: The gender of this SecondMarketRequest.
:rtype: int
"""
return self._gender
@gender.setter
def gender(self, gender):
"""
Sets the gender of this SecondMarketRequest.
Borrower's gender: Male 0, Female 1, Unknown 2
:param gender: The gender of this SecondMarketRequest.
:type: int
"""
self._gender = gender
@property
def age_min(self):
"""
Gets the age_min of this SecondMarketRequest.
Minimal age
:return: The age_min of this SecondMarketRequest.
:rtype: int
"""
return self._age_min
@age_min.setter
def age_min(self, age_min):
"""
Sets the age_min of this SecondMarketRequest.
Minimal age
:param age_min: The age_min of this SecondMarketRequest.
:type: int
"""
self._age_min = age_min
@property
def age_max(self):
"""
Gets the age_max of this SecondMarketRequest.
Maximum age
:return: The age_max of this SecondMarketRequest.
:rtype: int
"""
return self._age_max
@age_max.setter
def age_max(self, age_max):
"""
Sets the age_max of this SecondMarketRequest.
Maximum age
:param age_max: The age_max of this SecondMarketRequest.
:type: int
"""
self._age_max = age_max
@property
def income_verification_status(self):
"""
Gets the income_verification_status of this SecondMarketRequest.
Income verification type
:return: The income_verification_status of this SecondMarketRequest.
:rtype: int
"""
return self._income_verification_status
@income_verification_status.setter
def income_verification_status(self, income_verification_status):
"""
Sets the income_verification_status of this SecondMarketRequest.
Income verification type
:param income_verification_status: The income_verification_status of this SecondMarketRequest.
:type: int
"""
allowed_values = [1, 2, 3, 4]
if income_verification_status not in allowed_values:
raise ValueError(
"Invalid value for `income_verification_status` ({0}), must be one of {1}"
.format(income_verification_status, allowed_values)
)
self._income_verification_status = income_verification_status
@property
def show_my_items(self):
"""
Gets the show_my_items of this SecondMarketRequest.
Can find your own items from market: Value Null = ALL, True = only your items, False = other user items
:return: The show_my_items of this SecondMarketRequest.
:rtype: bool
"""
return self._show_my_items
@show_my_items.setter
def show_my_items(self, show_my_items):
"""
Sets the show_my_items of this SecondMarketRequest.
Can find your own items from market: Value Null = ALL, True = only your items, False = other user items
:param show_my_items: The show_my_items of this SecondMarketRequest.
:type: bool
"""
self._show_my_items = show_my_items
@property
def auction_id(self):
"""
Gets the auction_id of this SecondMarketRequest.
Can find specific auction from market
:return: The auction_id of this SecondMarketRequest.
:rtype: str
"""
return self._auction_id
@auction_id.setter
def auction_id(self, auction_id):
"""
Sets the auction_id of this SecondMarketRequest.
Can find specific auction from market
:param auction_id: The auction_id of this SecondMarketRequest.
:type: str
"""
self._auction_id = auction_id
@property
def listed_on_date_from(self):
"""
Gets the listed_on_date_from of this SecondMarketRequest.
Date when item was published from
:return: The listed_on_date_from of this SecondMarketRequest.
:rtype: datetime
"""
return self._listed_on_date_from
@listed_on_date_from.setter
def listed_on_date_from(self, listed_on_date_from):
"""
Sets the listed_on_date_from of this SecondMarketRequest.
Date when item was published from
:param listed_on_date_from: The listed_on_date_from of this SecondMarketRequest.
:type: datetime
"""
self._listed_on_date_from = listed_on_date_from
@property
def listed_on_date_to(self):
"""
Gets the listed_on_date_to of this SecondMarketRequest.
Date when item was published to
:return: The listed_on_date_to of this SecondMarketRequest.
:rtype: datetime
"""
return self._listed_on_date_to
@listed_on_date_to.setter
def listed_on_date_to(self, listed_on_date_to):
"""
Sets the listed_on_date_to of this SecondMarketRequest.
Date when item was published to
:param listed_on_date_to: The listed_on_date_to of this SecondMarketRequest.
:type: datetime
"""
self._listed_on_date_to = listed_on_date_to
@property
def debt_occured_on_from(self):
"""
Gets the debt_occured_on_from of this SecondMarketRequest.
Principal debt started date from
:return: The debt_occured_on_from of this SecondMarketRequest.
:rtype: datetime
"""
return self._debt_occured_on_from
@debt_occured_on_from.setter
def debt_occured_on_from(self, debt_occured_on_from):
"""
Sets the debt_occured_on_from of this SecondMarketRequest.
Principal debt started date from
:param debt_occured_on_from: The debt_occured_on_from of this SecondMarketRequest.
:type: datetime
"""
self._debt_occured_on_from = debt_occured_on_from
@property
def debt_occured_on_to(self):
"""
Gets the debt_occured_on_to of this SecondMarketRequest.
Principal debt started date to
:return: The debt_occured_on_to of this SecondMarketRequest.
:rtype: datetime
"""
return self._debt_occured_on_to
@debt_occured_on_to.setter
def debt_occured_on_to(self, debt_occured_on_to):
"""
Sets the debt_occured_on_to of this SecondMarketRequest.
Principal debt started date to
:param debt_occured_on_to: The debt_occured_on_to of this SecondMarketRequest.
:type: datetime
"""
self._debt_occured_on_to = debt_occured_on_to
@property
def debt_occured_on_for_secondary_from(self):
"""
Gets the debt_occured_on_for_secondary_from of this SecondMarketRequest.
Interest debt started date from
:return: The debt_occured_on_for_secondary_from of this SecondMarketRequest.
:rtype: datetime
"""
return self._debt_occured_on_for_secondary_from
@debt_occured_on_for_secondary_from.setter
def debt_occured_on_for_secondary_from(self, debt_occured_on_for_secondary_from):
"""
Sets the debt_occured_on_for_secondary_from of this SecondMarketRequest.
Interest debt started date from
:param debt_occured_on_for_secondary_from: The debt_occured_on_for_secondary_from of this SecondMarketRequest.
:type: datetime
"""
self._debt_occured_on_for_secondary_from = debt_occured_on_for_secondary_from
@property
def debt_occured_on_for_secondary_to(self):
"""
Gets the debt_occured_on_for_secondary_to of this SecondMarketRequest.
Interest debt started date to
:return: The debt_occured_on_for_secondary_to of this SecondMarketRequest.
:rtype: datetime
"""
return self._debt_occured_on_for_secondary_to
@debt_occured_on_for_secondary_to.setter
def debt_occured_on_for_secondary_to(self, debt_occured_on_for_secondary_to):
"""
Sets the debt_occured_on_for_secondary_to of this SecondMarketRequest.
Interest debt started date to
:param debt_occured_on_for_secondary_to: The debt_occured_on_for_secondary_to of this SecondMarketRequest.
:type: datetime
"""
self._debt_occured_on_for_secondary_to = debt_occured_on_for_secondary_to
@property
def defaulted_date_from(self):
"""
Gets the defaulted_date_from of this SecondMarketRequest.
Defaulted date from
:return: The defaulted_date_from of this SecondMarketRequest.
:rtype: datetime
"""
return self._defaulted_date_from
@defaulted_date_from.setter
def defaulted_date_from(self, defaulted_date_from):
"""
Sets the defaulted_date_from of this SecondMarketRequest.
Defaulted date from
:param defaulted_date_from: The defaulted_date_from of this SecondMarketRequest.
:type: datetime
"""
self._defaulted_date_from = defaulted_date_from
@property
def defaulted_date_to(self):
"""
Gets the defaulted_date_to of this SecondMarketRequest.
Defaulted date to
:return: The defaulted_date_to of this SecondMarketRequest.
:rtype: datetime
"""
return self._defaulted_date_to
@defaulted_date_to.setter
def defaulted_date_to(self, defaulted_date_to):
"""
Sets the defaulted_date_to of this SecondMarketRequest.
Defaulted date to
:param defaulted_date_to: The defaulted_date_to of this SecondMarketRequest.
:type: datetime
"""
self._defaulted_date_to = defaulted_date_to
@property
def rescheduled_from(self):
"""
Gets the rescheduled_from of this SecondMarketRequest.
Rescheduled date from
:return: The rescheduled_from of this SecondMarketRequest.
:rtype: datetime
"""
return self._rescheduled_from
@rescheduled_from.setter
def rescheduled_from(self, rescheduled_from):
"""
Sets the rescheduled_from of this SecondMarketRequest.
Rescheduled date from
:param rescheduled_from: The rescheduled_from of this SecondMarketRequest.
:type: datetime
"""
self._rescheduled_from = rescheduled_from
@property
def rescheduled_to(self):
"""
Gets the rescheduled_to of this SecondMarketRequest.
Rescheduled date to
:return: The rescheduled_to of this SecondMarketRequest.
:rtype: datetime
"""
return self._rescheduled_to
@rescheduled_to.setter
def rescheduled_to(self, rescheduled_to):
"""
Sets the rescheduled_to of this SecondMarketRequest.
Rescheduled date to
:param rescheduled_to: The rescheduled_to of this SecondMarketRequest.
:type: datetime
"""
self._rescheduled_to = rescheduled_to
@property
def last_payment_date_from(self):
"""
Gets the last_payment_date_from of this SecondMarketRequest.
Last payment date from
:return: The last_payment_date_from of this SecondMarketRequest.
:rtype: datetime
"""
return self._last_payment_date_from
@last_payment_date_from.setter
def last_payment_date_from(self, last_payment_date_from):
"""
Sets the last_payment_date_from of this SecondMarketRequest.
Last payment date from
:param last_payment_date_from: The last_payment_date_from of this SecondMarketRequest.
:type: datetime
"""
self._last_payment_date_from = last_payment_date_from
@property
def last_payment_date_to(self):
"""
Gets the last_payment_date_to of this SecondMarketRequest.
Last payment date to
:return: The last_payment_date_to of this SecondMarketRequest.
:rtype: datetime
"""
return self._last_payment_date_to
@last_payment_date_to.setter
def last_payment_date_to(self, last_payment_date_to):
"""
Sets the last_payment_date_to of this SecondMarketRequest.
Last payment date to
:param last_payment_date_to: The last_payment_date_to of this SecondMarketRequest.
:type: datetime
"""
self._last_payment_date_to = last_payment_date_to
@property
def next_payment_date_from(self):
"""
Gets the next_payment_date_from of this SecondMarketRequest.
Next payment date from
:return: The next_payment_date_from of this SecondMarketRequest.
:rtype: datetime
"""
return self._next_payment_date_from
@next_payment_date_from.setter
def next_payment_date_from(self, next_payment_date_from):
"""
Sets the next_payment_date_from of this SecondMarketRequest.
Next payment date from
:param next_payment_date_from: The next_payment_date_from of this SecondMarketRequest.
:type: datetime
"""
self._next_payment_date_from = next_payment_date_from
@property
def next_payment_date_to(self):
"""
Gets the next_payment_date_to of this SecondMarketRequest.
Next payment date to
:return: The next_payment_date_to of this SecondMarketRequest.
:rtype: datetime
"""
return self._next_payment_date_to
@next_payment_date_to.setter
def next_payment_date_to(self, next_payment_date_to):
"""
Sets the next_payment_date_to of this SecondMarketRequest.
Next payment date to
:param next_payment_date_to: The next_payment_date_to of this SecondMarketRequest.
:type: datetime
"""
self._next_payment_date_to = next_payment_date_to
@property
def desired_discount_rate_min(self):
"""
Gets the desired_discount_rate_min of this SecondMarketRequest.
Minimal DesiredDiscountRate
:return: The desired_discount_rate_min of this SecondMarketRequest.
:rtype: float
"""
return self._desired_discount_rate_min
@desired_discount_rate_min.setter
def desired_discount_rate_min(self, desired_discount_rate_min):
"""
Sets the desired_discount_rate_min of this SecondMarketRequest.
Minimal DesiredDiscountRate
:param desired_discount_rate_min: The desired_discount_rate_min of this SecondMarketRequest.
:type: float
"""
self._desired_discount_rate_min = desired_discount_rate_min
@property
def desired_discount_rate_max(self):
"""
Gets the desired_discount_rate_max of this SecondMarketRequest.
Maximal DesiredDiscountRate
:return: The desired_discount_rate_max of this SecondMarketRequest.
:rtype: float
"""
return self._desired_discount_rate_max
@desired_discount_rate_max.setter
def desired_discount_rate_max(self, desired_discount_rate_max):
"""
Sets the desired_discount_rate_max of this SecondMarketRequest.
Maximal DesiredDiscountRate
:param desired_discount_rate_max: The desired_discount_rate_max of this SecondMarketRequest.
:type: float
"""
self._desired_discount_rate_max = desired_discount_rate_max
@property
def xirr_min(self):
"""
Gets the xirr_min of this SecondMarketRequest.
Minimal Xirr
:return: The xirr_min of this SecondMarketRequest.
:rtype: float
"""
return self._xirr_min
@xirr_min.setter
def xirr_min(self, xirr_min):
"""
Sets the xirr_min of this SecondMarketRequest.
Minimal Xirr
:param xirr_min: The xirr_min of this SecondMarketRequest.
:type: float
"""
self._xirr_min = xirr_min
@property
def xirr_max(self):
"""
Gets the xirr_max of this SecondMarketRequest.
Maximal Xirr
:return: The xirr_max of this SecondMarketRequest.
:rtype: float
"""
return self._xirr_max
@xirr_max.setter
def xirr_max(self, xirr_max):
"""
Sets the xirr_max of this SecondMarketRequest.
Maximal Xirr
:param xirr_max: The xirr_max of this SecondMarketRequest.
:type: float
"""
self._xirr_max = xirr_max
@property
def page_size(self):
"""
Gets the page_size of this SecondMarketRequest.
Max items in result, default is 1000
:return: The page_size of this SecondMarketRequest.
:rtype: int
"""
return self._page_size
@page_size.setter
def page_size(self, page_size):
"""
Sets the page_size of this SecondMarketRequest.
Max items in result, default is 1000
:param page_size: The page_size of this SecondMarketRequest.
:type: int
"""
if not page_size:
raise ValueError("Invalid value for `page_size`, must not be `None`")
if page_size > 1000.0:
raise ValueError("Invalid value for `page_size`, must be a value less than or equal to `1000.0`")
if page_size < 1.0:
raise ValueError("Invalid value for `page_size`, must be a value greater than or equal to `1.0`")
self._page_size = page_size
@property
def page_nr(self):
"""
Gets the page_nr of this SecondMarketRequest.
Result page nr
:return: The page_nr of this SecondMarketRequest.
:rtype: int
"""
return self._page_nr
@page_nr.setter
def page_nr(self, page_nr):
"""
Sets the page_nr of this SecondMarketRequest.
Result page nr
:param page_nr: The page_nr of this SecondMarketRequest.
:type: int
"""
if not page_nr:
raise ValueError("Invalid value for `page_nr`, must not be `None`")
if page_nr > 2.147483647E9:
raise ValueError("Invalid value for `page_nr`, must be a value less than or equal to `2.147483647E9`")
if page_nr < 1.0:
raise ValueError("Invalid value for `page_nr`, must be a value greater than or equal to `1.0`")
self._page_nr = page_nr
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 32.363827 | 1,203 | 0.653296 |
3ace3153b071f179947b360975488015a1eebd4e | 770 | py | Python | basic/serializers.py | uadson/drf-studies | ef88c1a2123a6503cb74cfa3dfe9477fc54d7463 | [
"MIT"
] | null | null | null | basic/serializers.py | uadson/drf-studies | ef88c1a2123a6503cb74cfa3dfe9477fc54d7463 | [
"MIT"
] | null | null | null | basic/serializers.py | uadson/drf-studies | ef88c1a2123a6503cb74cfa3dfe9477fc54d7463 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from basic.models.course import Course
from basic.models.exam import Exam
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = (
'id',
'title',
'url',
'created',
'active'
)
class ExamSerializer(serializers.ModelSerializer):
class Meta:
# email indisponível para GET, somente para POST
extra_kwargs = {
'email': {'write_only': True}
}
model = Exam
fields = (
'id',
'course',
'name',
'email',
'comment',
'evaluation',
'created',
'active',
)
| 19.74359 | 56 | 0.492208 |
59d2fecd838ae5eaee707a2ceded9916cfe14823 | 8,031 | py | Python | zendesk/komand_zendesk/actions/update_ticket/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | zendesk/komand_zendesk/actions/update_ticket/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | zendesk/komand_zendesk/actions/update_ticket/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
ASSIGNEE_ID = "assignee_id"
ATTACHMENT = "attachment"
COLLABORATOR_IDS = "collaborator_ids"
DESCRIPTION = "description"
DUE_AT = "due_at"
EXTERNAL_ID = "external_id"
GROUP_ID = "group_id"
PRIORITY = "priority"
PROBLEM_ID = "problem_id"
RECIPIENT = "recipient"
REQUESTER_ID = "requester_id"
STATUS = "status"
SUBJECT = "subject"
TAGS = "tags"
TICKET_ID = "ticket_id"
TYPE = "type"
class Output:
TICKET = "ticket"
class UpdateTicketInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"assignee_id": {
"type": "string",
"title": "Assignee ID",
"description": "Assignee ID",
"order": 4
},
"attachment": {
"$ref": "#/definitions/file",
"title": "Attachment",
"description": "Optional file attachment",
"order": 3
},
"collaborator_ids": {
"type": "array",
"title": "Collaborator IDs",
"description": "List of collaborator IDs",
"items": {
"type": "string"
},
"order": 5
},
"description": {
"type": "string",
"title": "Description",
"description": "Ticket description",
"order": 6
},
"due_at": {
"type": "string",
"title": "Due At",
"displayType": "date",
"description": "Time ticket is due",
"format": "date-time",
"order": 7
},
"external_id": {
"type": "string",
"title": "External ID",
"description": "Support ticket ID",
"order": 8
},
"group_id": {
"type": "string",
"title": "Group ID",
"description": "Group ID",
"order": 9
},
"priority": {
"type": "string",
"title": "Priority",
"description": "Ticket priority",
"enum": [
"Urgent",
"High",
"Normal",
"Low",
""
],
"order": 15
},
"problem_id": {
"type": "string",
"title": "Problem ID",
"description": "For tickets of type 'incident', the numeric ID of the problem the incident is linked to",
"order": 11
},
"recipient": {
"type": "string",
"title": "Recipient ID",
"description": "ID of user recipient",
"order": 10
},
"requester_id": {
"type": "string",
"title": "Requester ID",
"description": "ID of user requesting support",
"order": 2
},
"status": {
"type": "string",
"title": "Status",
"description": "Ticket status",
"enum": [
"New",
"Open",
"Pending",
"Hold",
"Solved",
"Closed",
""
],
"order": 16
},
"subject": {
"type": "string",
"title": "Subject",
"description": "Subject of ticket",
"order": 12
},
"tags": {
"type": "array",
"title": "Tags",
"description": "Tags describing ticket",
"items": {
"type": "string"
},
"order": 13
},
"ticket_id": {
"type": "string",
"title": "Ticket ID",
"description": "Ticket ID",
"order": 1
},
"type": {
"type": "string",
"title": "Type",
"description": "Ticket type",
"enum": [
"Problem",
"Incident",
"Task",
"Question",
""
],
"order": 14
}
},
"required": [
"requester_id",
"ticket_id"
],
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class UpdateTicketOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"ticket": {
"$ref": "#/definitions/ticket",
"title": "Ticket",
"description": "Ticket meta data",
"order": 1
}
},
"required": [
"ticket"
],
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"ticket": {
"type": "object",
"title": "ticket",
"properties": {
"assignee_id": {
"type": "string",
"title": "Assignee ID",
"order": 2
},
"attachment": {
"$ref": "#/definitions/file",
"title": "Attachment",
"order": 1
},
"collaborator_ids": {
"type": "array",
"title": "Collaborator IDs",
"items": {
"type": "string"
},
"order": 3
},
"description": {
"type": "string",
"title": "Description",
"order": 4
},
"due_at": {
"type": "string",
"title": "Due At",
"displayType": "date",
"format": "date-time",
"order": 5
},
"external_id": {
"type": "string",
"title": "External ID",
"order": 6
},
"group_id": {
"type": "integer",
"title": "Group ID",
"order": 7
},
"priority": {
"type": "string",
"title": "Priority",
"enum": [
"Urgent",
"High",
"Normal",
"Low",
""
],
"order": 14
},
"problem_id": {
"type": "string",
"title": "Problem ID",
"order": 10
},
"recipient": {
"type": "string",
"title": "Recipient ID",
"order": 9
},
"requester_id": {
"type": "string",
"title": "Requester ID",
"order": 8
},
"status": {
"type": "string",
"title": "Status",
"enum": [
"New",
"Open",
"Pending",
"Hold",
"Solved",
"Closed",
""
],
"order": 15
},
"subject": {
"type": "string",
"title": "Subject",
"order": 11
},
"tags": {
"type": "array",
"title": "Tags",
"items": {
"type": "string"
},
"order": 12
},
"type": {
"type": "string",
"title": "Type",
"enum": [
"Problem",
"Incident",
"Task",
"Question",
""
],
"order": 13
}
},
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 21.705405 | 111 | 0.415017 |
fa750bf446eb0444b4cf05aa705b2437dfb8d60d | 6,149 | py | Python | attention.py | xwhan/Extremely-Fine-Grained-Entity-Typing | d8660ccfa70b0bbeec9a200bf2336aa0d7aa7ab7 | [
"MIT"
] | 89 | 2019-03-06T22:42:59.000Z | 2021-09-29T03:34:27.000Z | attention.py | CPF-NLPR/Extremely-Fine-Grained-Entity-Typing | d8660ccfa70b0bbeec9a200bf2336aa0d7aa7ab7 | [
"MIT"
] | 5 | 2019-03-12T16:11:46.000Z | 2020-09-11T08:21:31.000Z | attention.py | CPF-NLPR/Extremely-Fine-Grained-Entity-Typing | d8660ccfa70b0bbeec9a200bf2336aa0d7aa7ab7 | [
"MIT"
] | 8 | 2019-03-08T15:05:29.000Z | 2021-07-09T15:30:05.000Z | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
import copy
import math
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class SimpleEncoder(nn.Module):
"""
takes (batch_size, seq_len, embed_dim) as inputs
calculate MASK, POSITION_ENCODING
"""
def __init__(self, embed_dim, head=4, layer=1, dropout=0.1):
super(SimpleEncoder, self).__init__()
d_ff = 2 * embed_dim
self.position = PositionalEncoding(embed_dim, dropout)
attn = MultiHeadedAttention(head, embed_dim)
ff = PositionwiseFeedForward(embed_dim, d_ff)
self.encoder = Encoder(EncoderLayer(embed_dim, attn, ff, dropout), layer)
def forward(self, x, mask, lens):
mask = mask.unsqueeze(-2)
x = self.position(x)
x = self.encoder(x, mask)
return x
if __name__ == '__main__':
encoder = SimpleEncoder(350, 2, 1)
inputs = torch.zeros(1000,50,350)
lens = [10] * 1000
encoder(inputs, lens) | 36.384615 | 81 | 0.613921 |
81e6f980335e57076cfcdfe3f111f93d03901607 | 2,718 | py | Python | src/sage/parallel/ncpus.py | switzel/sage | 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | [
"BSL-1.0"
] | 2 | 2015-08-11T05:05:47.000Z | 2019-05-15T17:27:25.000Z | src/sage/parallel/ncpus.py | switzel/sage | 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | [
"BSL-1.0"
] | null | null | null | src/sage/parallel/ncpus.py | switzel/sage | 7eb8510dacf61b691664cd8f1d2e75e5d473e5a0 | [
"BSL-1.0"
] | 1 | 2020-07-24T11:56:55.000Z | 2020-07-24T11:56:55.000Z | """
CPU Detection
"""
# Parallel Python Software: http://www.parallelpython.com
# Copyright (c) 2005-2008, Vitalii Vanovschi
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
######
# This is from ParallelPython (the pp.py file).
import os, subprocess
def ncpus():
"""
Detects the number of effective CPUs in the system.
EXAMPLES::
sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine.
2
"""
#for Linux, Unix and MacOS
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
#Linux and Unix
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else:
#MacOS X
#deprecated: return int(os.popen2("sysctl -n hw.ncpu")[1].read())
process = subprocess.Popen("sysctl -n hw.ncpu", shell=True, stdin=subprocess.PIPE, stdout = subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
return int(process.stdout.read())
#for Windows
if "NUMBER_OF_PROCESSORS" in os.environ:
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
#return the default value
return 1
| 43.142857 | 160 | 0.705298 |
ee201074487e251f7e05fa3a294744e69577b0f1 | 7,835 | py | Python | climart/models/modules/additional_layers.py | RolnickLab/climart | 6c5c139d6b4ad5925cbd0ec8c9ccf15302563b40 | [
"CC-BY-4.0"
] | 12 | 2021-09-29T22:04:37.000Z | 2022-03-01T07:25:33.000Z | climart/models/modules/additional_layers.py | RolnickLab/climart | 6c5c139d6b4ad5925cbd0ec8c9ccf15302563b40 | [
"CC-BY-4.0"
] | null | null | null | climart/models/modules/additional_layers.py | RolnickLab/climart | 6c5c139d6b4ad5925cbd0ec8c9ccf15302563b40 | [
"CC-BY-4.0"
] | 2 | 2021-09-30T11:20:28.000Z | 2021-12-20T13:39:42.000Z | from collections import OrderedDict
from typing import Dict, Optional, Union, Callable, Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from climart.models.modules.mlp import MLP
class Multiscale_Module(nn.Module):
def __init__(self, in_channels=None, channels_per_layer=None, out_shape=None,
dil_rate=1, use_act=False, *args, **kwargs):
super().__init__()
self.out_shape = out_shape
self.use_act = use_act
self.multi_3 = nn.Conv1d(in_channels=in_channels, out_channels=channels_per_layer,
kernel_size=5, stride=1, dilation=dil_rate)
self.multi_5 = nn.Conv1d(in_channels=in_channels, out_channels=channels_per_layer,
kernel_size=6, stride=1, dilation=dil_rate)
self.multi_7 = nn.Conv1d(in_channels=in_channels, out_channels=channels_per_layer,
kernel_size=9, stride=1, dilation=dil_rate)
self.after_concat = nn.Conv1d(in_channels=int(channels_per_layer * 3),
out_channels=int(channels_per_layer / 2), kernel_size=1, stride=1)
self.gap = GAP()
def forward(self, x):
x_3 = self.multi_3(x)
x_5 = self.multi_5(x)
x_7 = self.multi_7(x)
x_3 = F.adaptive_max_pool1d(x_3, self.out_shape)
x_5 = F.adaptive_max_pool1d(x_5, self.out_shape)
x_7 = F.adaptive_max_pool1d(x_7, self.out_shape)
x_concat = torch.cat((x_3, x_5, x_7), 1)
x_concat = self.after_concat(x_concat)
if self.use_act:
return torch.sigmoid(self.gap(x)) * x_concat
else:
return x_concat
class GAP():
def __init__(self):
pass
def __call__(self, x):
x = F.adaptive_avg_pool1d(x, 1)
return x
class SE_Block(nn.Module):
def __init__(self, c, r=16):
super().__init__()
self.squeeze = GAP()
self.excitation = nn.Sequential(
nn.Linear(c, c // r, bias=False),
nn.ReLU(inplace=True),
nn.Linear(c // r, c, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, f = x.shape
y = self.squeeze(x).view(b, c)
y = self.excitation(y).view(b, c, 1)
return x * y.expand_as(x)
class FeatureProjector(nn.Module):
def __init__(
self,
input_name_to_feature_dim: Dict[str, int],
projection_dim: int = 128,
projector_n_layers: int = 1,
projector_activation_func: str = 'Gelu',
projector_net_normalization: str = 'none',
projections_aggregation: Optional[Union[Callable[[Dict[str, Tensor]], Any], str]] = None,
output_normalization: bool = True,
output_activation_function: bool = False
):
super().__init__()
self.projection_dim = projection_dim
self.input_name_to_feature_dim = input_name_to_feature_dim
self.projections_aggregation = projections_aggregation
input_name_to_projector = OrderedDict()
for input_name, feature_dim in input_name_to_feature_dim.items():
projector_hidden_dim = int((feature_dim + projection_dim) / 2)
projector = MLP(
input_dim=feature_dim,
hidden_dims=[projector_hidden_dim for _ in range(projector_n_layers)],
output_dim=projection_dim,
activation_function=projector_activation_func,
net_normalization=projector_net_normalization,
dropout=0,
output_normalization=output_normalization,
output_activation_function=output_activation_function,
name=f"{input_name}_MLP_projector"
)
input_name_to_projector[input_name] = projector
self.input_name_to_projector = nn.ModuleDict(input_name_to_projector)
def forward(self,
inputs: Dict[str, Tensor]
) -> Union[Dict[str, Tensor], Any]:
name_to_projection = dict()
for name, projector in self.input_name_to_projector.items():
# Project (batch-size, ..arbitrary_dim(s).., in_feat_dim)
# to (batch-size, ..arbitrary_dim(s).., projection_dim)
in_feat_dim = self.input_name_to_feature_dim[name]
input_tensor = inputs[name]
shape_out = list(input_tensor.shape)
shape_out[-1] = self.projection_dim
projector_input = input_tensor.reshape((-1, in_feat_dim))
# projector_input has shape (batch-size * #spatial-dims, features)
flattened_projection = projector(projector_input)
name_to_projection[name] = flattened_projection.reshape(shape_out)
if self.projections_aggregation is None:
return name_to_projection # Dict[str, Tensor]
else:
return self.projections_aggregation(name_to_projection) # Any
class PredictorHeads(nn.Module):
"""
Module to predict (with one or more MLPs) desired output based on a 1D hidden representation.
Can be used to:
- readout a hidden vector to a desired output dimensionality
- predict multiple variables with separate MLP heads (e.g. one for rsuc and rsdc each)
"""
def __init__(
self,
input_dim: int,
var_name_to_output_dim: Dict[str, int],
separate_heads: bool = True,
n_layers: int = 1,
activation_func: str = 'Gelu',
net_normalization: str = 'none',
):
super().__init__()
self.input_dim = input_dim
self.separate_heads = separate_heads
predictor_heads = OrderedDict()
mlp_shared_params = {
'input_dim': input_dim,
'activation_function': activation_func,
'net_normalization': net_normalization,
'output_normalization': False,
'dropout': 0
}
if self.separate_heads:
self.output_name_to_feature_dim = var_name_to_output_dim
for output_name, var_out_dim in var_name_to_output_dim.items():
predictor_hidden_dim = int((input_dim + var_out_dim) / 2)
predictor = MLP(
hidden_dims=[predictor_hidden_dim for _ in range(n_layers)],
output_dim=var_out_dim,
**mlp_shared_params
)
predictor_heads[output_name] = predictor
else:
joint_out_dim = sum([out_dim for _, out_dim in var_name_to_output_dim.items()])
self.output_name_to_feature_dim = {'joint_output': joint_out_dim}
predictor_hidden_dim = int((input_dim + joint_out_dim) / 2)
predictor = MLP(
hidden_dims=[predictor_hidden_dim for _ in range(n_layers)],
output_dim=joint_out_dim,
**mlp_shared_params
)
predictor_heads['joint_output'] = predictor
self.predictor_heads = nn.ModuleDict(predictor_heads)
def forward(self,
hidden_input: Tensor, # (batch-size, hidden-dim) 1D tensor
as_dict: bool = False,
) -> Union[Dict[str, Tensor], Tensor]:
name_to_prediction = OrderedDict()
for name, predictor in self.predictor_heads.items():
name_to_prediction[name] = predictor(hidden_input)
if self.separate_heads:
if as_dict:
return name_to_prediction
else:
joint_output = torch.cat(list(name_to_prediction.values()), dim=-1)
return joint_output
else:
return name_to_prediction if as_dict else name_to_prediction['joint_output']
| 38.596059 | 104 | 0.610849 |
9898a7734636c88b992da001ecc48e1102580b0c | 597 | py | Python | confess/models/vote.py | revalo/hush.mit.edu | e47c28c934dcfb94c52f6e12367869389e8ed7a8 | [
"MIT"
] | 21 | 2017-10-30T20:55:48.000Z | 2021-09-03T14:06:58.000Z | confess/models/vote.py | revalo/hush.mit.edu | e47c28c934dcfb94c52f6e12367869389e8ed7a8 | [
"MIT"
] | 1 | 2021-11-08T02:05:34.000Z | 2021-11-08T06:54:41.000Z | confess/models/vote.py | revalo/hush.mit.edu | e47c28c934dcfb94c52f6e12367869389e8ed7a8 | [
"MIT"
] | 3 | 2017-11-15T23:18:00.000Z | 2018-01-01T06:44:03.000Z | from confess.models import db
from sqlalchemy.orm.exc import NoResultFound
from datetime import datetime
UPVOTE = 1
DOWNVOTE = -1
class Vote(db.Model):
id = db.Column(db.Integer, primary_key=True)
post_id = db.Column(db.Integer, db.ForeignKey('post.id'))
user = db.Column(db.Integer, db.ForeignKey('user.id'))
value = db.Column(db.Integer)
class CommentVote(db.Model):
id = db.Column(db.Integer, primary_key=True)
comment_id = db.Column(db.Integer, db.ForeignKey('comment.id'))
user = db.Column(db.Integer, db.ForeignKey('user.id'))
value = db.Column(db.Integer) | 33.166667 | 67 | 0.711893 |
e23bae77dcd042c484b8244bae6d29aaa440cd53 | 7,304 | py | Python | code_server/model/graphsage_meanpool.py | Dragon-M-Ren/grad_code | d814b81adaec709d5dffd737f0c350953cc361fd | [
"Apache-2.0"
] | null | null | null | code_server/model/graphsage_meanpool.py | Dragon-M-Ren/grad_code | d814b81adaec709d5dffd737f0c350953cc361fd | [
"Apache-2.0"
] | null | null | null | code_server/model/graphsage_meanpool.py | Dragon-M-Ren/grad_code | d814b81adaec709d5dffd737f0c350953cc361fd | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from .base_model import BaseModel
from .layers import *
from .model_utils import *
import time
class GraphSageMeanPool(BaseModel):
def __init__(self,
hidden_num, hidden_dim,
input_dim, output_dim,
node_num, cate_num,
learning_rate, epochs,
weight_decay, early_stopping,
activation_func,
dropout_prob,
bias, optimizer,
name,
transform_size):
super(GraphSageMeanPool, self).__init__(
hidden_num, hidden_dim,
input_dim, output_dim,
learning_rate, epochs,
weight_decay, early_stopping,
name)
#End
self.total_nodes = node_num
self.total_cates = output_dim
self.activation_func = activation_func
self.dropout_prob = dropout_prob
self.bias = bias
self.transform_size = transform_size
self.num_nodes = node_num
#Add placeholders
#Note, this dictionary is used to create feed dicts
#, shape=(self.total_nodes, self.input_dim)
self.placeholders = {}
self.placeholders['features'] = tf.sparse_placeholder(tf.float32, name='Feature')
self.placeholders['adj'] = tf.sparse_placeholder(tf.float32, name='Adjancy')
self.placeholders['labels'] = tf.placeholder(tf.int32, shape=(self.total_nodes, self.total_cates), name='labels')
self.placeholders['mask'] = tf.placeholder(tf.int32, shape=(self.total_nodes), name='mask')
self.placeholders['num_features_nonzero'] = tf.placeholder(tf.int32, name='num_features_nonzero')
self.placeholders['degrees'] = tf.placeholder(tf.float32, shape = (self.total_nodes, 1), name='degrees')
self.placeholders['row'] = tf.placeholder(tf.int32, name='row')
self.placeholders['col'] = tf.placeholder(tf.int32, name='col')
self.adjancy = self.placeholders['adj']
self.inputs = self.placeholders['features']
self.label = self.placeholders['labels']
self.mask = self.placeholders['mask']
self.num_features_nonzero = self.placeholders['num_features_nonzero']
self.degrees = self.placeholders['degrees']
self.row = self.placeholders['row']
self.col = self.placeholders['col']
self.optimizer = optimizer(learning_rate=self.learning_rate)
self.build()
def _add_layers(self):
self.layers.append(
MeanPoolLayer(self.adjancy,
self.hidden_dim[0], self.hidden_dim[1],
self.activation_func,
self.name + '_0',
self.dropout_prob,
self.bias,
sparse = True,
degrees = self.degrees,
row = self.row,
col = self.col,
transform_size = self.transform_size[0],
num_nodes = self.num_nodes)
)
self.layers.append(
MeanPoolLayer(self.adjancy,
self.hidden_dim[1], self.hidden_dim[2],
self.activation_func,
self.name + '_1',
self.dropout_prob,
self.bias,
sparse = False,
degrees = self.degrees,
row = self.row,
col = self.col,
transform_size = self.transform_size[1],
num_nodes = self.num_nodes)
)
def _loss(self):
'''
Define loss function
'''
#loss
loss = masked_softmax_cross_entropy(self.outputs, self.label, self.mask)
#Regularization, weight_decay
for each_layer in self.layers:
for var in each_layer.weight_decay_vars:
print(var)
loss += self.weight_decay * tf.nn.l2_loss(var)
return loss
def _accuracy(self):
'''
Define accuracy
'''
accuracy = masked_accuracy(self.outputs, self.label, self.mask)
return accuracy
def train(self, sess, adj, features, train_label, val_label, train_mask, val_mask, num_features_nonzero, degrees, row, col):
'''
Train the model
'''
#Loss: Saves a list of the loss
train_loss_list = []
train_acc_list = []
val_loss_list = []
val_acc_list = []
#Construct the feed dict
feed_dict = {
self.adjancy: adj,
self.inputs: features,
self.label: train_label,
self.mask: train_mask,
self.num_features_nonzero: num_features_nonzero,
self.degrees: degrees,
self.row: row,
self.col: col
}
feed_dict_val = {
self.adjancy: adj,
self.inputs: features,
self.label: val_label,
self.mask: val_mask,
self.num_features_nonzero: num_features_nonzero,
self.degrees: degrees,
self.row: row,
self.col: col
}
sess.run(tf.global_variables_initializer())
#Train precedure
for epoch in range(self.epochs):
loss, train_accu, _ = sess.run([self.loss, self.accuracy, self.opt_op], feed_dict=feed_dict)
train_loss_list.append(loss)
train_acc_list.append(train_accu)
cost, val_accu = sess.run([self.loss, self.accuracy], feed_dict=feed_dict_val)
val_loss_list.append(cost)
val_acc_list.append(val_accu)
print('epochs: ', epoch, 'loss: ', loss, 'train_accu: ', train_accu, 'cost: ', cost, train_accu, 'accuracy: ', val_accu)
#Test early stopping
if early_stopping(val_acc_list, epoch, self.early_stopping):
print("Early stopping")
break
train_info = {'train_loss': train_loss_list, 'train_acc': train_acc_list,
'val_loss': val_loss_list, 'val_acc': val_acc_list}
return train_info
def predict(self, sess, adj, features, label, mask, num_features_nonzero, degrees, row, col):
'''
Predict, a cate-index representation will be provided
'''
feed_dict = {
self.adjancy: adj,
self.inputs: features,
self.label: label,
self.mask: mask,
self.num_features_nonzero: num_features_nonzero,
self.degrees: degrees,
self.row: row,
self.col: col
}
outputs = sess.run(self.outputs, feed_dict=feed_dict)
cate_index = tf.argmax(outputs, 1)
return cate_index
def test(self, sess, adj, features, label, mask, num_features_nonzero, degrees, row, col):
'''
Test the model, return accuracy
'''
t_strat = time.time()
feed_dict = {
self.adjancy: adj,
self.inputs: features,
self.label: label,
self.mask: mask,
self.num_features_nonzero: num_features_nonzero,
self.degrees: degrees,
self.row: row,
self.col: col
}
accu = sess.run(self.accuracy, feed_dict=feed_dict)
t_end = time.time()
time_used = t_end - t_strat
return accu, time_used
| 32.318584 | 133 | 0.573111 |
d4fa9ca9ec9eddb3919eca8d3b4f4554287a35f2 | 13,138 | py | Python | MAEnv/env_Drones/env_Drones.py | Abluceli/Multi-agent-Reinforcement-Learning-Algorithms | 15810a559e2f2cf9e5fcb158c083f9e9dd6012fc | [
"MIT"
] | 5 | 2020-05-25T03:08:09.000Z | 2022-02-27T05:57:28.000Z | MAEnv/env_Drones/env_Drones.py | Abluceli/Multi-agent-Reinforcement-Learning-Algorithms | 15810a559e2f2cf9e5fcb158c083f9e9dd6012fc | [
"MIT"
] | 1 | 2020-12-22T01:35:36.000Z | 2022-01-28T01:51:06.000Z | MAEnv/env_Drones/env_Drones.py | Abluceli/Multi-agent-Reinforcement-Learning-Algorithms | 15810a559e2f2cf9e5fcb158c083f9e9dd6012fc | [
"MIT"
] | 1 | 2020-05-06T01:56:55.000Z | 2020-05-06T01:56:55.000Z | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import random
import cv2
class Drones(object):
def __init__(self, pos, view_range):
self.pos = pos
self.view_range = view_range
class Human(object):
def __init__(self, pos):
self.pos = pos
class EnvDrones(object):
def __init__(self, map_size, drone_num, view_range, tree_num, human_num):
self.map_size = map_size
self.drone_num = drone_num
self.tree_num = tree_num
self.human_num = human_num
self.view_range = view_range
self.action_dim = 4
self.full_state_shape = (self.map_size, self.map_size, 3)
self.drones_shape = (2 * self.view_range - 1, 2 * self.view_range - 1, 3)
# initialize blocks and trees
self.land_mark_map = np.zeros((self.map_size, self.map_size))
def reset(self):
# initialize blocks and trees
self.land_mark_map = np.zeros((self.map_size, self.map_size))
for i in range(self.map_size):
for j in range(self.map_size):
if random.random() < 0.01:
self.land_mark_map[i, j] = 1 # wall
# intialize tree
for i in range(self.tree_num):
temp_pos = [random.randint(0, self.map_size-1), random.randint(0, self.map_size-1)]
while self.land_mark_map[temp_pos[0], temp_pos[1]] != 0:
temp_pos = [random.randint(0, self.map_size-1), random.randint(0, self.map_size-1)]
self.land_mark_map[temp_pos[0], temp_pos[1]] = 2
# initialize humans
self.human_list = []
for i in range(self.human_num):
temp_pos = [random.randint(0, self.map_size-1), random.randint(0, self.map_size-1)]
while self.land_mark_map[temp_pos[0], temp_pos[1]] != 0:
temp_pos = [random.randint(0, self.map_size-1), random.randint(0, self.map_size-1)]
temp_human = Human(temp_pos)
self.human_list.append(temp_human)
# initialize drones
self.start_pos = [self.map_size-1, self.map_size-1]
self.drone_list = []
for i in range(self.drone_num):
temp_drone = Drones(self.start_pos, self.view_range)
self.drone_list.append(temp_drone)
self.rand_reset_drone_pos()
def get_full_obs(self):
obs = np.ones((self.map_size, self.map_size, 3))
for i in range(self.map_size):
for j in range(self.map_size):
if self.land_mark_map[i, j] == 1:
obs[i, j, 0] = 0
obs[i, j, 1] = 0
obs[i, j, 2] = 0
if self.land_mark_map[i, j] == 2:
obs[i, j, 0] = 0
obs[i, j, 1] = 1
obs[i, j, 2] = 0
for i in range(self.human_num):
obs[self.human_list[i].pos[0], self.human_list[i].pos[1], 0] = 1
obs[self.human_list[i].pos[0], self.human_list[i].pos[1], 1] = 0
obs[self.human_list[i].pos[0], self.human_list[i].pos[1], 2] = 0
return obs.reshape((1, self.map_size, self.map_size, 3))
def get_drone_obs(self, drone):
obs_size = 2 * drone.view_range - 1
obs = np.ones((obs_size, obs_size, 3))
for i in range(obs_size):
for j in range(obs_size):
x = i + drone.pos[0] - drone.view_range + 1
y = j + drone.pos[1] - drone.view_range + 1
for k in range(self.human_num):
if self.human_list[k].pos[0] == x and self.human_list[k].pos[1] == y:
obs[i, j, 0] = 1
obs[i, j, 1] = 0
obs[i, j, 2] = 0
if x>=0 and x<=self.map_size-1 and y>=0 and y<=self.map_size-1:
if self.land_mark_map[x, y] == 1:
obs[i, j, 0] = 0
obs[i, j, 1] = 0
obs[i, j, 2] = 0
if self.land_mark_map[x, y] == 2:
obs[i, j, 0] = 0
obs[i, j, 1] = 1
obs[i, j, 2] = 0
else:
obs[i, j, 0] = 0.5
obs[i, j, 1] = 0.5
obs[i, j, 2] = 0.5
if (drone.view_range - 1 - i)*(drone.view_range - 1 - i)+(drone.view_range - 1 - j)*(drone.view_range - 1 - j) > drone.view_range*drone.view_range:
obs[i, j, 0] = 0.5
obs[i, j, 1] = 0.5
obs[i, j, 2] = 0.5
return obs
def get_drones_obs(self):
o = []
for drone in self.drone_list:
obs_size = 2 * drone.view_range - 1
obs = np.ones((obs_size, obs_size, 3))
for i in range(obs_size):
for j in range(obs_size):
x = i + drone.pos[0] - drone.view_range + 1
y = j + drone.pos[1] - drone.view_range + 1
for k in range(self.human_num):
if self.human_list[k].pos[0] == x and self.human_list[k].pos[1] == y:
obs[i, j, 0] = 1
obs[i, j, 1] = 0
obs[i, j, 2] = 0
if x>=0 and x<=self.map_size-1 and y>=0 and y<=self.map_size-1:
if self.land_mark_map[x, y] == 1:
obs[i, j, 0] = 0
obs[i, j, 1] = 0
obs[i, j, 2] = 0
if self.land_mark_map[x, y] == 2:
obs[i, j, 0] = 0
obs[i, j, 1] = 1
obs[i, j, 2] = 0
else:
obs[i, j, 0] = 0.5
obs[i, j, 1] = 0.5
obs[i, j, 2] = 0.5
if (drone.view_range - 1 - i)*(drone.view_range - 1 - i)+(drone.view_range - 1 - j)*(drone.view_range - 1 - j) > drone.view_range*drone.view_range:
obs[i, j, 0] = 0.5
obs[i, j, 1] = 0.5
obs[i, j, 2] = 0.5
o.append(obs)
return np.asarray(o)
def get_joint_obs(self):
obs = np.ones((self.map_size, self.map_size, 3))
for i in range(self.map_size):
for j in range(self.map_size):
obs[i, j, 0] = 0.5
obs[i, j, 1] = 0.5
obs[i, j, 2] = 0.5
for k in range(self.drone_num):
temp = self.get_drone_obs(self.drone_list[k])
size = temp.shape[0]
for i in range(size):
for j in range(size):
x = i + self.drone_list[k].pos[0] - self.drone_list[k].view_range + 1
y = j + self.drone_list[k].pos[1] - self.drone_list[k].view_range + 1
if_obs = True
if temp[i, j, 0] == 0.5 and temp[i, j, 1] == 0.5 and temp[i, j, 2] == 0.5:
if_obs = False
if if_obs == True:
obs[x, y, 0] = temp[i, j, 0]
obs[x, y, 1] = temp[i, j, 1]
obs[x, y, 2] = temp[i, j, 2]
return obs
def rand_reset_drone_pos(self):
for k in range(self.drone_num):
self.drone_list[k].pos = [random.randint(0, self.map_size-1), random.randint(0, self.map_size-1)]
def drone_step(self, drone_act_list):
if len(drone_act_list) != self.drone_num:
return
for k in range(self.drone_num):
if drone_act_list[k] == 0:
if self.drone_list[k].pos[0] > 0:
self.drone_list[k].pos[0] = self.drone_list[k].pos[0] - 1
elif drone_act_list[k] == 1:
if self.drone_list[k].pos[0] < self.map_size - 1:
self.drone_list[k].pos[0] = self.drone_list[k].pos[0] + 1
elif drone_act_list[k] == 2:
if self.drone_list[k].pos[1] > 0:
self.drone_list[k].pos[1] = self.drone_list[k].pos[1] - 1
elif drone_act_list[k] == 3:
if self.drone_list[k].pos[1] < self.map_size - 1:
self.drone_list[k].pos[1] = self.drone_list[k].pos[1] + 1
get_obs = self.get_drones_obs()
rewards = []
done = False
for obs in get_obs:
reward = 0
for i in range(2 * self.view_range - 1):
for j in range(2 * self.view_range - 1):
if obs[i][j][0] == 1 and obs[i][j][1] == 0 and obs[i][j][2] == 0:
reward = reward + 1
rewards.append(reward)
return rewards, done
def human_step(self, human_act_list):
if len(human_act_list) != self.human_num:
return
for k in range(self.human_num):
if human_act_list[k] == 0:
if self.human_list[k].pos[0] > 0:
free_space = self.land_mark_map[self.human_list[k].pos[0] - 1, self.human_list[k].pos[1]]
if free_space == 0:
self.human_list[k].pos[0] = self.human_list[k].pos[0] - 1
elif human_act_list[k] == 1:
if self.human_list[k].pos[0] < self.map_size - 1:
free_space = self.land_mark_map[self.human_list[k].pos[0] + 1, self.human_list[k].pos[1]]
if free_space == 0:
self.human_list[k].pos[0] = self.human_list[k].pos[0] + 1
elif human_act_list[k] == 2:
if self.human_list[k].pos[1] > 0:
free_space = self.land_mark_map[self.human_list[k].pos[0], self.human_list[k].pos[1] - 1]
if free_space == 0:
self.human_list[k].pos[1] = self.human_list[k].pos[1] - 1
elif human_act_list[k] == 3:
if self.human_list[k].pos[1] < self.map_size - 1:
free_space = self.land_mark_map[self.human_list[k].pos[0], self.human_list[k].pos[1] + 1]
if free_space == 0:
self.human_list[k].pos[1] = self.human_list[k].pos[1] + 1
def step(self, human_act_list, drone_act_list):
self.drone_step(drone_act_list)
self.human_step(human_act_list)
get_obs = self.get_drones_obs()
rewards = []
done = False
for obs in get_obs:
reward = 0
for i in range(2 * self.view_range - 1):
for j in range(2 * self.view_range - 1):
if obs[i][j][0] == 1 and obs[i][j][1] == 0 and obs[i][j][2] == 0:
reward = reward + 1
reward = reward - 0.01
rewards.append(reward)
return rewards, done
def render(self):
get_obs = self.get_joint_obs()
'''
obs_shape: (self.map_size, self.map_size, 3)
huise:(0.5, 0.5, 0.5)
baise:(1,1,1)
hongse:(1,0,0)
lvse:(0,1,0)
heise:(0, 0 ,0)
'''
size = 10
obs = np.ones((self.map_size * size, self.map_size * size, 3))
for i in range(self.map_size):
for j in range(self.map_size):
if get_obs[i][j][0] == 0.5:
cv2.rectangle(obs, (i * size, ((self.map_size-1) - j) * size), (i * size + size, ((self.map_size-1) - j) * size + size), (255,0,0), -1)
if get_obs[i][j][0] == 1 and get_obs[i][j][1] == 0 and get_obs[i][j][2] == 0:
cv2.rectangle(obs, (i * size, ((self.map_size-1) - j) * size), (i * size + size, ((self.map_size-1) - j) * size + size), (0, 0, 255), -1)
if get_obs[i][j][0] == 0 and get_obs[i][j][1] == 1 and get_obs[i][j][2] == 0:
cv2.rectangle(obs, (i * size, ((self.map_size-1) - j) * size), (i * size + size, ((self.map_size-1) - j) * size + size), (0, 255, 0), -1)
if get_obs[i][j][0] == 0 and get_obs[i][j][1] == 0 and get_obs[i][j][2] == 0:
cv2.rectangle(obs, (i * size, ((self.map_size-1) - j) * size), (i * size + size, ((self.map_size-1) - j) * size + size), (0, 0, 0), -1)
cv2.imshow('image', obs)
cv2.waitKey(10)
import time
if __name__ == '__main__':
env = EnvDrones(map_size=50, drone_num=1, view_range=10, tree_num=30, human_num=1) # map_size, drone_num, view_range, tree_num, human_num
env.reset()
for i in range(len(env.drone_list)):
print(env.get_drone_obs(env.drone_list[i]).reshape((1, -1)).shape)
print(env.get_joint_obs().reshape((1, -1)).shape)
max_MC_iter = 1000
for MC_iter in range(max_MC_iter):
env.render()
time.sleep(0.1)
human_act_list = []
for i in range(env.human_num):
human_act_list.append(random.randint(0, 4))
drone_act_list = []
for i in range(env.drone_num):
drone_act_list.append(random.randint(0, 4))
reward, done = env.step(human_act_list, drone_act_list)
print(reward)
| 43.359736 | 167 | 0.488583 |
2aa22fb824305633e9f30ed5a47fff26b78fa984 | 2,177 | py | Python | src/analyzer/hooks.py | jsiloto/adaptive-cob | eb38e3b52c4927e3ac0a897142ad26fbc4eb82de | [
"MIT"
] | 1 | 2021-09-15T02:32:57.000Z | 2021-09-15T02:32:57.000Z | src/analyzer/hooks.py | jsiloto/adaptive-cob | eb38e3b52c4927e3ac0a897142ad26fbc4eb82de | [
"MIT"
] | null | null | null | src/analyzer/hooks.py | jsiloto/adaptive-cob | eb38e3b52c4927e3ac0a897142ad26fbc4eb82de | [
"MIT"
] | 2 | 2021-11-20T13:17:06.000Z | 2022-03-08T13:42:52.000Z | import numpy as np
def bn_flops_counter_hook(module, input, output):
input = input[0]
batch_flops = np.prod(input.shape)
if module.affine:
batch_flops *= 2
module.__flops__ += int(batch_flops)
def usconv_flops_counter_hook(conv_module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = input.shape[0]
output_dims = list(output.shape[2:])
kernel_dims = list(conv_module.kernel_size)
in_channels = conv_module.in_channels_basic
out_channels = conv_module.out_channels_basic
if conv_module.slimmable_input:
in_channels = int(round(conv_module.in_channels_basic * conv_module.width_mult, 0))
if conv_module.slimmable_output:
out_channels = int(round(conv_module.out_channels_basic * conv_module.width_mult, 0))
print("conv_module.in_channels_basic", conv_module.in_channels_basic)
print("conv_module.out_channels_basic", conv_module.out_channels_basic)
print("in_channels", in_channels)
print("out_channels", out_channels)
groups = conv_module.groups
print("groups", groups)
filters_per_channel = out_channels // groups
conv_per_position_flops = int(np.prod(kernel_dims)) * in_channels * filters_per_channel
active_elements_count = batch_size * int(np.prod(output_dims))
overall_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if conv_module.bias is not None:
bias_flops = out_channels * active_elements_count
overall_flops = overall_conv_flops + bias_flops
conv_module.__flops__ += int(overall_flops)
def empty_flops_counter_hook(module, input, output):
module.__flops__ = 0
# conv_module.conv.__flops_handle__.remove()
if hasattr(conv_module, 'conv'):
conv_module.conv.register_forward_hook(empty_flops_counter_hook)
# if conv_module.depthwise:
# print(groups)
# print(filters_per_channel)
#
# exit()
# print(conv_module.__dict__)
# print(conv_module.conv.__dict__)
# exit()
# print(conv_module.__flops__)
# print(conv_module.conv.__flops__)
# print(conv_module)
| 31.550725 | 93 | 0.72531 |
e847261152f6c7bf661105cf148d6c2831f27c28 | 1,034 | py | Python | tests/0cli_test.py | ncgr/DSNormalizer | 016af547578594e8d2eb0988c7cf070c78796890 | [
"BSD-3-Clause"
] | null | null | null | tests/0cli_test.py | ncgr/DSNormalizer | 016af547578594e8d2eb0988c7cf070c78796890 | [
"BSD-3-Clause"
] | 71 | 2020-04-13T17:14:01.000Z | 2021-08-02T11:33:37.000Z | tests/0cli_test.py | ncgr/DSNormalizer | 016af547578594e8d2eb0988c7cf070c78796890 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# standard library imports
import contextlib
import os
from pathlib import Path
# third-party imports
import pytest
import sh
@contextlib.contextmanager
def working_directory(path):
"""Change working directory in context."""
prev_cwd = Path.cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev_cwd)
def test_cli(tmp_path):
print("testing basic cli function")
with working_directory(tmp_path):
try:
output = sh.bionorm()
except sh.ErrorReturnCode as e:
print(e)
pytest.fail(e)
print(output)
assert "Usage:" in output
assert "Options:" in output
assert "Commands:" in output
def test_version(tmp_path):
print("testing version")
with working_directory(tmp_path):
try:
output = sh.bionorm(["--version"])
except sh.ErrorReturnCode as e:
print(e)
pytest.fail(e)
print(output)
assert "version" in output
| 22 | 46 | 0.609284 |
a4d14b0adad138db16d70be4c0965d6ec60a35b7 | 6,653 | py | Python | insights/client/insights_spec.py | psachin/insights-core | 8122ffee1ffc9713fd8bb765d5f9afd723c3c90a | [
"Apache-2.0"
] | 1 | 2020-02-19T06:36:22.000Z | 2020-02-19T06:36:22.000Z | insights/client/insights_spec.py | psachin/insights-core | 8122ffee1ffc9713fd8bb765d5f9afd723c3c90a | [
"Apache-2.0"
] | null | null | null | insights/client/insights_spec.py | psachin/insights-core | 8122ffee1ffc9713fd8bb765d5f9afd723c3c90a | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import os
import errno
import shlex
import logging
import six
from subprocess import Popen, PIPE, STDOUT
from tempfile import NamedTemporaryFile
from insights.util import mangle
from .constants import InsightsConstants as constants
logger = logging.getLogger(__name__)
def shlex_split(cmd):
if six.PY3:
return shlex.split(cmd)
else:
return shlex.split(cmd.encode('utf-8'))
class InsightsSpec(object):
'''
A spec loaded from the uploader.json
'''
def __init__(self, config, spec, exclude):
self.config = config
# exclusions patterns for this spec
self.exclude = exclude
# pattern for spec collection
self.pattern = spec['pattern'] if spec['pattern'] else None
class InsightsCommand(InsightsSpec):
'''
A command spec
'''
def __init__(self, config, spec, exclude, mountpoint):
InsightsSpec.__init__(self, config, spec, exclude)
self.command = spec['command'].replace(
'{CONTAINER_MOUNT_POINT}', mountpoint)
self.archive_path = mangle.mangle_command(self.command)
if not six.PY3:
self.command = self.command.encode('utf-8', 'ignore')
def get_output(self):
'''
Execute a command through system shell. First checks to see if
the requested command is executable. Returns (returncode, stdout, 0)
'''
# all commands should timeout after a long interval so the client does not hang
# prepend native nix 'timeout' implementation
timeout_command = 'timeout %s %s' % (self.config.cmd_timeout, self.command)
# ensure consistent locale for collected command output
cmd_env = {'LC_ALL': 'C'}
args = shlex.split(timeout_command)
# never execute this stuff
if set.intersection(set(args), constants.command_blacklist):
raise RuntimeError("Command Blacklist: " + self.command)
try:
logger.debug('Executing: %s', args)
proc0 = Popen(args, shell=False, stdout=PIPE, stderr=STDOUT,
bufsize=-1, env=cmd_env, close_fds=True)
except OSError as err:
if err.errno == errno.ENOENT:
logger.debug('Command %s not found', self.command)
return
else:
raise err
dirty = False
cmd = "sed -rf " + constants.default_sed_file
sedcmd = Popen(shlex_split(cmd),
stdin=proc0.stdout,
stdout=PIPE)
proc0.stdout.close()
proc0 = sedcmd
if self.exclude is not None:
exclude_file = NamedTemporaryFile()
exclude_file.write("\n".join(self.exclude))
exclude_file.flush()
cmd = "grep -F -v -f %s" % exclude_file.name
proc1 = Popen(shlex_split(cmd),
stdin=proc0.stdout,
stdout=PIPE)
proc0.stdout.close()
stderr = None
if self.pattern is None or len(self.pattern) == 0:
stdout, stderr = proc1.communicate()
# always log return codes for debug
logger.debug('Proc1 Status: %s', proc1.returncode)
logger.debug('Proc1 stderr: %s', stderr)
proc0 = proc1
dirty = True
if self.pattern is not None and len(self.pattern):
pattern_file = NamedTemporaryFile()
pattern_file.write("\n".join(self.pattern).encode('utf-8'))
pattern_file.flush()
cmd = "grep -F -f %s" % pattern_file.name
proc2 = Popen(shlex_split(cmd),
stdin=proc0.stdout,
stdout=PIPE)
proc0.stdout.close()
stdout, stderr = proc2.communicate()
# always log return codes for debug
logger.debug('Proc2 Status: %s', proc2.returncode)
logger.debug('Proc2 stderr: %s', stderr)
proc0 = proc2
dirty = True
if not dirty:
stdout, stderr = proc0.communicate()
# Required hack while we still pass shell=True to Popen; a Popen
# call with shell=False for a non-existant binary will raise OSError.
if proc0.returncode == 126 or proc0.returncode == 127:
stdout = "Could not find cmd: %s", self.command
logger.debug("Proc0 Status: %s", proc0.returncode)
logger.debug("Proc0 stderr: %s", stderr)
return stdout.decode('utf-8', 'ignore').strip()
class InsightsFile(InsightsSpec):
'''
A file spec
'''
def __init__(self, spec, exclude, mountpoint):
InsightsSpec.__init__(self, None, spec, exclude)
# substitute mountpoint for collection
self.real_path = os.path.join(mountpoint, spec['file'].lstrip('/'))
self.archive_path = spec['file']
def get_output(self):
'''
Get file content, selecting only lines we are interested in
'''
if not os.path.isfile(self.real_path):
logger.debug('File %s does not exist', self.real_path)
return
cmd = []
cmd.append('sed'.encode('utf-8'))
cmd.append('-rf'.encode('utf-8'))
cmd.append(constants.default_sed_file.encode('utf-8'))
cmd.append(self.real_path.encode('utf8'))
sedcmd = Popen(cmd,
stdout=PIPE)
if self.exclude is not None:
exclude_file = NamedTemporaryFile()
exclude_file.write("\n".join(self.exclude))
exclude_file.flush()
cmd = "grep -v -F -f %s" % exclude_file.name
args = shlex_split(cmd)
proc = Popen(args, stdin=sedcmd.stdout, stdout=PIPE)
sedcmd.stdout.close()
stdin = proc.stdout
if self.pattern is None:
output = proc.communicate()[0]
else:
sedcmd = proc
if self.pattern is not None:
pattern_file = NamedTemporaryFile()
pattern_file.write("\n".join(self.pattern).encode('utf-8'))
pattern_file.flush()
cmd = "grep -F -f %s" % pattern_file.name
args = shlex_split(cmd)
proc1 = Popen(args, stdin=sedcmd.stdout, stdout=PIPE)
sedcmd.stdout.close()
if self.exclude is not None:
stdin.close()
output = proc1.communicate()[0]
if self.pattern is None and self.exclude is None:
output = sedcmd.communicate()[0]
return output.decode('utf-8', 'ignore').strip()
| 34.117949 | 87 | 0.578837 |
59cae9bef8e8b1cfa018bd5ff0070fc0a9b6ce06 | 13,045 | py | Python | elevenclock/lang/lang_ar.py | wanderleihuttel/ElevenClock | de4272a650111233acf36c909c7e269c8dc810d2 | [
"Apache-2.0"
] | null | null | null | elevenclock/lang/lang_ar.py | wanderleihuttel/ElevenClock | de4272a650111233acf36c909c7e269c8dc810d2 | [
"Apache-2.0"
] | null | null | null | elevenclock/lang/lang_ar.py | wanderleihuttel/ElevenClock | de4272a650111233acf36c909c7e269c8dc810d2 | [
"Apache-2.0"
] | null | null | null | # INSTRUCTIONS
# Translate the text and write it between the "
# EXAMPLE: original -> "This text is in english: value {0}"
# translation -> "Aquest text està en anglès: valor {0}"
# So it would look like: "ORIGINAL_TEXT" : "TRANSLATED_TEXT",
# If you see sth like {0}, {1}, maintain it on the translated sentence
# Meke special attention to elements like ":", etc.
lang_3_2_1 = {
"Open online help to troubleshoot problems": "",
"Reset ElevenClock preferences to defaults": "",
"Specify a minimum width for the clock": "",
"Search on the settings": "",
"No results were found": "",
}
lang_3_2 = lang_3_2_1 | {
"Use system accent color as background color": "",
"Check only the focused window on the fullscreen check": "",
"Clock on monitor {0}": "",
"Move to the left": "",
"Show this clock on the left": "",
"Show this clock on the right": "",
"Restore clock position": "",
}
lang_3_1 = lang_3_2 | {
"W": "أ", # The initial of the word week in your language: W for week, S for setmana, etc.
"Disable the notification badge": "عطل شارة الاشعارات",
"Override clock default height": "تجاهل الارتفاع الافتراضي للساعة",
"Adjust horizontal clock position": "ضبط وضع الساعة الأفقي",
"Adjust vertical clock position": "ضبط وضع الساعة الرأسي",
"Export log as a file": "تصدير السجل كملف",
"Copy log to clipboard": "انسخ السجل الي الحافظة",
"Announcements:": "اعلانات",
"Fetching latest announcement, please wait...": "يأتي باحدث الاعلانات, برجاء الانتظار",
"Couldn't load the announcements. Please try again later": "لم يستطع تحميل الاعلانات. برجاء المحاولة لاحقا",
"ElevenClock's log": "سجل ElevenClock",
"Pick a color": "اختر لون"
}
lang_3 = lang_3_1 | {
"Hide the clock during 10 seconds when clicked": "اخفاء الساعة خلال 10 ثوان عند النقر فوقها",
"Enable low-cpu mode": "تفعيل وضع وحدة المعالجة المنخفضة",
"You might lose functionalities, like the notification counter or the dynamic background": "يمكن ان تخسر بعض الوظائف, مثل عدد الاشعارات او الخلفية الديناميكية",
"Clock position and size:": ":حجم و موضع الساعة",
"Clock size preferences, position offset, clock at the left, etc.": "تفضيلات حجم الساعة, ازاحة الموضع, الساعة علي اليسار, الخ.",
"Reset monitor blacklisting status": "إعادة تعيين حالة القائمة السوداء للشاشة",
"Reset": "إعادة تعيين",
"Third party licenses": "تراخيص الطرف الثالث",
"View": "معاينة",
"ElevenClock": "ElevenClock",
"Monitor tools": "أدوات المراقبة",
"Blacklist this monitor": "اضافة الشاشة الي القائمة السوداء",
"Third Party Open-Source Software in Elevenclock {0} (And their licenses)": "برامج الطرف الثالث مفتوحة المصدر في Elevenclock {0} (و ترخيصهم)",
"ElevenClock is an Open-Source application made with the help of other libraries made by the community:": "ElevenClock هو برنامج مفتوح المصدر صنع بمساعدة برمجيات اخري صنعها مجتمع المبرمجين",
"Ok": "حسنا",
"More Info": "مزيد من المعلومات",
"About Qt": "عن Qt",
"Success": "نجح",
"The monitors were unblacklisted successfully.": "الشاشة تم ازلتها من القائمة السوداء بنجاح",
"Now you should see the clock everywhere": "الان يمكنك روئية الشاشة في اي مكان",
"Ok": "حسنا",
"Blacklist Monitor": "اضافة الشاشة الي القائمة السوداء",
"Blacklisting a monitor will hide the clock on this monitor permanently.": "اضافة الشاشة الي القائمة السوداء سوف يخفي الساعة عليها نهائيا",
"This action can be reverted from the settings window. under <b>Clock position and size</b>": "هذا الاختيار يمكن الغائة من قائمة الاعدادات. تحت <b>حجم وموضع الساعة</b>",
"Are you sure do you want to blacklist the monitor \"{0}\"?": "هل انت متأكد من انك تريد اضافة الشاشة الي القائمة السوداء \"{0}\"?",
"Yes": "نعم",
"No": "لا",
}
lang_2_9_2 = lang_3 | {
"Reload log": "اعادة تحميل السجل",
"Do not show the clock on secondary monitors": "لا تظهر الساعه علي الشاشات الثانوية",
"Disable clock taskbar background color (make clock transparent)": "ازالة لون خلفية ساعة شريط المهام (حول الساعة الي شفافة)",
"Open the welcome wizard": "افتح مساعد الاعداد الترحيبي",
" (ALPHA STAGE, MAY NOT WORK)": " (في مرحلة الترجبة, يمكن الا يعمل)",
"Welcome to ElevenClock": "مرحبا بك في ElevenClock",
"Skip": "اختصر",
"Start": "ابداء",
"Next": "التالي",
"Finish": "انهاء",
}
lang_2_9 = lang_2_9_2 | {
"Task Manager": "مدير المهام",
"Change date and time": "تغير الوقت والتاريخ",
"Notification settings": "اعدادات الاشعارات",
"Updates, icon tray, language": "تحديثات, منطقة الاشعارات, اللغات",
"Hide extended options from the clock right-click menu (needs a restart to be aplied)": "إخفاء الخيارات الموسعة من قائمة النقر بزر الماوس الأيمن (يحتاج إلى إعادة تشغيل ليتم تطبيقها)",
"Fullscreen behaviour, clock position, 1st monitor clock, other miscellanious settings": "سلوك ملء الشاشة ، موضع الساعة ، ساعة الشاشة الرئيسية ، إعدادات متنوعة أخرى",
'Add the "Show Desktop" button on the left corner of every clock': 'أضف زر "إظهار سطح المكتب" في الزاوية اليسرى من كل ساعة',
'You might need to set a custom background color for this to work. More info <a href="{0}" style="color:DodgerBlue">HERE</a>': 'قد تحتاج إلى تعيين لون خلفية مخصص حتى يعمل هذا. ؛ مزيد من المعلومات <a href="{0}" style="color:DodgerBlue"> هنا </a>',
"Clock's font, font size, font color and background, text alignment": "خط الساعة وحجم الخط ولون الخط والخلفية ومحاذاة النص",
"Date format, Time format, seconds,weekday, weeknumber, regional settings": "تنسيق التاريخ ، تنسيق الوقت، الثواني، أيام الأسبوع، رقم الأسبوع، الإعدادات الإقليمية",
"Testing features and error-fixing tools": "ميزات الاختبار وأدوات إصلاح الأخطاء",
"Language pack author(s), help translating ElevenClock": "مؤلفو حزم اللغات ، ساعدوا في ترجمة ElevenClock",
"Info, report a bug, submit a feature request, donate, about": "معلومات، إبلاغ عن خطأ، إرسال طلب ميزة، تبرع، حول",
"Log, debugging information": "السجل، معلومات التصحيح",
}
lang_2_8 = lang_2_9 | {
"Force the clock to be at the top of the screen": "فرض الساعة في الجزء العلوي من الشاشة",
"Show the clock on the primary screen": "اعرض الساعة على الشاشة الرئيسية",
"Use a custom font color": "استخدم لون خط مخصص",
"Use a custom background color": "استخدم لون خلفية مخصص",
"Align the clock text to the center": "قم بمحاذاة نص الساعة إلى المركز",
"Select custom color": "حدد لونًا مخصصًا",
"Hide the clock when a program occupies all screens": "إخفاء الساعة عندما يشغل برنامج جميع الشاشات",
}
lang2_7_bis = lang_2_8 | {
"Use a custom font": "استخدم خطًا مخصصًا",
"Use a custom font size": "استخدم حجم خط مخصص",
"Enable hide when multi-monitor fullscreen apps are running": "تمكين الإخفاء عند تشغيل تطبيقات متعددة الشاشات في وضع ملء الشاشة",
"<b>{0}</b> needs to be enabled to change this setting": "<b>{0}</b> يحتاج إلى التمكين لتغيير هذا الإعداد",
"<b>{0}</b> needs to be disabled to change this setting": "<b>{0}</b> يحتاج إلى التعطيل لتغيير هذا الإعداد",
}
lang2_7 = lang2_7_bis | {
" (This feature has been disabled because it should work by default. If it is not, please report a bug)": "(تم تعطيل هذه الميزة لأنها يجب أن تعمل بشكل افتراضي. إذا لم يكن كذلك ، الرجاء الإبلاغ عن خطأ)",
"ElevenClock's language": "لغات ElevenClock"
}
lang2_6 = lang2_7 | {
"About Qt6 (PySide6)": "عن Qt6 (PySide6)",
"About": "عن",
"Alternative non-SSL update server (This might help with SSL errors)": "خادم تحديث بديل بدون SSL (يمكن ان يساعد ذلك في حل مشاكل SSL)",
"Fixes and other experimental features: (Use ONLY if something is not working)": "الإصلاحات والميزات التجريبية الأخرى: (استخدم فقط إذا كان هناك شيء لا يعمل)",
"Show week number on the clock": "إظهار رقم الأسبوع على الساعة"
}
lang2_5 = lang2_6 | {
"Hide the clock when RDP Client or Citrix Workspace are running": "إخفاء الساعة عند تشغيل RDP Client أو Citrix Workspace",
"Clock Appearance:": "مظهر الساعة",
"Force the clock to have black text": "اجبار الساعة علي استخدام لون خط اسود",
" - It is required that the Dark Text checkbox is disabled": "- يلزم الغاء تمكين اعداد الخط الاسود",
"Debbugging information:": "معلومات التصحيح",
"Open ElevenClock's log": "فتح سجل ElevenClock",
}
lang2_4 = lang2_5 | {
# Added text in version 2.4
"Show the clock on the primary screen (Useful if clock is set on the left)": "إظهار الساعة على الشاشة الرئيسية (مفيد إذا تم ضبط الساعة على اليسار)",
"Show weekday on the clock" :"عرض أيام الأسبوع على مدار الساعة",
}
lang2_3 = lang2_4 | {
#Context menu
"ElevenClock Settings" :"إعدادات ElevenClock", # Also settings title
"Reload Clocks" :"إعادة تحميل الساعات",
"ElevenClock v{0}" :"ElevenClock اصدار{0}",
"Restart ElevenClock" :"اعادة تشغيل ElevenClock",
"Hide ElevenClock" :"اخفاء ElevenClock",
"Quit ElevenClock" :"اغلاق ElevenClock",
#General settings section
"General Settings:" :"اعدادات عامة",
"Automatically check for updates" :"البحث عن التحديثات تلقائيا",
"Automatically install available updates" :"تثبيت التحديثات المتوفرة تلقائيا",
"Enable really silent updates" :"تفعيل التحديث بصمت",
"Bypass update provider authenticity check (NOT RECOMMENDED, AT YOUR OWN RISK)" :"تجاوز التحقق من مصداقية مزود التحديث (غير مستحسن ، على مسؤوليتك الخاصة)",
"Show ElevenClock on system tray" :"اظهار ElevenClock في منطقة الاشعارت",
"Alternative clock alignment (may not work)" :"محاذاة الساعة البديلة (قد لا تعمل)",
"Change startup behaviour" :"تغيير سلوك بدء التشغيل",
"Change" :"تغير",
"<b>Update to the latest version!</b>" :"<b>حدث الي احدث اصدار</b>",
"Install update" :"تثبيت التحديثات",
#Clock settings
"Clock Settings:" :"اعدادات الساعة",
"Hide the clock in fullscreen mode" :"اخفاء الساعة في وضع ملء الشاشة",
"Hide the clock when RDP client is active" :"إخفاء الساعة عندما يكون عميل RDP نشطًا",
"Force the clock to be at the bottom of the screen" :"فرض الساعة في أسفل الشاشة",
"Show the clock when the taskbar is set to hide automatically" :"إظهار الساعة عندما يكون شريط المهام مضبوطًا على الإخفاء تلقائيًا",
"Fix the hyphen/dash showing over the month" :"إصلاح الواصلة / الشرطة التي تظهر على مدار الشهر",
"Force the clock to have white text" :"إجبار الساعة على الحصول على لون خط أبيض",
"Show the clock at the left of the screen" :"اعرض الساعة على يسار الشاشة",
#Date & time settings
"Date & Time Settings:" :"إعدادات التاريخ والوقت:",
"Show seconds on the clock" :"عرض الثواني على الساعة",
"Show date on the clock" :"عرض التاريخ على الساعة",
"Show time on the clock" :"عرض الوقت على الساعة",
"Change date and time format (Regional settings)" :"تغيير تنسيق التاريخ والوقت (الإعدادات الإقليمية)",
"Regional settings" :"الإعدادات الإقليمية",
#About the language pack
"About the language pack:" :"حول حزمة اللغة:",
"Translated to English by martinet101" :"ترجمت الي الانجيليزية بفضل martinet101", # Here, make sute to give you some credits: Translated to LANGUAGE by USER/NAME/PSEUDONYM/etc.
"Translate ElevenClock to your language" :"ترجمت الي لغتك",
"Get started" :"البدء",
#About ElevenClock
"About ElevenClock version {0}:" :"حول إصدار ElevenClock {0}:",
"View ElevenClock's homepage" :"عرض الصفحة الرئيسية ElevenClock",
"Open" :"فتح",
"Report an issue/request a feature" :"ابلاغ عن مشكلة/طلب اضافة ميزة",
"Report" :"ابلاغ",
"Support the dev: Give me a coffee☕" :"☕ادعم المطور:اشتري لي كوب من القهوة",
"Open page" :"افتح صفحة",
"Icons by Icons8" :"الايقونات من Icon8", # Here, the word "Icons8" should not be translated
"Webpage" :"صفحة انترنت",
"Close settings" :"اغلاق الاعدادات",
"Close" :"اغلاق",
}
lang = lang2_3 | 61.533019 | 260 | 0.626064 |
88773cdd9ebb591788d7b9a95654ac46bfcd672b | 439 | py | Python | tests/integration/optimizers/pods/dummy_evaluate.py | facebbook/jina | e8079af3d58f1de0f51f8aef6cdf1eb3d87a9873 | [
"Apache-2.0"
] | null | null | null | tests/integration/optimizers/pods/dummy_evaluate.py | facebbook/jina | e8079af3d58f1de0f51f8aef6cdf1eb3d87a9873 | [
"Apache-2.0"
] | 2 | 2021-02-15T01:40:38.000Z | 2021-02-15T02:00:21.000Z | tests/integration/optimizers/pods/dummy_evaluate.py | facebbook/jina | e8079af3d58f1de0f51f8aef6cdf1eb3d87a9873 | [
"Apache-2.0"
] | null | null | null | from jina.executors.evaluators.text import BaseTextEvaluator
class DummyTextEvaluator(BaseTextEvaluator):
@property
def metric(self) -> str:
return 'DummyTextEvaluator'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def evaluate(self, actual: str, desired: str, *args, **kwargs) -> float:
if actual == desired:
return 1.0
else:
return 0.0
| 25.823529 | 76 | 0.624146 |
bba059e3dbc8e90b0b2d153768b801859b686326 | 837 | py | Python | preprocessing.py | swkang73/bcbn | 36218ed8073c33953f2de21698aeb689e82582cc | [
"MIT"
] | 1 | 2020-11-02T22:56:17.000Z | 2020-11-02T22:56:17.000Z | preprocessing.py | swkang73/bcbn | 36218ed8073c33953f2de21698aeb689e82582cc | [
"MIT"
] | null | null | null | preprocessing.py | swkang73/bcbn | 36218ed8073c33953f2de21698aeb689e82582cc | [
"MIT"
] | null | null | null | import numpy as np, pandas as pd
from scipy import stats
"""
Notes on Analysis:
- we have mean, se, & worst on radius, texture, perimeter, area, smoothness, compactness,
concavity, concave points, symmetry, fractal dimensions
1st preprocessing: normalize each columns using z-score
"""
print("Import data")
df = pd.read_csv("data.csv")
print("get z-score for each")
new_data = {
'id': df['id'],
'diagnosis': df['diagnosis']
}
print("create new attributes by z-score")
features = set(df.columns.to_list()) - set(["id", "diagnosis"])
for f in features:
if '_se' not in f:
zscores = stats.zscore(df[f])
new_data[f] = np.array(zscores > 0, dtype=int)
new_data['diagnosis'] = [int(v=="M") for v in df['diagnosis']]
print("export processed data")
ndf = pd.DataFrame.from_dict(new_data)
ndf.to_csv("pdata.csv", index=False)
| 23.25 | 89 | 0.694146 |
833da29d750a30bb6f824251a2ab64f9639f586d | 1,082 | py | Python | Pitch-Based Approach/preprocessing/midi_class_mapping.py | uvashisth/Sonification-using-Deep-Learning | a0917e785c35aa5fadcbb258e938c58071b4e482 | [
"MIT"
] | 2 | 2020-05-20T04:29:02.000Z | 2020-05-20T04:29:13.000Z | Pitch-Based Approach/preprocessing/midi_class_mapping.py | uvashisth/Sonification-using-Deep-Learning | a0917e785c35aa5fadcbb258e938c58071b4e482 | [
"MIT"
] | null | null | null | Pitch-Based Approach/preprocessing/midi_class_mapping.py | uvashisth/Sonification-using-Deep-Learning | a0917e785c35aa5fadcbb258e938c58071b4e482 | [
"MIT"
] | 1 | 2020-05-20T04:40:49.000Z | 2020-05-20T04:40:49.000Z |
class MidiClassMapping():
'''
'''
def __init__(self):
self.midi_list=[]
self.note_to_int={}
self.int_to_note={}
self.max_midi_value=0
self.min_midi_value=0
def midi_notes_to_class_mapping(self,notes,midi_notes_mapping):
'''
Parameters:
- notes (float) :
- midi_notes_mapping (torch.Tensor) :
Returns:
- two generators
'''
for note in notes:
for midi,note_value in midi_notes_mapping.items():
if str(note) in note_value:
self.midi_list.append(midi)
self.midi_list=sorted(self.midi_list)
self.max_midi_value=self.midi_list[len(self.midi_list)-1]
self.min_midi_value=self.midi_list[0]
self.note_to_int = dict((note, number) for number, note in enumerate(self.midi_list))
self.int_to_note={note:ii for ii,note in self.note_to_int.items()}
return self.note_to_int,self.int_to_note,self.max_midi_value,self.min_midi_value | 28.473684 | 93 | 0.596118 |
789673d3218bed2eef89fce2088d35215220e945 | 8,554 | py | Python | src/SALib/sample/saltelli.py | zjzh/SALib | b6b6b5cab3388f3b80590c98d66aca7dc784d894 | [
"MIT"
] | 573 | 2015-07-14T06:17:59.000Z | 2022-03-31T03:42:00.000Z | src/SALib/sample/saltelli.py | zjzh/SALib | b6b6b5cab3388f3b80590c98d66aca7dc784d894 | [
"MIT"
] | 339 | 2015-07-08T13:30:16.000Z | 2022-03-25T07:48:09.000Z | src/SALib/sample/saltelli.py | zjzh/SALib | b6b6b5cab3388f3b80590c98d66aca7dc784d894 | [
"MIT"
] | 191 | 2015-07-13T09:00:07.000Z | 2022-03-29T22:49:26.000Z | from typing import Dict, Optional
import math
import warnings
import numpy as np
from . import common_args
from . import sobol_sequence
from ..util import (scale_samples, read_param_file,
compute_groups_matrix, _check_groups)
def sample(problem: Dict, N: int, calc_second_order: bool = True,
skip_values: int = None):
"""Generates model inputs using Saltelli's extension of the Sobol' sequence.
The Sobol' sequence is a popular quasi-random low-discrepancy sequence used
to generate uniform samples of parameter space.
Returns a NumPy matrix containing the model inputs using Saltelli's sampling
scheme. Saltelli's scheme extends the Sobol' sequence in a way to reduce
the error rates in the resulting sensitivity index calculations. If
`calc_second_order` is False, the resulting matrix has ``N * (D + 2)`` rows,
where ``D`` is the number of parameters. If `calc_second_order` is `True`,
the resulting matrix has ``N * (2D + 2)`` rows. These model inputs are
intended to be used with :func:`SALib.analyze.sobol.analyze`.
Notes
-----
The initial points of the Sobol' sequence has some repetition (see Table 2
in Campolongo [1]), which can be avoided by setting the `skip_values`
parameter. Skipping values reportedly improves the uniformity of samples. It
has been shown, however, that naively skipping values may reduce accuracy,
increasing the number of samples needed to achieve convergence (see Owen [2]).
A recommendation adopted here is that both `skip_values` and `N` be a power
of 2, where `N` is the desired number of samples (see [2] and discussion in
[5] for further context). It is also suggested therein that
``skip_values >= N``.
The method now defaults to setting `skip_values` to a power of two that is
``>= N``. If `skip_values` is provided, the method now raises a UserWarning
in cases where sample sizes may be sub-optimal according to the recommendation
above.
Parameters
----------
problem : dict
The problem definition
N : int
The number of samples to generate.
Ideally a power of 2 and <= `skip_values`.
calc_second_order : bool
Calculate second-order sensitivities (default True)
skip_values : int or None
Number of points in Sobol' sequence to skip, ideally a value of base 2
(default: a power of 2 >= N, or 16; whichever is greater)
References
----------
.. [1] Campolongo, F., Saltelli, A., Cariboni, J., 2011.
From screening to quantitative sensitivity analysis. A unified approach.
Computer Physics Communications 182, 978–988.
https://doi.org/10.1016/j.cpc.2010.12.039
.. [2] Owen, A. B., 2020.
On dropping the first Sobol' point.
arXiv:2008.08051 [cs, math, stat].
Available at: http://arxiv.org/abs/2008.08051 (Accessed: 20 April 2021).
.. [3] Saltelli, A., 2002.
Making best use of model evaluations to compute sensitivity indices.
Computer Physics Communications 145, 280–297.
https://doi.org/10.1016/S0010-4655(02)00280-1
.. [4] Sobol', I.M., 2001.
Global sensitivity indices for nonlinear mathematical models and
their Monte Carlo estimates.
Mathematics and Computers in Simulation,
The Second IMACS Seminar on Monte Carlo Methods 55, 271–280.
https://doi.org/10.1016/S0378-4754(00)00270-6
.. [5] Discussion: https://github.com/scipy/scipy/pull/10844
https://github.com/scipy/scipy/pull/10844#issuecomment-672186615
https://github.com/scipy/scipy/pull/10844#issuecomment-673029539
"""
# bit-shift test to check if `N` == 2**n
if not ((N & (N-1) == 0) and (N != 0 and N-1 != 0)):
msg = f"""
Convergence properties of the Sobol' sequence is only valid if
`N` ({N}) is equal to `2^n`.
"""
warnings.warn(msg)
if skip_values is None:
# If not specified, set skip_values to next largest power of 2
skip_values = int(2**math.ceil(math.log(N)/math.log(2)))
# 16 is arbitrarily selected here to avoid initial points
# for very low sample sizes
skip_values = max(skip_values, 16)
elif skip_values > 0:
M = skip_values
if not ((M & (M-1) == 0) and (M != 0 and M-1 != 0)):
msg = f"""
Convergence properties of the Sobol' sequence is only valid if
`skip_values` ({M}) is a power of 2.
"""
warnings.warn(msg)
# warning when N > skip_values
# (see: https://github.com/scipy/scipy/pull/10844#issuecomment-673029539)
n_exp = int(math.log(N, 2))
m_exp = int(math.log(M, 2))
if n_exp > m_exp:
msg = (
"Convergence may not be valid as the number of requested samples is"
f" > `skip_values` ({N} > {M})."
)
warnings.warn(msg)
elif skip_values == 0:
warnings.warn("Duplicate samples will be taken as no points are skipped.")
else:
assert isinstance(skip_values, int) and skip_values >= 0, \
"`skip_values` must be a positive integer."
D = problem['num_vars']
groups = _check_groups(problem)
if not groups:
Dg = problem['num_vars']
else:
G, group_names = compute_groups_matrix(groups)
Dg = len(set(group_names))
# Create base sequence - could be any type of sampling
base_sequence = sobol_sequence.sample(N + skip_values, 2 * D)
if calc_second_order:
saltelli_sequence = np.zeros([(2 * Dg + 2) * N, D])
else:
saltelli_sequence = np.zeros([(Dg + 2) * N, D])
index = 0
for i in range(skip_values, N + skip_values):
# Copy matrix "A"
for j in range(D):
saltelli_sequence[index, j] = base_sequence[i, j]
index += 1
# Cross-sample elements of "B" into "A"
for k in range(Dg):
for j in range(D):
if (not groups and j == k) or (groups and group_names[k] == groups[j]):
saltelli_sequence[index, j] = base_sequence[i, j + D]
else:
saltelli_sequence[index, j] = base_sequence[i, j]
index += 1
# Cross-sample elements of "A" into "B"
# Only needed if you're doing second-order indices (true by default)
if calc_second_order:
for k in range(Dg):
for j in range(D):
if (not groups and j == k) or (groups and group_names[k] == groups[j]):
saltelli_sequence[index, j] = base_sequence[i, j]
else:
saltelli_sequence[index, j] = base_sequence[i, j + D]
index += 1
# Copy matrix "B"
for j in range(D):
saltelli_sequence[index, j] = base_sequence[i, j + D]
index += 1
saltelli_sequence = scale_samples(saltelli_sequence, problem)
return saltelli_sequence
def cli_parse(parser):
"""Add method specific options to CLI parser.
Parameters
----------
parser : argparse object
Returns
----------
Updated argparse object
"""
parser.add_argument('--max-order', type=int, required=False, default=2,
choices=[1, 2],
help='Maximum order of sensitivity indices \
to calculate')
parser.add_argument('--skip-values', type=int, required=False, default=None,
help='Number of sample points to skip (default: next largest power of 2 from `samples`)')
# hacky way to remove an argument (seed option is not relevant for Saltelli)
remove_opts = [x for x in parser._actions if x.dest == 'seed']
[parser._handle_conflict_resolve(None, [('--seed', x), ('-s', x)]) for x in remove_opts]
return parser
def cli_action(args):
"""Run sampling method
Parameters
----------
args : argparse namespace
"""
problem = read_param_file(args.paramfile)
param_values = sample(problem, args.samples,
calc_second_order=(args.max_order == 2),
skip_values=args.skip_values)
np.savetxt(args.output, param_values, delimiter=args.delimiter,
fmt='%.' + str(args.precision) + 'e')
if __name__ == "__main__":
common_args.run_cli(cli_parse, cli_action)
| 37.353712 | 113 | 0.610475 |
9716883542d661670cb7725d96e5a421f9159667 | 23,887 | py | Python | UMLRT2Kiltera_MM/Properties/Pattern/models/Trans2HListenBranchOUT_Connected_MDL.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 3 | 2017-06-02T19:26:27.000Z | 2021-06-14T04:25:45.000Z | UMLRT2Kiltera_MM/Properties/Pattern/models/Trans2HListenBranchOUT_Connected_MDL.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 8 | 2016-08-24T07:04:07.000Z | 2017-05-26T16:22:47.000Z | UMLRT2Kiltera_MM/Properties/Pattern/models/Trans2HListenBranchOUT_Connected_MDL.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 1 | 2019-10-31T06:00:23.000Z | 2019-10-31T06:00:23.000Z | """
__Trans2HListenBranchOUT_Connected_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: gehan
Modified: Mon Mar 9 11:38:19 2015
______________________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from LHS import *
from MT_pre__OUT2 import *
from MT_pre__Signal import *
from MT_pre__directLink_S import *
from MT_pre__Transition import *
from MT_pre__Trigger_S import *
from graph_MT_pre__Trigger_S import *
from graph_MT_pre__Signal import *
from graph_LHS import *
from graph_MT_pre__Transition import *
from graph_MT_pre__directLink_S import *
from graph_MT_pre__OUT2 import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def Trans2HListenBranchOUT_Connected_MDL(self, rootNode, MT_pre__UMLRT2Kiltera_MMRootNode=None, MoTifRuleRootNode=None):
# --- Generating attributes code for ASG MT_pre__UMLRT2Kiltera_MM ---
if( MT_pre__UMLRT2Kiltera_MMRootNode ):
# author
MT_pre__UMLRT2Kiltera_MMRootNode.author.setValue('Annonymous')
# description
MT_pre__UMLRT2Kiltera_MMRootNode.description.setValue('\n')
MT_pre__UMLRT2Kiltera_MMRootNode.description.setHeight(15)
# name
MT_pre__UMLRT2Kiltera_MMRootNode.name.setValue('')
MT_pre__UMLRT2Kiltera_MMRootNode.name.setNone()
# --- ASG attributes over ---
# --- Generating attributes code for ASG MoTifRule ---
if( MoTifRuleRootNode ):
# author
MoTifRuleRootNode.author.setValue('Annonymous')
# description
MoTifRuleRootNode.description.setValue('\n')
MoTifRuleRootNode.description.setHeight(15)
# name
MoTifRuleRootNode.name.setValue('Trans2HListenBranchOUT_Connected')
# --- ASG attributes over ---
self.obj63309=LHS(self)
self.obj63309.isGraphObjectVisual = True
if(hasattr(self.obj63309, '_setHierarchicalLink')):
self.obj63309._setHierarchicalLink(False)
# constraint
self.obj63309.constraint.setValue('#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# The given constraint must evaluate to a boolean expression:\n# returning True enables the rule to be applied,\n# returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n')
self.obj63309.constraint.setHeight(15)
self.obj63309.graphClass_= graph_LHS
if self.genGraphics:
new_obj = graph_LHS(20.0,20.0,self.obj63309)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("LHS", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj63309.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj63309)
self.globalAndLocalPostcondition(self.obj63309, rootNode)
self.obj63309.postAction( rootNode.CREATE )
self.obj63319=MT_pre__OUT2(self)
self.obj63319.isGraphObjectVisual = True
if(hasattr(self.obj63319, '_setHierarchicalLink')):
self.obj63319._setHierarchicalLink(False)
# MT_label__
self.obj63319.MT_label__.setValue('8')
# MT_pivotOut__
self.obj63319.MT_pivotOut__.setValue('element2')
# MT_subtypeMatching__
self.obj63319.MT_subtypeMatching__.setValue(('True', 0))
self.obj63319.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj63319.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63319.MT_pre__classtype.setHeight(15)
# MT_pre__cardinality
self.obj63319.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63319.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj63319.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63319.MT_pre__name.setHeight(15)
# MT_pivotIn__
self.obj63319.MT_pivotIn__.setValue('element2')
self.obj63319.graphClass_= graph_MT_pre__OUT2
if self.genGraphics:
new_obj = graph_MT_pre__OUT2(60.0,240.0,self.obj63319)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__OUT2", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj63319.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj63319)
self.globalAndLocalPostcondition(self.obj63319, rootNode)
self.obj63319.postAction( rootNode.CREATE )
self.obj63310=MT_pre__Signal(self)
self.obj63310.isGraphObjectVisual = True
if(hasattr(self.obj63310, '_setHierarchicalLink')):
self.obj63310._setHierarchicalLink(False)
# MT_label__
self.obj63310.MT_label__.setValue('4')
# MT_pivotOut__
self.obj63310.MT_pivotOut__.setValue('element4')
# MT_subtypeMatching__
self.obj63310.MT_subtypeMatching__.setValue(('True', 0))
self.obj63310.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj63310.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63310.MT_pre__classtype.setHeight(15)
# MT_pre__cardinality
self.obj63310.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63310.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj63310.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63310.MT_pre__name.setHeight(15)
# MT_pivotIn__
self.obj63310.MT_pivotIn__.setValue('element4')
self.obj63310.graphClass_= graph_MT_pre__Signal
if self.genGraphics:
new_obj = graph_MT_pre__Signal(240.0,200.0,self.obj63310)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__Signal", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj63310.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj63310)
self.globalAndLocalPostcondition(self.obj63310, rootNode)
self.obj63310.postAction( rootNode.CREATE )
self.obj63313=MT_pre__directLink_S(self)
self.obj63313.isGraphObjectVisual = True
if(hasattr(self.obj63313, '_setHierarchicalLink')):
self.obj63313._setHierarchicalLink(False)
# MT_label__
self.obj63313.MT_label__.setValue('6')
# MT_pivotOut__
self.obj63313.MT_pivotOut__.setValue('')
self.obj63313.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj63313.MT_subtypeMatching__.setValue(('True', 0))
self.obj63313.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj63313.MT_pivotIn__.setValue('')
self.obj63313.MT_pivotIn__.setNone()
# MT_pre__associationType
self.obj63313.MT_pre__associationType.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63313.MT_pre__associationType.setHeight(15)
self.obj63313.graphClass_= graph_MT_pre__directLink_S
if self.genGraphics:
new_obj = graph_MT_pre__directLink_S(327.0,135.0,self.obj63313)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__directLink_S", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj63313.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj63313)
self.globalAndLocalPostcondition(self.obj63313, rootNode)
self.obj63313.postAction( rootNode.CREATE )
self.obj63314=MT_pre__directLink_S(self)
self.obj63314.isGraphObjectVisual = True
if(hasattr(self.obj63314, '_setHierarchicalLink')):
self.obj63314._setHierarchicalLink(False)
# MT_label__
self.obj63314.MT_label__.setValue('7')
# MT_pivotOut__
self.obj63314.MT_pivotOut__.setValue('')
self.obj63314.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj63314.MT_subtypeMatching__.setValue(('True', 0))
self.obj63314.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj63314.MT_pivotIn__.setValue('')
self.obj63314.MT_pivotIn__.setNone()
# MT_pre__associationType
self.obj63314.MT_pre__associationType.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63314.MT_pre__associationType.setHeight(15)
self.obj63314.graphClass_= graph_MT_pre__directLink_S
if self.genGraphics:
new_obj = graph_MT_pre__directLink_S(427.0,215.0,self.obj63314)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__directLink_S", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj63314.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj63314)
self.globalAndLocalPostcondition(self.obj63314, rootNode)
self.obj63314.postAction( rootNode.CREATE )
self.obj63322=MT_pre__directLink_S(self)
self.obj63322.isGraphObjectVisual = True
if(hasattr(self.obj63322, '_setHierarchicalLink')):
self.obj63322._setHierarchicalLink(False)
# MT_label__
self.obj63322.MT_label__.setValue('9')
# MT_pivotOut__
self.obj63322.MT_pivotOut__.setValue('')
self.obj63322.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj63322.MT_subtypeMatching__.setValue(('True', 0))
self.obj63322.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj63322.MT_pivotIn__.setValue('')
self.obj63322.MT_pivotIn__.setNone()
# MT_pre__associationType
self.obj63322.MT_pre__associationType.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63322.MT_pre__associationType.setHeight(15)
self.obj63322.graphClass_= graph_MT_pre__directLink_S
if self.genGraphics:
new_obj = graph_MT_pre__directLink_S(247.0,215.0,self.obj63322)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__directLink_S", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj63322.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj63322)
self.globalAndLocalPostcondition(self.obj63322, rootNode)
self.obj63322.postAction( rootNode.CREATE )
self.obj63315=MT_pre__Transition(self)
self.obj63315.isGraphObjectVisual = True
if(hasattr(self.obj63315, '_setHierarchicalLink')):
self.obj63315._setHierarchicalLink(False)
# MT_pivotOut__
self.obj63315.MT_pivotOut__.setValue('element1')
# MT_subtypeMatching__
self.obj63315.MT_subtypeMatching__.setValue(('True', 0))
self.obj63315.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj63315.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63315.MT_pre__classtype.setHeight(15)
# MT_pivotIn__
self.obj63315.MT_pivotIn__.setValue('element1')
# MT_label__
self.obj63315.MT_label__.setValue('1')
# MT_pre__cardinality
self.obj63315.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63315.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj63315.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63315.MT_pre__name.setHeight(15)
self.obj63315.graphClass_= graph_MT_pre__Transition
if self.genGraphics:
new_obj = graph_MT_pre__Transition(40.0,40.0,self.obj63315)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__Transition", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj63315.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj63315)
self.globalAndLocalPostcondition(self.obj63315, rootNode)
self.obj63315.postAction( rootNode.CREATE )
self.obj63316=MT_pre__Trigger_S(self)
self.obj63316.isGraphObjectVisual = True
if(hasattr(self.obj63316, '_setHierarchicalLink')):
self.obj63316._setHierarchicalLink(False)
# MT_label__
self.obj63316.MT_label__.setValue('3')
# MT_pivotOut__
self.obj63316.MT_pivotOut__.setValue('element3')
# MT_subtypeMatching__
self.obj63316.MT_subtypeMatching__.setValue(('True', 0))
self.obj63316.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj63316.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63316.MT_pre__classtype.setHeight(15)
# MT_pre__cardinality
self.obj63316.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63316.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj63316.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj63316.MT_pre__name.setHeight(15)
# MT_pivotIn__
self.obj63316.MT_pivotIn__.setValue('element3')
self.obj63316.graphClass_= graph_MT_pre__Trigger_S
if self.genGraphics:
new_obj = graph_MT_pre__Trigger_S(220.0,80.0,self.obj63316)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__Trigger_S", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj63316.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj63316)
self.globalAndLocalPostcondition(self.obj63316, rootNode)
self.obj63316.postAction( rootNode.CREATE )
# Connections for obj63309 (graphObject_: Obj49) of type LHS
self.drawConnections(
)
# Connections for obj63319 (graphObject_: Obj57) of type MT_pre__OUT2
self.drawConnections(
)
# Connections for obj63310 (graphObject_: Obj50) of type MT_pre__Signal
self.drawConnections(
)
# Connections for obj63313 (graphObject_: Obj53) of type MT_pre__directLink_S
self.drawConnections(
(self.obj63313,self.obj63316,[327.0, 135.0, 417.0, 155.0],"true", 2) )
# Connections for obj63314 (graphObject_: Obj54) of type MT_pre__directLink_S
self.drawConnections(
(self.obj63314,self.obj63310,[427.0, 215.0, 437.0, 275.0],"true", 2) )
# Connections for obj63322 (graphObject_: Obj58) of type MT_pre__directLink_S
self.drawConnections(
(self.obj63322,self.obj63319,[247.0, 215.0, 257.0, 315.0],"true", 2) )
# Connections for obj63315 (graphObject_: Obj55) of type MT_pre__Transition
self.drawConnections(
(self.obj63315,self.obj63313,[237.0, 115.0, 327.0, 135.0],"true", 2),
(self.obj63315,self.obj63322,[237.0, 115.0, 247.0, 215.0],"true", 2) )
# Connections for obj63316 (graphObject_: Obj56) of type MT_pre__Trigger_S
self.drawConnections(
(self.obj63316,self.obj63314,[417.0, 155.0, 427.0, 215.0],"true", 2) )
newfunction = Trans2HListenBranchOUT_Connected_MDL
loadedMMName = ['MT_pre__UMLRT2Kiltera_MM_META', 'MoTifRule_META']
atom3version = '0.3'
| 55.810748 | 632 | 0.668439 |
955ee96aae627593bd21e6f4da4a83915d7beb50 | 637 | py | Python | lesson05/louxiaohui/configmgt.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson05/louxiaohui/configmgt.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson05/louxiaohui/configmgt.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
def readconfig(filename, section, key=None):
    """Read one option or a whole section from an INI config file.

    Args:
        filename: path of the INI file to read.
        section: section name to look up.
        key: optional option name; when omitted the whole section is returned.

    Returns:
        (value, True) on success -- the option string when `key` is given,
        otherwise a dict of all options in the section;
        (error message, False) when the section or the key does not exist.
    """
    config = configparser.ConfigParser()
    config.read(filename)
    # An empty file has no sections, so the membership test alone covers the
    # "config is empty" case as well.
    if section not in config.sections():
        return "config init is empty or section {} does not exist".format(section), False
    if key:
        # Return the same (message, False) error shape instead of letting
        # configparser.NoOptionError escape for a missing key.
        if not config.has_option(section, key):
            return "key {} does not exist in section {}".format(key, section), False
        return config.get(section, key), True
    return dict(config.items(section)), True
#result, ok = readconfig('config.ini', 'dbinfo')
#print(result)
| 26.541667 | 89 | 0.686028 |
fd9148e2115442292afbca3d3f65b37c5d40268d | 4,099 | py | Python | authome/backends.py | dbca-wa/authome | 7c1c8864e5449f360b80acb9583adfe59a77167e | [
"Apache-2.0"
] | 2 | 2019-07-02T06:23:54.000Z | 2021-12-14T04:02:40.000Z | authome/backends.py | dbca-wa/authome | 7c1c8864e5449f360b80acb9583adfe59a77167e | [
"Apache-2.0"
] | 9 | 2019-02-18T03:19:54.000Z | 2022-03-31T06:10:36.000Z | authome/backends.py | dbca-wa/authome | 7c1c8864e5449f360b80acb9583adfe59a77167e | [
"Apache-2.0"
] | 6 | 2019-01-22T08:10:16.000Z | 2022-02-15T02:42:03.000Z | import logging
import urllib.parse
import re
from django.conf import settings
from social_core.backends import azuread_b2c
from social_core.exceptions import AuthException
from .models import IdentityProvider, CustomizableUserflow
from .utils import get_redirect_domain
from .exceptions import AzureADB2CAuthenticateFailed
logger = logging.getLogger(__name__)
class AzureADB2COAuth2(azuread_b2c.AzureADB2COAuth2):
    """Azure AD B2C OAuth2 backend with a per-request B2C policy (userflow).

    The policy is resolved from the request's redirect domain and the user's
    preferred identity-provider cookie, and is substituted into the endpoint
    URLs through `base_url`.
    """

    AUTHORIZATION_URL = '{base_url}/oauth2/v2.0/authorize'
    OPENID_CONFIGURATION_URL = '{base_url}/v2.0/.well-known/openid-configuration'
    ACCESS_TOKEN_URL = '{base_url}/oauth2/v2.0/token'
    JWKS_URL = '{base_url}/discovery/v2.0/keys'
    LOGOUT_URL = '{base_url}/oauth2/v2.0/logout?post_logout_redirect_uri={{}}'

    @property
    def policy(self):
        """Return the B2C policy (userflow) name for the current request.

        Resolution order:
          1. `request.policy` if already attached to the request.
          2. The fixed userflow configured for the request's redirect domain.
          3. The domain's default userflow when no redirect domain is present.
          4. The userflow of the user's preferred identity provider (cookie),
             falling back to the domain's default userflow.

        Raises:
            AuthException: if the resolved policy is empty or does not start
                with the required `b2c_` prefix.
        """
        request = self.strategy.request
        if hasattr(request, "policy"):
            policy = request.policy
        else:
            domain = get_redirect_domain(request)
            userflow = CustomizableUserflow.get_userflow(domain)
            if userflow.fixed:
                logger.debug("Use the fixed userflow({1}.{2}) for domain({0})".format(domain,userflow.domain,userflow.fixed))
                policy = userflow.fixed
            elif not domain:
                logger.debug("Use the default userflow({1}.{2}) for domain({0})".format(domain,userflow.domain,userflow.default))
                policy = userflow.default
            else:
                idp = request.COOKIES.get(settings.PREFERED_IDP_COOKIE_NAME, None)
                idp = IdentityProvider.get_idp(idp)
                if idp and idp.userflow:
                    if idp == IdentityProvider.LOCAL_PROVIDER:
                        policy = userflow.email or idp.userflow
                    else:
                        policy = idp.userflow
                else:
                    policy = userflow.default
                logger.debug("Prefered idp is '{}', Choosed userflow is '{}', request domain is '{}' ".format(idp,policy,domain))

        if not policy or not policy.lower().startswith('b2c_'):
            raise AuthException('SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_POLICY is '
                                'required and should start with `b2c_`')

        return policy

    @property
    def base_url(self):
        """Base endpoint URL with the resolved policy appended.

        Note: the original implementation had an unreachable branch after the
        `return` that special-cased policies starting with "B2C_1_"; that dead
        code has been removed without changing behavior.
        """
        return "{}/{}".format(self.setting('BASE_URL'), self.policy)

    def get_profile_edit_url(self, next_url, policy='B2C_1_email_profile'):
        """Build the B2C profile-edit authorize URL redirecting back to `next_url`."""
        return "{base_url}/oauth2/v2.0/authorize?client_id={client_id}&redirect_uri={next_url}&scope=openid+email&response_type=code".format(
            base_url=self.setting('BASE_URL').format(policy),
            client_id=self.setting('KEY'),
            next_url=urllib.parse.quote(next_url)
        )

    @property
    def logout_url(self):
        """Logout endpoint URL template (post-logout redirect filled in later)."""
        return self.LOGOUT_URL.format(base_url=self.base_url)

    def auth_extra_arguments(self):
        """
        Return extra arguments needed on auth process.

        The defaults can be overridden by GET parameters.
        """
        extra_arguments = super(AzureADB2COAuth2, self).auth_extra_arguments()
        return extra_arguments

    # Extracts a leading error code such as "AADB2C90118" from the provider's
    # error_description. Raw string avoids invalid-escape warnings for \s.
    error_re = re.compile(r"^\s*(?P<code>[A-Z0-9]+)\s*:")

    def process_error(self, data):
        """Wrap upstream auth errors in AzureADB2CAuthenticateFailed.

        The provider's `error_description` query parameter is parsed for a
        leading error code; the request's http_error_code/message are used
        when present, otherwise a generic 400 failure is raised.
        """
        try:
            super().process_error(data)
        except Exception as ex:
            error = self.strategy.request.GET.get("error_description")
            error_code = None
            if error:
                m = self.error_re.search(error)
                if m:
                    error_code = m.group('code')
            if hasattr(self.strategy.request, "http_error_code"):
                raise AzureADB2CAuthenticateFailed(
                    self.strategy.request,
                    self.strategy.request.http_error_code,
                    error_code,
                    self.strategy.request.http_error_message,
                    ex)
            else:
                raise AzureADB2CAuthenticateFailed(
                    self.strategy.request, 400, error_code,
                    "Failed to authenticate the user.{}", ex)
| 40.99 | 166 | 0.636253 |
6632651f11033f17a3e57d1478455ca9b94fba09 | 629 | py | Python | octopus/preprocessing/image.py | kimsup10/octopus | 9b646625d36ff26de0720555a06bec7d47784334 | [
"MIT"
] | null | null | null | octopus/preprocessing/image.py | kimsup10/octopus | 9b646625d36ff26de0720555a06bec7d47784334 | [
"MIT"
] | null | null | null | octopus/preprocessing/image.py | kimsup10/octopus | 9b646625d36ff26de0720555a06bec7d47784334 | [
"MIT"
] | null | null | null | from operator import itemgetter
import numpy as np
from PIL.Image import Image
from keras.preprocessing import image
from keras.applications.resnet50 import (ResNet50,
preprocess_input,
decode_predictions)
from . import process
# Shared ResNet50 classifier, pretrained on ImageNet, loaded once at import.
model = ResNet50(weights='imagenet')
@process.register(Image)
def process(img):
    """Classify a PIL image with ResNet50 and return the labels of its top-3 predictions."""
    batch = np.expand_dims(image.img_to_array(img.resize((224, 224))), axis=0)
    predictions = model.predict(preprocess_input(batch))
    top3 = decode_predictions(predictions, top=3)[0]
    # Each prediction tuple's second field is the human-readable label.
    return [entry[1] for entry in top3]
| 28.590909 | 60 | 0.647059 |
14cca30aaa322e0ca8993d37bb1572e15bd2820c | 826 | py | Python | kaifa_cnv/cnvdlinshi.py | AluuLL/initial-exper_python | 048d78b2aa0af0947d4232e46c3bdded2cb63180 | [
"MIT"
] | null | null | null | kaifa_cnv/cnvdlinshi.py | AluuLL/initial-exper_python | 048d78b2aa0af0947d4232e46c3bdded2cb63180 | [
"MIT"
] | null | null | null | kaifa_cnv/cnvdlinshi.py | AluuLL/initial-exper_python | 048d78b2aa0af0947d4232e46c3bdded2cb63180 | [
"MIT"
] | null | null | null | file = open("/disk/lulu/database/cnvd/pro/linsh.pro.txt")
#f = open("out.txt", "w")
for line in file:
# print line,
list = line.strip().split("\t")
# print list
if "|" in list[0]:
list1 = list[0].strip().split("|")
print list1[0],"\t",list[1],"\t",list[2],"\t",list[3],"\t",list[4],"\t",list[5],"\t",list[6],"\t",list[7]
print list1[1],"\t",list[1],"\t",list[2],"\t",list[3],"\t",list[4],"\t",list[5],"\t",list[6],"\t",list[7]
elif ","in list[0]:
list1 = list[0].strip().split(",")
print list1[0], "\t", list[1], "\t", list[2], "\t", list[3], "\t", list[4], "\t", list[5], "\t", list[6], "\t", \
list[7]
print list1[1], "\t", list[1], "\t", list[2], "\t", list[3], "\t", list[4], "\t", list[5], "\t", list[6], "\t", \
list[7]
| 45.888889 | 122 | 0.455206 |
de7f1fef96f85ea506f8993ec195a2c1f1a01ba4 | 7,111 | py | Python | tests/cli/test_auth.py | tedmiston/prefect | a2cb40c28c942b1d170db42a55bab99598a4dcd6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/cli/test_auth.py | tedmiston/prefect | a2cb40c28c942b1d170db42a55bab99598a4dcd6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/cli/test_auth.py | tedmiston/prefect | a2cb40c28c942b1d170db42a55bab99598a4dcd6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import uuid
from unittest.mock import MagicMock
import click
import pytest
from click.testing import CliRunner
import prefect
from prefect.cli.auth import auth
from prefect.utilities.configuration import set_temporary_config
def test_auth_init():
    """Invoking the bare `auth` group succeeds and prints its description."""
    outcome = CliRunner().invoke(auth)
    assert "Handle Prefect Cloud authorization." in outcome.output
    assert outcome.exit_code == 0
def test_auth_help():
    """`auth --help` succeeds and prints the group description."""
    outcome = CliRunner().invoke(auth, ["--help"])
    assert "Handle Prefect Cloud authorization." in outcome.output
    assert outcome.exit_code == 0
def test_auth_login(patch_post, monkeypatch):
    """A successful `auth login --token` exits with code 0."""
    membership = dict(default_membership=dict(tenant_id=str(uuid.uuid4())))
    patch_post(dict(data=dict(tenant="id", user=[membership])))

    mock_client = MagicMock()
    mock_client.return_value.login_to_tenant = MagicMock(return_value=True)
    monkeypatch.setattr("prefect.cli.auth.Client", mock_client)

    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        outcome = CliRunner().invoke(auth, ["login", "--token", "test"])
    assert outcome.exit_code == 0
def test_auth_login_client_error(patch_post):
    """A GraphQL error during login surfaces a communication-error message."""
    patch_post(dict(errors=dict(error="bad")))

    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        outcome = CliRunner().invoke(auth, ["login", "--token", "test"])
    assert outcome.exit_code == 0
    assert "Error attempting to communicate with Prefect Cloud" in outcome.output
def test_auth_logout_after_login(patch_post, monkeypatch):
    """Logging out immediately after a successful login exits cleanly."""
    membership = dict(default_membership=dict(tenant_id=str(uuid.uuid4())))
    patch_post(dict(data=dict(tenant="id", user=[membership])))

    mock_client = MagicMock()
    mock_client.return_value.login_to_tenant = MagicMock(return_value=True)
    monkeypatch.setattr("prefect.cli.auth.Client", mock_client)

    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        cli = CliRunner()
        assert cli.invoke(auth, ["login", "--token", "test"]).exit_code == 0
        assert cli.invoke(auth, ["logout"], input="Y").exit_code == 0
def test_auth_logout_not_confirm(patch_post):
    """Declining the logout confirmation aborts with exit code 1."""
    patch_post(dict(data=dict(tenant="id")))

    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        outcome = CliRunner().invoke(auth, ["logout"], input="N")
    assert outcome.exit_code == 1
def test_auth_logout_no_active_tenant(patch_post):
    """Logging out with no active tenant prints an explanatory message."""
    patch_post(dict(data=dict(tenant="id")))

    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        outcome = CliRunner().invoke(auth, ["logout"], input="Y")
    assert outcome.exit_code == 0
    assert "No tenant currently active" in outcome.output
def test_list_tenants(patch_post):
    """`list-tenants` prints the id, slug and name of each tenant."""
    tenants = [{"id": "id", "slug": "slug", "name": "name"}]
    switch = {
        "access_token": "access_token",
        "expires_in": "expires_in",
        "refresh_token": "refresh_token",
    }
    patch_post(dict(data=dict(tenant=tenants, switch_tenant=switch)))

    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        outcome = CliRunner().invoke(auth, ["list-tenants"])
    assert outcome.exit_code == 0
    for expected in ("id", "slug", "name"):
        assert expected in outcome.output
def test_switch_tenants_success(monkeypatch):
    """Switching tenants succeeds when the client accepts the tenant login.

    Renamed from `test_switch_tenants`: a second test with that exact name is
    defined just below, which shadowed this one so it never ran.
    """
    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        monkeypatch.setattr("prefect.cli.auth.Client", MagicMock())

        runner = CliRunner()
        result = runner.invoke(auth, ["switch-tenants", "--slug", "slug"])
        assert result.exit_code == 0
        assert "Tenant switched" in result.output
def test_switch_tenants(monkeypatch):
    """Switching tenants reports failure when the tenant login fails."""
    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        failing_client = MagicMock()
        failing_client.return_value.login_to_tenant = MagicMock(return_value=False)
        monkeypatch.setattr("prefect.cli.auth.Client", failing_client)

        outcome = CliRunner().invoke(auth, ["switch-tenants", "--slug", "slug"])
        assert outcome.exit_code == 0
        assert "Unable to switch tenant" in outcome.output
def test_create_token(patch_post):
    """`create-token` prints the token returned by the API."""
    patch_post(dict(data=dict(create_api_token={"token": "token"})))

    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        outcome = CliRunner().invoke(auth, ["create-token", "-n", "name", "-s", "scope"])
    assert outcome.exit_code == 0
    assert "token" in outcome.output
def test_create_token_fails(patch_post):
    """`create-token` reports an issue when the API returns no data."""
    patch_post(dict())

    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        outcome = CliRunner().invoke(auth, ["create-token", "-n", "name", "-s", "scope"])
    assert outcome.exit_code == 0
    assert "Issue creating API token" in outcome.output
def test_list_tokens(patch_post):
    """`list-tokens` prints the id and name of each API token."""
    patch_post(dict(data=dict(api_token=[{"id": "id", "name": "name"}])))

    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        outcome = CliRunner().invoke(auth, ["list-tokens"])
    assert outcome.exit_code == 0
    for expected in ("id", "name"):
        assert expected in outcome.output
def test_list_tokens_fails(patch_post):
    """`list-tokens` reports failure when the API returns no data."""
    patch_post(dict())

    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        outcome = CliRunner().invoke(auth, ["list-tokens"])
    assert outcome.exit_code == 0
    assert "Unable to list API tokens" in outcome.output
def test_revoke_token(patch_post):
    """`revoke-token` confirms a successful revocation."""
    patch_post(dict(data=dict(delete_api_token={"success": True})))

    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        outcome = CliRunner().invoke(auth, ["revoke-token", "--id", "id"])
    assert outcome.exit_code == 0
    assert "Token successfully revoked" in outcome.output
def test_revoke_token_fails(patch_post):
    """`revoke-token` reports failure when the API returns no data."""
    patch_post(dict())

    with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
        outcome = CliRunner().invoke(auth, ["revoke-token", "--id", "id"])
    assert outcome.exit_code == 0
    assert "Unable to revoke token with ID id" in outcome.output
def test_check_override_function():
    """check_override_auth_token aborts when [cloud.auth_token] is configured."""
    with set_temporary_config({"cloud.auth_token": "TOKEN"}), \
            pytest.raises(click.exceptions.Abort):
        prefect.cli.auth.check_override_auth_token()
def test_override_functions_on_commands():
    """Token commands abort (exit 1) when an override auth token is configured."""
    with set_temporary_config({"cloud.auth_token": "TOKEN"}):
        outcome = CliRunner().invoke(auth, ["revoke-token", "--id", "id"])
    assert outcome.exit_code == 1
c2c2b8678fdec8a47475a2ffd16370ddb0b62f8d | 4,201 | py | Python | src/00_core.py | grzegorzwojdyga/trl | 1921e71a7465a43dcc135d97821aa8b03bfebf8c | [
"Apache-2.0"
] | null | null | null | src/00_core.py | grzegorzwojdyga/trl | 1921e71a7465a43dcc135d97821aa8b03bfebf8c | [
"Apache-2.0"
] | null | null | null | src/00_core.py | grzegorzwojdyga/trl | 1921e71a7465a43dcc135d97821aa8b03bfebf8c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""00-core.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Ioc_ZLO6nfuoITf8Rc_Ez6UbFrv1GqZm
"""
# default_exp core
"""# Utility functions
> A set of utility functions used throughout the library.
"""
# export
import collections
import collections.abc

import numpy as np
import torch
import torch.nn.functional as F
"""## General utils"""
# exports
def flatten_dict(nested, sep='/'):
    """Flatten a nested mapping, joining nested keys with `sep`.

    Args:
        nested: possibly-nested mapping of string keys to values.
        sep: separator inserted between nested key components; keys must not
            contain it themselves.

    Returns:
        A flat dict mapping joined key paths to leaf values.

    Raises:
        ValueError: if any key already contains `sep`.
    """
    def rec(nest, prefix, into):
        for k, v in nest.items():
            if sep in k:
                raise ValueError(f"separator '{sep}' not allowed to be in key '{k}'")
            # collections.Mapping was removed in Python 3.10; the ABC lives in
            # collections.abc.
            if isinstance(v, collections.abc.Mapping):
                rec(v, prefix + k + sep, into)
            else:
                into[prefix + k] = v
    flat = {}
    rec(nested, '', flat)
    return flat
def stack_dicts(stats_dicts):
    """Combine a list of dicts of tensors into one dict of stacked, flattened tensors."""
    return {
        key: torch.stack([torch.flatten(entry[key]) for entry in stats_dicts])
        for key in stats_dicts[0]
    }
def add_suffix(input_dict, suffix):
    """Return a copy of `input_dict` with `suffix` appended to every key."""
    return {key + suffix: value for key, value in input_dict.items()}
"""## Torch utils"""
# exports
def pad_to_size(tensor, size, dim=1, padding=50256):
    """Right-pad `tensor` along `dim` to length `size` with value `padding`.

    Returns the tensor unchanged (same object) if it already has that length.
    """
    current = tensor.size()[dim]
    if current == size:
        return tensor
    return torch.nn.functional.pad(tensor, (0, size - current), 'constant', padding)
def logprobs_from_logits(logits, labels):
    """Gather per-token log-probabilities of `labels` from `logits`.

    See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591
    """
    log_probs = F.log_softmax(logits, dim=2)
    return torch.gather(log_probs, 2, labels.unsqueeze(2)).squeeze(-1)
def whiten(values, shift_mean=True):
    """Normalize `values` to unit variance; zero-mean unless shift_mean is False."""
    mean = torch.mean(values)
    var = torch.var(values)
    normalized = (values - mean) * torch.rsqrt(var + 1e-8)
    return normalized if shift_mean else normalized + mean
def clip_by_value(x, tensor_min, tensor_max):
    """Elementwise clamp of `x` between tensor-valued bounds.

    Tensor extension to torch.clamp:
    https://github.com/pytorch/pytorch/issues/2793#issuecomment-428784713
    """
    # Same composition order as the original: min against the upper bound
    # first, then max against the lower bound.
    return torch.max(torch.min(x, tensor_max), tensor_min)
def entropy_from_logits(logits):
    """Entropy of the categorical distribution defined by `logits` over the last dim."""
    probs = torch.nn.functional.softmax(logits, dim=-1)
    return torch.logsumexp(logits, axis=-1) - torch.sum(probs * logits, axis=-1)
def average_torch_dicts(list_of_dicts):
    """Elementwise average of tensors across a list of dicts sharing the same keys."""
    return {
        key: torch.mean(torch.stack([entry[key] for entry in list_of_dicts]), axis=0)
        for key in list_of_dicts[0].keys()
    }
def stats_to_np(stats_dict):
    """Cast torch tensors in a dict to numpy arrays; plain scalars become floats."""
    converted = {}
    for key, value in stats_dict.items():
        item = value.detach().cpu().numpy() if isinstance(value, torch.Tensor) else value
        # np.isscalar is False for numpy arrays, so only bare Python scalars
        # are coerced to float here (matching the original behavior).
        converted[key] = float(item) if np.isscalar(item) else item
    return converted
"""## BERT utils"""
# exports
def build_bert_batch_from_txt(text_list, tokenizer, device):
    """Tokenize texts into padded token-id and attention-mask tensors for BERT classification."""
    # tokenize each text into a (1, seq_len) tensor on the target device
    encoded = [tokenizer.encode(txt, return_tensors="pt").to(device) for txt in text_list]

    # everything is right-padded to the longest sequence in the batch
    target_len = max(t.size()[1] for t in encoded)

    # ids are padded with 0 and masks with 0 so BERT ignores padding positions
    id_rows = []
    mask_rows = []
    for ids in encoded:
        mask = torch.ones(ids.size(), device=device)
        id_rows.append(pad_to_size(ids, target_len, padding=0))
        mask_rows.append(pad_to_size(mask, target_len, padding=0))

    return torch.cat(id_rows), torch.cat(mask_rows)
| 29.377622 | 92 | 0.652226 |
3dd646893ad724e335c9aa773fbd422a3de770ab | 43,773 | py | Python | gcloud/google-cloud-sdk/lib/googlecloudsdk/command_lib/run/flags.py | bopopescu/JobSniperRails | 39e7f871887176770de0f4fc6789e9ddc7f32b1f | [
"MIT"
] | null | null | null | gcloud/google-cloud-sdk/lib/googlecloudsdk/command_lib/run/flags.py | bopopescu/JobSniperRails | 39e7f871887176770de0f4fc6789e9ddc7f32b1f | [
"MIT"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | gcloud/google-cloud-sdk/lib/googlecloudsdk/command_lib/run/flags.py | bopopescu/JobSniperRails | 39e7f871887176770de0f4fc6789e9ddc7f32b1f | [
"MIT"
] | 1 | 2020-07-24T18:47:35.000Z | 2020-07-24T18:47:35.000Z | # -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides common arguments for the Run command surface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import os
import re
from googlecloudsdk.api_lib.container import kubeconfig
from googlecloudsdk.api_lib.run import global_methods
from googlecloudsdk.api_lib.services import enable_api
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.command_lib.functions.deploy import env_vars_util
from googlecloudsdk.command_lib.run import config_changes
from googlecloudsdk.command_lib.run import exceptions as serverless_exceptions
from googlecloudsdk.command_lib.run import pretty_print
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.command_lib.util.args import map_util
from googlecloudsdk.command_lib.util.args import repeated
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import times
# Choices for the --connectivity flag (see AddEndpointVisibilityEnum).
_VISIBILITY_MODES = {
    'internal': 'Visible only within the cluster.',
    'external': 'Visible from outside the cluster.',
}
# Platform choices for the GA track; ordered so help output is stable.
_PLATFORMS = collections.OrderedDict([
    ('managed', 'Fully managed version of Cloud Run. Use with the `--region` '
     'flag or set the [run/region] property to specify a Cloud Run '
     'region.'),
    ('gke', 'Cloud Run on Google Kubernetes Engine. Use with the `--cluster` '
     'and `--cluster-location` flags or set the [run/cluster] and '
     '[run/cluster_location] properties to specify a cluster in a given '
     'zone.'),
])
# Alpha track additionally supports generic Knative-compatible clusters.
_PLATFORMS_ALPHA = _PLATFORMS.copy()
_PLATFORMS_ALPHA.update(collections.OrderedDict([
    ('kubernetes', 'Use a Knative-compatible kubernetes cluster. Use with the '
                   '`--kubeconfig` and `--context` flags to specify a '
                   'kubeconfig file and the context for connecting.'),
]))
# Short names used when composing arg-group help text (see Get*ArgGroup).
_PLATFORM_SHORT_DESCRIPTIONS = {
    'managed': 'Cloud Run (fully managed)',
    'gke': 'Cloud Run on GKE',
    'kubernetes': 'a Kubernetes cluster',
}
# Fallback kubeconfig location when --kubeconfig is not provided.
_DEFAULT_KUBECONFIG_PATH = '~/.kube/config'
class ArgumentError(exceptions.Error):
  """Raised when a command-line flag value fails validation."""
class KubeconfigError(exceptions.Error):
  """Raised for kubeconfig-related errors."""
def AddImageArg(parser):
  """Add the required --image flag naming the container image to deploy."""
  image_help = ('Name of the container image to deploy (e.g. '
                '`gcr.io/cloudrun/hello:latest`).')
  parser.add_argument('--image', required=True, help=image_help)
# Template for per-platform arg-group help; filled in by the Get*ArgGroup
# helpers below with a platform flag value and a short platform description.
_ARG_GROUP_HELP_TEXT = ('Only applicable if connecting to {platform_desc}. '
                        'Specify {platform} to use:')
def _GetOrAddArgGroup(parser, help_text):
  """Return the parser's existing arg group with `help_text`, creating it if absent."""
  for existing in parser.arguments:
    if existing.is_group and existing.help == help_text:
      return existing
  return parser.add_argument_group(help_text)
def GetManagedArgGroup(parser):
  """Get an arg group for managed CR-only flags."""
  return _GetOrAddArgGroup(
      parser,
      _ARG_GROUP_HELP_TEXT.format(
          platform_desc=_PLATFORM_SHORT_DESCRIPTIONS['managed'],
          platform="'--platform=managed'"))
def GetGkeArgGroup(parser):
  """Get an arg group for CRoGKE-only flags."""
  return _GetOrAddArgGroup(
      parser,
      _ARG_GROUP_HELP_TEXT.format(
          platform_desc=_PLATFORM_SHORT_DESCRIPTIONS['gke'],
          platform="'--platform=gke'"))
def GetKubernetesArgGroup(parser):
  """Get an arg group for --platform=kubernetes only flags."""
  return _GetOrAddArgGroup(
      parser,
      _ARG_GROUP_HELP_TEXT.format(
          platform_desc=_PLATFORM_SHORT_DESCRIPTIONS['kubernetes'],
          platform="'--platform=kubernetes'"))
def GetClusterArgGroup(parser):
  """Get an arg group for any generic cluster flags."""
  platform_desc = '{} or {}'.format(
      _PLATFORM_SHORT_DESCRIPTIONS['gke'],
      _PLATFORM_SHORT_DESCRIPTIONS['kubernetes'])
  return _GetOrAddArgGroup(
      parser,
      _ARG_GROUP_HELP_TEXT.format(
          platform="'--platform=gke' or '--platform=kubernetes'",
          platform_desc=platform_desc))
def AddAllowUnauthenticatedFlag(parser):
  """Add the tri-state --allow-unauthenticated flag."""
  unauth_help = (
      'Whether to enable allowing unauthenticated access to the service. '
      'This may take a few moments to take effect.')
  parser.add_argument(
      '--allow-unauthenticated',
      action=arg_parsers.StoreTrueFalseAction,
      help=unauth_help)
def AddAsyncFlag(parser):
  """Add an async flag."""
  # Attach the shared calliope --async flag instead of defining a new one.
  base.ASYNC_FLAG.AddToParser(parser)
def AddEndpointVisibilityEnum(parser):
  """Add the --connectivity=[external|internal] flag."""
  connectivity_help = ('Defaults to \'external\'. If \'external\', the service can be '
                       'invoked through the internet, in addition to through the cluster '
                       'network.')
  parser.add_argument(
      '--connectivity', choices=_VISIBILITY_MODES, help=connectivity_help)
def AddServiceFlag(parser):
  """Add the optional --service filter flag."""
  parser.add_argument(
      '--service', required=False,
      help='Limit matched revisions to the given service.')
def AddRegionArg(parser):
  """Add the --region flag."""
  region_help = ('Region in which the resource can be found. '
                 'Alternatively, set the property [run/region].')
  parser.add_argument('--region', help=region_help)
# TODO(b/118339293): When global list endpoint ready, stop hardcoding regions.
def AddRegionArgWithDefault(parser):
  """Add a region arg which defaults to us-central1.

  This is used by commands which list global resources.

  Args:
    parser: ArgumentParser, The calliope argparse parser.
  """
  parser.add_argument(
      '--region', default='us-central1',
      help='Region in which to list the resources.')
def AddFunctionArg(parser):
  """Add the hidden --function resource arg marking the deploy target as a function."""
  parser.add_argument(
      '--function',
      hidden=True,
      help="""\
      Specifies that the deployed object is a function. If a value is
      provided, that value is used as the entrypoint.
      """)
def AddUpdateTrafficFlags(parser):
  """Add flags for updating traffic assignments for a service.

  Adds a mutually exclusive pair of flags to the GKE arg group:
  `--to-revision` (explicit per-revision percentages) and `--to-latest`
  (send all traffic to the latest revision).
  """

  # Plain nested functions: the previous @staticmethod decoration required
  # extracting the callables via `.__func__`, which was an unnecessary hack
  # (staticmethod objects are not directly callable before Python 3.10).
  def TrafficTargetKey(key):
    """Identity validation for revision-name keys."""
    return key

  def TrafficPercentageValue(value):
    """Type validation for traffic percentage flag values.

    Raises:
      ArgumentError: if the value is not an integer in [0, 100].
    """
    try:
      result = int(value)
    except (TypeError, ValueError):
      raise ArgumentError(
          'Traffic percentage value %s is not an integer.' % value)
    if result < 0 or result > 100:
      raise ArgumentError(
          'Traffic percentage value %s is not between 0 and 100.' % value)
    return result

  group = GetGkeArgGroup(parser).add_mutually_exclusive_group()
  group.add_argument(
      '--to-revision',
      metavar='REVISION-NAME=PERCENTAGE',
      action=arg_parsers.UpdateAction,
      type=arg_parsers.ArgDict(
          key_type=TrafficTargetKey,
          value_type=TrafficPercentageValue),
      # Typos fixed in the user-facing help: "down down" -> "down",
      # "revision that are" -> "revisions that are", "revsion-1" -> "revision-1".
      help='Comma separated list of traffic assignments in the form '
      'REVISION-NAME=PERCENTAGE. REVISION-NAME must be the name for a '
      'revision for the service as returned by \'gcloud beta run list '
      'revisions\'. PERCENTAGE must be an integer percentage between '
      '0 and 100 inclusive. Ex service-nw9hs=10,service-nw9hs=20 '
      'Up to 100 percent of traffic may be assigned. If 100 percent '
      'of traffic is assigned, the Service traffic is updated as '
      'specified. If under 100 percent of traffic is assigned, the '
      'Service traffic is updated as specified for revisions with '
      'assignments and traffic is scaled up or down proportionally '
      'as needed for revisions that are currently serving traffic but that do '
      'not have new assignments. For example assume revision-1 is serving '
      '40 percent of traffic and revision-2 is serving 60 percent. If '
      'revision-1 is assigned 45 percent of traffic and no assignment is '
      'made for revision-2, the service is updated with revision-1 assigned '
      '45 percent of traffic and revision-2 scaled down to 55 percent.')
  group.add_argument(
      '--to-latest',
      default=False,
      action='store_true',
      help='True to assign 100 percent of traffic to the \'latest\' '
      'revision of this service. Note that when a new revision is '
      'created, it will become the \'latest\' and traffic will be '
      'directed to it. Defaults to False.')
def AddCloudSQLFlags(parser):
  """Register the repeated --[add|set|remove|clear]-cloudsql-instances flags.

  Args:
    parser: The argument parser to add the flags to.
  """
  sql_help = """\
      These flags modify the Cloud SQL instances this Service connects to.
      You can specify a name of a Cloud SQL instance if it's in the same
      project and region as your Cloud Run service; otherwise specify
      <project>:<region>:<instance> for the instance."""
  # repeated.AddPrimitiveArgs generates the full add/set/remove/clear family
  # of flags for the 'cloudsql-instances' list property.
  repeated.AddPrimitiveArgs(
      parser,
      'Service',
      'cloudsql-instances',
      'Cloud SQL instances',
      auto_group_help=False,
      additional_help=sql_help)
def AddMapFlagsNoFile(parser,
                      flag_name,
                      group_help='',
                      long_name=None,
                      key_type=None,
                      value_type=None):
  """Add flags like map_util.AddUpdateMapFlags but without the file one.

  Args:
    parser: The argument parser
    flag_name: The name for the property to be used in flag names
    group_help: Help text for the group of flags
    long_name: The name for the property to be used in help text
    key_type: A function to apply to map keys.
    value_type: A function to apply to map values.
  """
  if not long_name:
    long_name = flag_name
  # Overall structure: (--set | --clear | (--update + --remove)) are mutually
  # exclusive; only update and remove may be combined with each other.
  group = parser.add_mutually_exclusive_group(group_help)
  update_remove_group = group.add_argument_group(
      help=('Only --update-{0} and --remove-{0} can be used together. If both '
            'are specified, --remove-{0} will be applied first.'
           ).format(flag_name))
  map_util.AddMapUpdateFlag(
      update_remove_group,
      flag_name,
      long_name,
      key_type=key_type,
      value_type=value_type)
  map_util.AddMapRemoveFlag(
      update_remove_group, flag_name, long_name, key_type=key_type)
  map_util.AddMapClearFlag(group, flag_name, long_name)
  map_util.AddMapSetFlag(
      group, flag_name, long_name, key_type=key_type, value_type=value_type)
def AddMutexEnvVarsFlags(parser):
  """Register the mutually exclusive env-var update/set/remove/clear flags."""
  # TODO(b/119837621): Use env_vars_util.AddUpdateEnvVarsFlags when
  # `gcloud run` supports an env var file.
  AddMapFlagsNoFile(
      parser,
      'env-vars',
      long_name='environment variables',
      key_type=env_vars_util.EnvVarKeyType,
      value_type=env_vars_util.EnvVarValueType)
def AddMemoryFlag(parser):
  """Register the --memory flag for setting a memory limit on the Service."""
  memory_help = 'Set a memory limit. Ex: 1Gi, 512Mi.'
  parser.add_argument('--memory', help=memory_help)
def AddCpuFlag(parser):
  """Register the --cpu flag for setting a CPU limit on the Service."""
  cpu_help = ('Set a CPU limit in Kubernetes cpu units. '
              'Ex: .5, 500m, 2.')
  parser.add_argument('--cpu', help=cpu_help)
def AddConcurrencyFlag(parser):
  """Register the --concurrency flag (requests per container instance)."""
  concurrency_help = (
      'Set the number of concurrent requests allowed per '
      'container instance. A concurrency of 0 or unspecified indicates '
      'any number of concurrent requests are allowed. To unset '
      'this field, provide the special value `default`.')
  parser.add_argument('--concurrency', help=concurrency_help)
def AddTimeoutFlag(parser):
  """Register the --timeout flag (maximum request execution time)."""
  timeout_help = (
      'Set the maximum request execution time (timeout). It is specified '
      'as a duration; for example, "10m5s" is ten minutes, and five seconds. '
      'If you don\'t specify a unit, seconds is assumed. For example, "10" is '
      '10 seconds.')
  parser.add_argument('--timeout', help=timeout_help)
def AddServiceAccountFlag(parser):
  """Register --service-account, the identity the revision runs as."""
  sa_help = (
      'Email address of the IAM service account associated with the '
      'revision of the service. The service account represents the identity of '
      'the running revision, and determines what permissions the revision has. '
      'If not provided, the revision will use the project\'s default service '
      'account.')
  parser.add_argument('--service-account', help=sa_help)
def AddPlatformArg(parser):
  """Register the --platform flag with the GA platform choices."""
  platform_help = (
      'Target platform for running commands. '
      'Alternatively, set the property [run/platform]. '
      'If not specified, the user will be prompted to choose a platform.')
  # StoreProperty mirrors the flag value into the [run/platform] property.
  parser.add_argument(
      '--platform',
      choices=_PLATFORMS,
      action=actions.StoreProperty(properties.VALUES.run.platform),
      help=platform_help)
def AddAlphaPlatformArg(parser):
  """Register the --platform flag with the alpha platform choices."""
  platform_help = (
      'Target platform for running commands. '
      'Alternatively, set the property [run/platform]. '
      'If not specified, the user will be prompted to choose a platform.')
  # Same as AddPlatformArg but offers the alpha-only choices list.
  parser.add_argument(
      '--platform',
      choices=_PLATFORMS_ALPHA,
      action=actions.StoreProperty(properties.VALUES.run.platform),
      help=platform_help)
def AddKubeconfigFlags(parser):
  """Register --kubeconfig and --context for connecting to a cluster."""
  kubeconfig_help = (
      'The absolute path to your kubectl config file. If not specified, '
      'the colon- or semicolon-delimited list of paths specified by '
      '$KUBECONFIG will be used. If $KUBECONFIG is unset, this defaults to '
      '`{}`.'.format(_DEFAULT_KUBECONFIG_PATH))
  parser.add_argument('--kubeconfig', help=kubeconfig_help)
  context_help = ('The name of the context in your kubectl config file to '
                  'use for connecting.')
  parser.add_argument('--context', help=context_help)
def AddRevisionSuffixArg(parser):
  """Register --revision-suffix, appended to the service name for revisions."""
  suffix_help = (
      'Specify the suffix of the revision name. Revision names always '
      'start with the service name automatically. For example, specifying '
      '[--revision-suffix=v1] for a service named \'helloworld\', '
      'would lead to a revision named \'helloworld-v1\'.')
  parser.add_argument('--revision-suffix', help=suffix_help)
def AddVpcConnectorArg(parser):
  """Register --vpc-connector and its counterpart --clear-vpc-connector."""
  parser.add_argument(
      '--vpc-connector', help='Set a VPC connector for this Service.')
  # Boolean flag; defaults to False when not passed.
  parser.add_argument(
      '--clear-vpc-connector',
      action='store_true',
      help='Remove the VPC connector for this Service.')
def AddSecretsFlags(parser):
  """Add flags for creating, updating, and deleting secret mounts."""
  AddMapFlagsNoFile(
      parser,
      'secrets',
      long_name='secret mount paths',
      group_help='Specify where to mount which secrets. '
      'Mount paths map to a secret name. '
      'Optionally, add an additional parameter to specify a '
      'volume name for the secret. For example, '
      '\'--update-secrets=/my/path=mysecret:secretvol\' will '
      'create a volume named \'secretvol\' with a secret '
      'named \'mysecret\' and mount that volume at \'/my/path\'. '
      'If a volume name is not provided, the secret name '
      'will be used.')
def AddConfigMapsFlags(parser):
  """Add flags for creating, updating, and deleting config map mounts."""
  AddMapFlagsNoFile(
      parser,
      'config-maps',
      long_name='config map mount paths',
      group_help='Specify where to mount which config maps. '
      'Mount paths map to a config map name. '
      'Optionally, add an additional parameter to specify a '
      'volume name for the config map. For example, '
      '\'--update-config-maps=/my/path=myconfig:configvol\' will '
      'create a volume named \'configvol\' with a config map '
      'named \'myconfig\' and mount that volume at \'/my/path\'. '
      'If a volume name is not provided, the config map name '
      'will be used.')
def AddLabelsFlags(parser, add_create=True):
  """Adds update command labels flags to an argparse parser.

  Args:
    parser: The argparse parser to add the flags to.
    add_create: bool, If True, add the --labels flag.
  """
  if add_create:
    create_flag = labels_util.GetCreateLabelsFlag(
        validate_keys=False, validate_values=False)
    create_flag.AddToParser(parser)
  update_flag = labels_util.GetUpdateLabelsFlag(
      '', validate_keys=False, validate_values=False)
  update_flag.AddToParser(parser)
  # --clear-labels and --remove-labels are mutually exclusive.
  mutex_group = parser.add_mutually_exclusive_group()
  labels_util.GetClearLabelsFlag().AddToParser(mutex_group)
  labels_util.GetRemoveLabelsFlag('').AddToParser(mutex_group)
class _ScaleValue(object):
"""Type for min/max-instaces flag values."""
def __init__(self, value):
self.restore_default = value == 'default'
if not self.restore_default:
try:
self.instance_count = int(value)
except (TypeError, ValueError):
raise ArgumentError(
'Instance count value %s is not an integer '
'or \'default\'.' % value
)
if self.instance_count < 0:
raise ArgumentError('Instance count value %s is negative.' % value)
def AddScalingFlags(parser):
  """Add the --min-instances / --max-instances scaling knobs."""
  help_msg = (
      'The {bound} number of container instances of the Service to run or '
      '\'default\' to remove any {bound}.')
  # --min-instances is restricted to the cluster arg group (it is rejected
  # on the fully managed platform, see VerifyOnePlatformFlags).
  cluster_group = GetClusterArgGroup(parser)
  cluster_group.add_argument(
      '--min-instances',
      type=_ScaleValue,
      help=help_msg.format(bound='minimum'))
  parser.add_argument(
      '--max-instances',
      type=_ScaleValue,
      help=help_msg.format(bound='maximum'))
def AddCommandFlag(parser):
  """Add the flag for specifying the container's startup command."""
  command_help = (
      'Entrypoint for the container image. If not specified, the '
      'container image\'s default Entrypoint is run. '
      'To reset this field to its default, pass an empty string.')
  parser.add_argument('--command', help=command_help)
def AddArgsFlag(parser):
"""Add flags for specifying container's startup args."""
parser.add_argument(
'--args',
help='Arguments passed to the command run by the container '
'image. If not specified and no \'--command\' is provided, the container '
'image\'s default Cmd is used. Otherwise, if not specified, no arguments '
'are passed. '
'To reset this field to its default, pass an empty string.')
def _HasChanges(args, flags):
  """True iff any of the passed flags are set."""
  for flag in flags:
    if _FlagIsExplicitlySet(args, flag):
      return True
  return False
def _HasEnvChanges(args):
  """True iff any of the env var flags are set."""
  return _HasChanges(
      args,
      ['update_env_vars', 'set_env_vars', 'remove_env_vars', 'clear_env_vars'])
def _HasCloudSQLChanges(args):
  """True iff any of the cloudsql flags are set."""
  return _HasChanges(args, [
      'add_cloudsql_instances', 'set_cloudsql_instances',
      'remove_cloudsql_instances', 'clear_cloudsql_instances'
  ])
def _HasLabelChanges(args):
  """True iff any of the label flags are set."""
  return _HasChanges(
      args, ['labels', 'update_labels', 'clear_labels', 'remove_labels'])
def _HasSecretsChanges(args):
  """True iff any of the secret flags are set."""
  return _HasChanges(
      args,
      ['update_secrets', 'set_secrets', 'remove_secrets', 'clear_secrets'])
def _HasConfigMapsChanges(args):
  """True iff any of the config maps flags are set."""
  return _HasChanges(args, [
      'update_config_maps', 'set_config_maps', 'remove_config_maps',
      'clear_config_maps'
  ])
def _HasTrafficChanges(args):
  """True iff any of the traffic flags are set."""
  return _HasChanges(args, ['to_revision', 'to_latest'])
def _GetEnvChanges(args):
  """Return config_changes.EnvVarChanges for given args."""
  kwargs = {}
  to_update = args.update_env_vars or args.set_env_vars
  to_remove = args.remove_env_vars
  if to_update:
    kwargs['env_vars_to_update'] = to_update
  if to_remove:
    kwargs['env_vars_to_remove'] = to_remove
  # Both --set-env-vars and --clear-env-vars wipe any vars not re-specified.
  if args.set_env_vars or args.clear_env_vars:
    kwargs['clear_others'] = True
  return config_changes.EnvVarChanges(**kwargs)
def _GetScalingChanges(args):
  """Returns the list of changes for scaling for given args.

  Translates the _ScaleValue flag values into Knative autoscaling template
  annotations.
  """
  result = []
  if 'min_instances' in args and args.min_instances is not None:
    scale_value = args.min_instances
    # For minScale, an explicit 0 is equivalent to 'default': the annotation
    # is removed entirely rather than set to '0'.
    if scale_value.restore_default or scale_value.instance_count == 0:
      result.append(config_changes.DeleteTemplateAnnotationChange(
          'autoscaling.knative.dev/minScale'))
    else:
      result.append(config_changes.SetTemplateAnnotationChange(
          'autoscaling.knative.dev/minScale', str(scale_value.instance_count)))
  if 'max_instances' in args and args.max_instances is not None:
    scale_value = args.max_instances
    # For maxScale only 'default' removes the annotation; 0 is set verbatim.
    if scale_value.restore_default:
      result.append(config_changes.DeleteTemplateAnnotationChange(
          'autoscaling.knative.dev/maxScale'))
    else:
      result.append(config_changes.SetTemplateAnnotationChange(
          'autoscaling.knative.dev/maxScale', str(scale_value.instance_count)))
  return result
def _GetSecretsChanges(args):
  """Return config_changes.SecretVolumeChanges for given args."""
  kwargs = {}
  to_update = args.update_secrets or args.set_secrets
  to_remove = args.remove_secrets
  if to_update:
    kwargs['mounts_to_update'] = to_update
  if to_remove:
    kwargs['mounts_to_remove'] = to_remove
  # --set-secrets and --clear-secrets wipe any mounts not re-specified.
  if args.set_secrets or args.clear_secrets:
    kwargs['clear_others'] = True
  return config_changes.SecretVolumeChanges(**kwargs)
def _GetConfigMapsChanges(args):
  """Return config_changes.ConfigMapVolumeChanges for given args."""
  kwargs = {}
  to_update = args.update_config_maps or args.set_config_maps
  to_remove = args.remove_config_maps
  if to_update:
    kwargs['mounts_to_update'] = to_update
  if to_remove:
    kwargs['mounts_to_remove'] = to_remove
  # --set-config-maps and --clear-config-maps wipe mounts not re-specified.
  if args.set_config_maps or args.clear_config_maps:
    kwargs['clear_others'] = True
  return config_changes.ConfigMapVolumeChanges(**kwargs)
def PromptToEnableApi(service_name):
  """Prompts to enable the API and throws if the answer is no.

  No-op when the [core/should_prompt_to_enable_api] property is disabled or
  when the API is already enabled on the project.

  Args:
    service_name: str, The service token of the API to prompt for.
  """
  if not properties.VALUES.core.should_prompt_to_enable_api.GetBool():
    return
  project = properties.VALUES.core.project.Get(required=True)
  # Don't prompt to enable an already enabled API
  if not enable_api.IsServiceEnabled(project, service_name):
    # cancel_on_no=True: answering 'no' presumably aborts the command rather
    # than returning False -- confirm against console_io.PromptContinue.
    if console_io.PromptContinue(
        default=False,
        cancel_on_no=True,
        prompt_string=('API [{}] not enabled on project [{}]. '
                       'Would you like to enable and retry (this will take a '
                       'few minutes)?').format(service_name, project)):
      enable_api.EnableService(project, service_name)
# Service tokens for the Cloud SQL APIs; both are prompted for enablement
# by _CheckCloudSQLApiEnablement when Cloud SQL flags are used.
_CLOUD_SQL_API_SERVICE_TOKEN = 'sql-component.googleapis.com'
_CLOUD_SQL_ADMIN_API_SERVICE_TOKEN = 'sqladmin.googleapis.com'
def _CheckCloudSQLApiEnablement():
  """Prompt to enable both Cloud SQL API services, when prompting is allowed."""
  # PromptToEnableApi re-checks this property itself; checking here too lets
  # us skip both calls at once.
  if not properties.VALUES.core.should_prompt_to_enable_api.GetBool():
    return
  for token in (_CLOUD_SQL_API_SERVICE_TOKEN,
                _CLOUD_SQL_ADMIN_API_SERVICE_TOKEN):
    PromptToEnableApi(token)
def _GetTrafficChanges(args):
  """Returns a changes for traffic assignment based on the flags."""
  percentages = args.to_revision or {}
  # --to-latest routes 100% to the latest revision; otherwise untouched.
  latest_percentage = 100 if args.to_latest else None
  return config_changes.TrafficChanges(percentages, latest_percentage)
def GetConfigurationChanges(args):
  """Returns a list of changes to Configuration, based on the flags set.

  Args:
    args: Namespace, the parsed command-line arguments. The `'flag' in args`
      guards below skip flags that are not defined on the current release
      track.

  Returns:
    list of config_changes change objects, in the order they should be
    applied.

  Raises:
    ArgumentError: if --timeout is not a positive duration.
  """
  changes = []
  changes.extend(_GetScalingChanges(args))
  if _HasEnvChanges(args):
    changes.append(_GetEnvChanges(args))
  if _HasTrafficChanges(args):
    changes.append(_GetTrafficChanges(args))
  if _HasCloudSQLChanges(args):
    region = GetRegion(args)
    project = (
        getattr(args, 'project', None) or
        properties.VALUES.core.project.Get(required=True))
    _CheckCloudSQLApiEnablement()
    changes.append(config_changes.CloudSQLChanges(project, region, args))
  if _HasSecretsChanges(args):
    changes.append(_GetSecretsChanges(args))
  if _HasConfigMapsChanges(args):
    changes.append(_GetConfigMapsChanges(args))
  if 'cpu' in args and args.cpu:
    changes.append(config_changes.ResourceChanges(cpu=args.cpu))
  if 'memory' in args and args.memory:
    changes.append(config_changes.ResourceChanges(memory=args.memory))
  if 'concurrency' in args and args.concurrency:
    try:
      c = int(args.concurrency)
    except ValueError:
      # Non-integer values (legacy 'Single'/'Multi' or 'default') pass
      # through as strings; only 'default' avoids the deprecation warning.
      c = args.concurrency
      if c != 'default':
        log.warning('Specifying concurrency as Single or Multi is deprecated; '
                    'an integer is preferred.')
    changes.append(config_changes.ConcurrencyChanges(concurrency=c))
  if 'timeout' in args and args.timeout:
    try:
      # A bare number is interpreted as seconds.
      timeout_secs = int(args.timeout)
    except ValueError:
      timeout_duration = times.ParseDuration(args.timeout)
      timeout_secs = int(timeout_duration.total_seconds)
    if timeout_secs <= 0:
      raise ArgumentError(
          'The --timeout argument must be a positive time duration.')
    changes.append(config_changes.TimeoutChanges(timeout=timeout_secs))
  if 'service_account' in args and args.service_account:
    changes.append(
        config_changes.ServiceAccountChanges(
            service_account=args.service_account))
  if _HasLabelChanges(args):
    # --labels (create form) takes precedence over --update-labels.
    additions = (
        args.labels
        if _FlagIsExplicitlySet(args, 'labels') else args.update_labels)
    diff = labels_util.Diff(
        additions=additions,
        subtractions=args.remove_labels,
        clear=args.clear_labels)
    if diff.MayHaveUpdates():
      changes.append(config_changes.LabelChanges(diff))
  if 'revision_suffix' in args and args.revision_suffix:
    changes.append(config_changes.RevisionNameChanges(args.revision_suffix))
  if 'vpc_connector' in args and args.vpc_connector:
    changes.append(config_changes.VpcConnectorChange(args.vpc_connector))
  if 'clear_vpc_connector' in args and args.clear_vpc_connector:
    changes.append(config_changes.ClearVpcConnectorChange())
  if 'connectivity' in args and args.connectivity:
    if args.connectivity == 'internal':
      changes.append(config_changes.EndpointVisibilityChange(True))
    elif args.connectivity == 'external':
      changes.append(config_changes.EndpointVisibilityChange(False))
  if 'command' in args and args.command is not None:
    # Allow passing an empty string here to reset the field
    changes.append(config_changes.ContainerCommandChange(args.command))
  if 'args' in args and args.args is not None:
    # Allow passing an empty string here to reset the field
    changes.append(config_changes.ContainerArgsChange(args.args))
  return changes
def GetService(args):
  """Get and validate the service resource from the args."""
  service_ref = args.CONCEPTS.service.Parse()
  # Valid service names comprise only alphanumeric characters and dashes. Must
  # not begin or end with a dash, and must not contain more than 63 characters.
  # Must be lowercase.
  service_re = re.compile(r'(?=^[a-z0-9-]{1,63}$)(?!^\-.*)(?!.*\-$)')
  if not service_re.match(service_ref.servicesId):
    raise ArgumentError(
        'Invalid service name [{}]. Service name must use only lowercase '
        'alphanumeric characters and dashes. Cannot begin or end with a dash, '
        'and cannot be longer than 63 characters.'.format(
            service_ref.servicesId))
  return service_ref
def GetClusterRef(cluster):
  """Build a resource reference for a GKE cluster in the current project."""
  project = properties.VALUES.core.project.Get(required=True)
  params = {'projectId': project, 'zone': cluster.zone}
  return resources.REGISTRY.Parse(
      cluster.name,
      params=params,
      collection='container.projects.zones.clusters')
def PromptForRegion():
  """Prompt for region from list of available regions.

  This method is referenced by the declarative iam commands as a fallthrough
  for getting the region.

  Returns:
    The region specified by the user, str; or None (implicitly) when
    prompting is not possible -- callers such as GetRegion check for a
    falsy result.
  """
  if console_io.CanPrompt():
    client = global_methods.GetServerlessClientInstance()
    all_regions = global_methods.ListRegions(client)
    idx = console_io.PromptChoice(
        all_regions, message='Please specify a region:\n', cancel_option=True)
    region = all_regions[idx]
    log.status.Print('To make this the default region, run '
                     '`gcloud config set run/region {}`.\n'.format(region))
    return region
def GetRegion(args, prompt=False):
  """Prompt for region if not provided.

  Region is decided in the following order:
  - region argument;
  - run/region gcloud config;
  - compute/region gcloud config;
  - prompt user.

  Args:
    args: Namespace, The args namespace.
    prompt: bool, whether to attempt to prompt.

  Returns:
    A str representing region, or None (implicitly) when no region could be
    determined and prompting is disabled or declined.
  """
  if getattr(args, 'region', None):
    return args.region
  if properties.VALUES.run.region.IsExplicitlySet():
    return properties.VALUES.run.region.Get()
  if properties.VALUES.compute.region.IsExplicitlySet():
    return properties.VALUES.compute.region.Get()
  if prompt:
    region = PromptForRegion()
    if region:
      # set the region on args, so we're not embarrassed the next time we call
      # GetRegion
      args.region = region
      return region
def GetAllowUnauthenticated(args, client=None, service_ref=None, prompt=False):
  """Return bool for the explicit intent to allow unauth invocations or None.

  If --[no-]allow-unauthenticated is set, return that value. If not set,
  prompt for value if desired. If prompting not necessary or doable,
  return None, indicating that no action needs to be taken.

  Args:
    args: Namespace, The args namespace
    client: from googlecloudsdk.command_lib.run import serverless_operations
      serverless_operations.ServerlessOperations object
    service_ref: service resource reference (e.g. args.CONCEPTS.service.Parse())
    prompt: bool, whether to attempt to prompt.

  Returns:
    bool indicating whether to allow/unallow unauthenticated or None if N/A
  """
  if getattr(args, 'allow_unauthenticated', None) is not None:
    return args.allow_unauthenticated
  if prompt:
    # Need to check if the user has permissions before we prompt
    # NOTE(review): assert is stripped under `python -O`; callers must always
    # supply client and service_ref when prompt=True.
    assert client is not None and service_ref is not None
    if client.CanSetIamPolicyBinding(service_ref):
      return console_io.PromptContinue(
          prompt_string=('Allow unauthenticated invocations '
                         'to [{}]'.format(service_ref.servicesId)),
          default=False)
    else:
      # No permission to change the IAM policy: inform, then fall through to
      # the implicit None return (no action taken).
      pretty_print.Info(
          'This service will require authentication to be invoked.')
  return None
def GetKubeconfig(args):
  """Get config from kubeconfig file.

  Get config from potentially 3 different places, falling back to the next
  option as necessary:
  1. file_path specified as argument by the user
  2. List of file paths specified in $KUBECONFIG
  3. Default config path (~/.kube/config)

  Args:
    args: Namespace, The args namespace.

  Returns:
    dict: config object

  Raises:
    KubeconfigError: if $KUBECONFIG is set but contains no valid paths
  """
  if getattr(args, 'kubeconfig', None):
    return kubeconfig.Kubeconfig.LoadFromFile(
        files.ExpandHomeDir(args.kubeconfig))
  if os.getenv('KUBECONFIG'):
    config_paths = os.getenv('KUBECONFIG').split(os.pathsep)
    config = None
    # Merge together all valid paths into single config
    for path in config_paths:
      try:
        other_config = kubeconfig.Kubeconfig.LoadFromFile(
            files.ExpandHomeDir(path))
        if not config:
          config = other_config
        else:
          config.Merge(other_config)
      except kubeconfig.Error:
        # Invalid/unreadable entries are skipped silently; an error is only
        # raised below if *no* entry in $KUBECONFIG was usable.
        pass
    if not config:
      raise KubeconfigError('No valid file paths found in $KUBECONFIG')
    return config
  return kubeconfig.Kubeconfig.LoadFromFile(
      files.ExpandHomeDir(_DEFAULT_KUBECONFIG_PATH))
def _FlagIsExplicitlySet(args, flag):
"""Return True if --flag is explicitly passed by the user."""
# hasattr check is to allow the same code to work for release tracks that
# don't have the args at all yet.
return hasattr(args, flag) and args.IsSpecified(flag)
def VerifyOnePlatformFlags(args):
  """Raise ConfigurationError if args includes GKE only arguments.

  Checks are performed in the same order as before so the first offending
  flag reported is unchanged.

  Args:
    args: Namespace, The args namespace.

  Raises:
    serverless_exceptions.ConfigurationError: for the first flag found that
      is unsupported on the fully managed platform.
  """
  error_msg = ('The `{flag}` flag is not supported on the fully managed '
               'version of Cloud Run. Specify `--platform {platform}` or run '
               '`gcloud config set run/platform {platform}` to work with '
               '{platform_desc}.')

  def _Raise(flag, platform):
    # Build and raise the standard "unsupported flag" error for `platform`.
    raise serverless_exceptions.ConfigurationError(
        error_msg.format(
            flag=flag,
            platform=platform,
            platform_desc=_PLATFORM_SHORT_DESCRIPTIONS[platform]))

  # Flags only available with Cloud Run on GKE:
  # (args attribute name, user-facing flag spelling).
  gke_only = [
      ('min_instances', '--min-instances'),
      ('connectivity', '--connectivity=[internal|external]'),
      ('cpu', '--cpu'),
      ('namespace', '--namespace'),
      ('cluster', '--cluster'),
      ('cluster_location', '--cluster-location'),
  ]
  for attr, flag in gke_only:
    if _FlagIsExplicitlySet(args, attr):
      _Raise(flag, 'gke')
  if _HasSecretsChanges(args):
    _Raise('--[update|set|remove|clear]-secrets', 'gke')
  if _HasConfigMapsChanges(args):
    _Raise('--[update|set|remove|clear]-config-maps', 'gke')
  # Flags only available when connecting to a Kubernetes cluster.
  kubernetes_only = [
      ('kubeconfig', '--kubeconfig'),
      ('context', '--context'),
      ('to_revision', '--to-revision'),
      ('to_latest', '--to-latest'),
  ]
  for attr, flag in kubernetes_only:
    if _FlagIsExplicitlySet(args, attr):
      _Raise(flag, 'kubernetes')
def VerifyGKEFlags(args):
  """Raise ConfigurationError if args includes OnePlatform only arguments.

  Checks are performed in the same order as before so the first offending
  flag reported is unchanged.

  Args:
    args: Namespace, The args namespace.

  Raises:
    serverless_exceptions.ConfigurationError: for the first flag found that
      is unsupported with Cloud Run on GKE.
  """
  error_msg = ('The `{flag}` flag is not supported with Cloud Run on GKE. '
               'Specify `--platform {platform}` or run `gcloud config set '
               'run/platform {platform}` to work with {platform_desc}.')

  def _Raise(flag, platform):
    # Build and raise the standard "unsupported flag" error for `platform`.
    raise serverless_exceptions.ConfigurationError(
        error_msg.format(
            flag=flag,
            platform=platform,
            platform_desc=_PLATFORM_SHORT_DESCRIPTIONS[platform]))

  # Flags only available on the fully managed platform.
  managed_only = [
      ('allow_unauthenticated', '--allow-unauthenticated'),
      ('service_account', '--service-account'),
      ('region', '--region'),
      ('revision_suffix', '--revision-suffix'),
      ('vpc_connector', '--vpc-connector'),
      ('clear_vpc_connector', '--clear-vpc-connector'),
  ]
  for attr, flag in managed_only:
    if _FlagIsExplicitlySet(args, attr):
      _Raise(flag, 'managed')
  # Flags only available when connecting to a Kubernetes cluster.
  kubernetes_only = [
      ('kubeconfig', '--kubeconfig'),
      ('context', '--context'),
  ]
  for attr, flag in kubernetes_only:
    if _FlagIsExplicitlySet(args, attr):
      _Raise(flag, 'kubernetes')
def VerifyKubernetesFlags(args):
  """Raise ConfigurationError if args includes OnePlatform or GKE only arguments.

  Checks are performed in the same order as before so the first offending
  flag reported is unchanged. Also fixes the 'Kubenetes' typo in the
  user-facing error message.

  Args:
    args: Namespace, The args namespace.

  Raises:
    serverless_exceptions.ConfigurationError: for the first flag found that
      is unsupported when connecting to a Kubernetes cluster.
  """
  # Fixed typo: was 'Kubenetes cluster'.
  error_msg = ('The `{flag}` flag is not supported when connecting to a '
               'Kubernetes cluster. Specify `--platform {platform}` or run '
               '`gcloud config set run/platform {platform}` to work with '
               '{platform_desc}.')

  def _Raise(flag, platform):
    # Build and raise the standard "unsupported flag" error for `platform`.
    raise serverless_exceptions.ConfigurationError(
        error_msg.format(
            flag=flag,
            platform=platform,
            platform_desc=_PLATFORM_SHORT_DESCRIPTIONS[platform]))

  # Flags only available on the fully managed platform.
  managed_only = [
      ('allow_unauthenticated', '--allow-unauthenticated'),
      ('service_account', '--service-account'),
      ('region', '--region'),
      ('revision_suffix', '--revision-suffix'),
      ('vpc_connector', '--vpc-connector'),
      ('clear_vpc_connector', '--clear-vpc-connector'),
  ]
  for attr, flag in managed_only:
    if _FlagIsExplicitlySet(args, attr):
      _Raise(flag, 'managed')
  # Flags only available with Cloud Run on GKE.
  gke_only = [
      ('cluster', '--cluster'),
      ('cluster_location', '--cluster-location'),
  ]
  for attr, flag in gke_only:
    if _FlagIsExplicitlySet(args, attr):
      _Raise(flag, 'gke')
def GetPlatformFallback():
  """Fallback for declarative commands: read the [run/platform] property."""
  return properties.VALUES.run.platform.Get()
def GetPlatform(args):
  """Returns the platform to run on.

  Resolution order: the [run/platform] property (which --platform also
  stores into), then an interactive prompt. The chosen platform's flag
  restrictions are then verified before returning.

  Args:
    args: Namespace, The args namespace.

  Returns:
    str, one of the keys of the --platform flag's choices.

  Raises:
    ArgumentError: if no platform can be determined or it is invalid.
  """
  platform = properties.VALUES.run.platform.Get()
  choices = args.GetFlagArgument('platform').choices_help
  if platform is None:
    if console_io.CanPrompt():
      platform_descs = [_PLATFORM_SHORT_DESCRIPTIONS[k] for k in choices]
      index = console_io.PromptChoice(
          platform_descs,
          message='Please choose a target platform:',
          cancel_option=True)
      platform = list(choices.keys())[index]
      # Set platform so we don't re-prompt on future calls to this method
      properties.VALUES.run.platform.Set(platform)
      log.status.Print(
          'To specify the platform yourself, pass `--platform {0}`. '
          'Or, to make this the default target platform, run '
          '`gcloud config set run/platform {0}`.\n'.format(platform))
    else:
      raise ArgumentError(
          'No platform specified. Pass the `--platform` flag or set '
          'the [run/platform] property to specify a target platform.\n'
          'Available platforms:\n{}'.format(
              '\n'.join(
                  ['- {}: {}'.format(k, v) for k, v in choices.items()])))
  # Reject flags that the chosen platform doesn't support.
  if platform == 'managed':
    VerifyOnePlatformFlags(args)
  elif platform == 'gke':
    VerifyGKEFlags(args)
  elif platform == 'kubernetes':
    VerifyKubernetesFlags(args)
  else:
    raise ArgumentError(
        'Invalid target platform specified: [{}].\n'
        'Available platforms:\n{}'.format(
            platform,
            '\n'.join(['- {}: {}'.format(k, v) for k, v in choices.items()
                      ])))
  return platform
def IsKubernetes(args):
  """Returns True if args properly specify Kubernetes.

  Args:
    args: Namespace, The args namespace.
  """
  platform = GetPlatform(args)
  return platform == 'kubernetes'
def IsGKE(args):
  """Returns True if args properly specify GKE.

  Args:
    args: Namespace, The args namespace.
  """
  platform = GetPlatform(args)
  return platform == 'gke'
def IsManaged(args):
  """Returns True if args properly specify managed.

  Args:
    args: Namespace, The args namespace.
  """
  platform = GetPlatform(args)
  return platform == 'managed'
def ValidatePlatformIsManaged(platform):
  """Return `platform` unchanged, or raise if it is not 'managed'."""
  if platform == 'managed':
    return platform
  raise calliope_exceptions.BadArgumentException(
      '--platform', 'The platform [{}] is not supported by this operation. '
      'Specify `--platform managed` or run '
      '`gcloud config set run/platform managed`.'.format(platform))
| 35.357835 | 84 | 0.693989 |
daceb4f3f8f7285a81b920bd45635d303f9cf908 | 9,198 | py | Python | openstack_dashboard/dashboards/project/network_topology/tests.py | timpricecatalyst/horizon | 8279ae0ed464e62e1c91e78341342160f8a07172 | [
"Apache-2.0"
] | 3 | 2016-04-05T14:25:31.000Z | 2018-11-18T16:03:14.000Z | openstack_dashboard/dashboards/project/network_topology/tests.py | timpricecatalyst/horizon | 8279ae0ed464e62e1c91e78341342160f8a07172 | [
"Apache-2.0"
] | 1 | 2019-10-27T15:57:25.000Z | 2019-10-27T15:57:25.000Z | openstack_dashboard/dashboards/project/network_topology/tests.py | timpricecatalyst/horizon | 8279ae0ed464e62e1c91e78341342160f8a07172 | [
"Apache-2.0"
] | 2 | 2015-12-28T14:36:30.000Z | 2018-11-18T16:03:15.000Z | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
import django.test
from mox3.mox import IsA # noqa
from oslo_serialization import jsonutils
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
# URLs under test, resolved once at import time via Django's reverse().
JSON_URL = reverse('horizon:project:network_topology:json')
INDEX_URL = reverse('horizon:project:network_topology:index')
class NetworkTopologyTests(test.TestCase):
    """Tests for the network-topology JSON endpoint, with and without routers."""

    @test.create_stubs({api.nova: ('server_list',),
                        api.neutron: ('network_list_for_tenant',
                                      'network_list',
                                      'router_list',
                                      'port_list')})
    def test_json_view(self):
        """JSON view with neutron router support enabled (the default)."""
        self._test_json_view()

    @django.test.utils.override_settings(
        OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
    @test.create_stubs({api.nova: ('server_list',),
                        api.neutron: ('network_list_for_tenant',
                                      'port_list')})
    def test_json_view_router_disabled(self):
        """JSON view must omit routers and external networks when disabled."""
        self._test_json_view(router_enable=False)

    def _test_json_view(self, router_enable=True):
        """Stub the nova/neutron API, fetch the JSON topology, and compare
        the servers, routers, networks and ports sections with expectations.
        """
        api.nova.server_list(
            IsA(http.HttpRequest)).AndReturn([self.servers.list(), False])
        tenant_networks = [net for net in self.networks.list()
                           if not net['router:external']]
        external_networks = [net for net in self.networks.list()
                             if net['router:external']]
        api.neutron.network_list_for_tenant(
            IsA(http.HttpRequest),
            self.tenant.id).AndReturn(tenant_networks)
        # External networks are only queried when router support is on.
        if router_enable:
            api.neutron.network_list(
                IsA(http.HttpRequest),
                **{'router:external': True}).AndReturn(external_networks)
        # router1 : gateway port not in the port list
        # router2 : no gateway port
        # router3 : gateway port included in port list
        routers = self.routers.list() + self.routers_with_rules.list()
        if router_enable:
            api.neutron.router_list(
                IsA(http.HttpRequest),
                tenant_id=self.tenant.id).AndReturn(routers)
        api.neutron.port_list(
            IsA(http.HttpRequest)).AndReturn(self.ports.list())
        self.mox.ReplayAll()
        res = self.client.get(JSON_URL)
        self.assertEqual('text/json', res['Content-Type'])
        data = jsonutils.loads(res.content)
        # servers
        # result_server_urls = [(server['id'], server['url'])
        #                       for server in data['servers']]
        expect_server_urls = [
            {'id': server.id,
             'name': server.name,
             'status': server.status,
             'task': None,
             'url': '/project/instances/%s/' % server.id}
            for server in self.servers.list()]
        self.assertEqual(expect_server_urls, data['servers'])
        # routers
        # result_router_urls = [(router['id'], router['url'])
        #                       for router in data['routers']]
        if router_enable:
            expect_router_urls = [
                {'id': router.id,
                 'external_gateway_info':
                 router.external_gateway_info,
                 'name': router.name,
                 'status': router.status,
                 'url': '/project/routers/%s/' % router.id}
                for router in routers]
            self.assertEqual(expect_router_urls, data['routers'])
        else:
            self.assertFalse(data['routers'])
        # networks: external networks (router case only) come first, then
        # tenant networks with their subnets expanded.
        expect_net_urls = []
        if router_enable:
            expect_net_urls += [{'id': net.id,
                                 'url': '/project/networks/%s/detail' % net.id,
                                 'name': net.name,
                                 'router:external': net.router__external,
                                 'status': net.status,
                                 'subnets': []}
                                for net in external_networks]
        expect_net_urls += [{'id': net.id,
                             'url': '/project/networks/%s/detail' % net.id,
                             'name': net.name,
                             'router:external': net.router__external,
                             'status': net.status,
                             'subnets': [{'cidr': subnet.cidr,
                                          'id': subnet.id,
                                          'url':
                                          '/project/networks/subnets/%s/detail'
                                          % subnet.id}
                                         for subnet in net.subnets]}
                            for net in tenant_networks]
        for exp_net in expect_net_urls:
            if exp_net['url'] is None:
                del exp_net['url']
        self.assertEqual(expect_net_urls, data['networks'])
        # ports
        expect_port_urls = [
            {'id': port.id,
             'device_id': port.device_id,
             'device_owner': port.device_owner,
             'fixed_ips': port.fixed_ips,
             'network_id': port.network_id,
             'status': port.status,
             'url': '/project/networks/ports/%s/detail' % port.id}
            for port in self.ports.list()]
        if router_enable:
            # fake port for router1 gateway (router1 on ext_net)
            router1 = routers[0]
            ext_net = external_networks[0]
            expect_port_urls.append(
                {'id': 'gateway%s' % ext_net.id,
                 'device_id': router1.id,
                 'network_id': ext_net.id,
                 'fixed_ips': []})
        self.assertEqual(expect_port_urls, data['ports'])
class NetworkTopologyCreateTests(test.TestCase):
    """Create-button state on the topology page when quotas are exhausted."""

    def _test_new_button_disabled_when_quota_exceeded(
            self, expected_string, networks_quota=10,
            routers_quota=10, instances_quota=10):
        """Stub quota usage with the given availabilities and assert that
        *expected_string* (the disabled-button markup) is rendered.
        """
        quota_data = self.quota_usages.first()
        quota_data['networks']['available'] = networks_quota
        quota_data['routers']['available'] = routers_quota
        quota_data['instances']['available'] = instances_quota
        quotas.tenant_quota_usages(
            IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(quota_data)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/network_topology/index.html')
        self.assertContains(res, expected_string, html=True,
                            msg_prefix="The create button is not disabled")

    @test.create_stubs({quotas: ('tenant_quota_usages',)})
    def test_create_network_button_disabled_when_quota_exceeded(self):
        """'Create Network' is disabled once the network quota hits zero."""
        url = reverse('horizon:project:network_topology:createnetwork')
        classes = 'btn btn-default btn-sm ajax-modal'
        link_name = "Create Network (Quota exceeded)"
        expected_string = "<a href='%s' class='%s disabled' "\
            "id='networks__action_create'>" \
            "<span class='fa fa-plus'></span>%s</a>" \
            % (url, classes, link_name)
        self._test_new_button_disabled_when_quota_exceeded(
            expected_string, networks_quota=0)

    @test.create_stubs({quotas: ('tenant_quota_usages',)})
    def test_create_router_button_disabled_when_quota_exceeded(self):
        """'Create Router' is disabled once the router quota hits zero."""
        url = reverse('horizon:project:network_topology:createrouter')
        classes = 'btn btn-default btn-sm ajax-modal'
        link_name = "Create Router (Quota exceeded)"
        expected_string = "<a href='%s' class='%s disabled' "\
            "id='Routers__action_create'>" \
            "<span class='fa fa-plus'></span>%s</a>" \
            % (url, classes, link_name)
        self._test_new_button_disabled_when_quota_exceeded(
            expected_string, routers_quota=0)

    @test.create_stubs({quotas: ('tenant_quota_usages',)})
    def test_launch_instance_button_disabled_when_quota_exceeded(self):
        """'Launch Instance' is disabled once the instance quota hits zero."""
        url = reverse('horizon:project:network_topology:launchinstance')
        classes = 'btn btn-default btn-sm btn-launch ajax-modal'
        link_name = "Launch Instance (Quota exceeded)"
        expected_string = "<a href='%s' class='%s disabled' "\
            "id='instances__action_launch'>" \
            "<span class='fa fa-cloud-upload'></span>%s</a>" \
            % (url, classes, link_name)
        self._test_new_button_disabled_when_quota_exceeded(
            expected_string, instances_quota=0)
| 42.387097 | 79 | 0.573277 |
3d2efc36c284aff2991dc22a15573b2e007e0a20 | 592 | py | Python | Leetcode/0852. Peak Index in a Mountain Array.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | 1 | 2021-07-15T18:40:26.000Z | 2021-07-15T18:40:26.000Z | Leetcode/0852. Peak Index in a Mountain Array.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | null | null | null | Leetcode/0852. Peak Index in a Mountain Array.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | null | null | null | class Solution(object):
def peakIndexInMountainArray(self, arr: list[int]) -> int:
lo, hi = 0, len(arr) - 1
while lo < hi:
mi = (lo + hi) / 2
if arr[mi] < arr[mi + 1]:
lo = mi + 1
else:
hi = mi
return lo
class Solution:
    """LeetCode 852: linear scan for the peak of a mountain array."""

    def peakIndexInMountainArray(self, arr: list[int]) -> int:
        """Return the first index whose value exceeds its right neighbour.

        O(n).  Iterating only up to len(arr) - 2 avoids reading
        arr[len(arr)]: the original scanned range(len(arr)) and would raise
        IndexError on a strictly increasing (i.e. invalid) input.  For a
        valid mountain array the result is unchanged.
        """
        for i in range(len(arr) - 1):
            if arr[i] > arr[i + 1]:
                return i
class Solution:
    """LeetCode 852: the peak of a mountain array is simply its maximum."""

    def peakIndexInMountainArray(self, arr: list[int]) -> int:
        """Return the index of the largest element (two O(n) passes)."""
        peak_value = max(arr)
        return arr.index(peak_value)
| 25.73913 | 62 | 0.496622 |
66638815c0a976bab7acaadc3e54088c338eb6f1 | 1,059 | py | Python | test_alpha.py | NiclasEriksen/rpg_procgen | 8a4487886d606c2537d6d1f97be8f4a45deda761 | [
"CC0-1.0"
] | null | null | null | test_alpha.py | NiclasEriksen/rpg_procgen | 8a4487886d606c2537d6d1f97be8f4a45deda761 | [
"CC0-1.0"
] | null | null | null | test_alpha.py | NiclasEriksen/rpg_procgen | 8a4487886d606c2537d6d1f97be8f4a45deda761 | [
"CC0-1.0"
] | null | null | null |
import pyglet
from pyglet.gl import *
from shader import Shader
class JuliaWindow(pyglet.window.Window):
    """512x512 pyglet window rendering the Julia set with a GLSL shader.

    The complex constant C of the Julia iteration is driven by the mouse
    position, so moving the cursor morphs the fractal in real time.
    """

    def __init__(self):
        super(JuliaWindow, self).__init__(caption = 'julia', width = 512, height = 512)
        # Initial Julia constant (a visually interesting default).
        self.C = (-0.70176, -0.3842)
        shader_path = 'julia'
        # Shader sources are read from julia.v.glsl / julia.f.glsl in the
        # current working directory.  NOTE(review): the file objects are
        # never closed, and joining the lines with ' ' relies on each line
        # retaining its trailing newline.
        self.shader = Shader(
            ' '.join(open('%s.v.glsl' % shader_path)),
            ' '.join(open('%s.f.glsl' % shader_path))
        )

    def on_mouse_motion(self, x, y, dx, dy):
        """Map the cursor position to C in roughly [-3, 3] x [-3, 3]."""
        # NOTE(review): reads the module-level `window` instead of `self`;
        # this only works because a single instance is bound to that global.
        self.C = (6. * ((float(x) / window.width) - .5), 6 * ((float(y) / window.height) - .5))

    def on_draw(self):
        """Draw one full-screen quad; the fragment shader computes colors."""
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(-1., 1., 1., -1., 0., 1.)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        self.shader.bind()
        # Upload the current Julia constant to the fragment shader.
        self.shader.uniformf('C', *self.C)
        # NOTE(review): each glTexCoord2i here is issued *after* a
        # glVertex* call, so it applies to the following vertex — confirm
        # this matches the shader's expectations.
        glBegin(GL_QUADS)
        glVertex2i(-1, -1)
        glTexCoord2i(-2, -2)
        glVertex2f(1, -1)
        glTexCoord2i(2, -2)
        glVertex2i(1, 1)
        glTexCoord2i(2, 2)
        glVertex2i(-1, 1)
        glTexCoord2i(-2, 2)
        glEnd()
        self.shader.unbind()
# Create the window and hand control to pyglet's event loop (blocks until
# the window is closed).
window = JuliaWindow()
pyglet.app.run()
42cfff132a3823e2e59af9afac8a690c1569de3b | 239 | py | Python | facets/wiki/utils.py | hunterhector/DDSemantics | 883ef1015bd21d9b8575d8000faf3b506a09f21c | [
"Apache-2.0"
] | null | null | null | facets/wiki/utils.py | hunterhector/DDSemantics | 883ef1015bd21d9b8575d8000faf3b506a09f21c | [
"Apache-2.0"
] | null | null | null | facets/wiki/utils.py | hunterhector/DDSemantics | 883ef1015bd21d9b8575d8000faf3b506a09f21c | [
"Apache-2.0"
] | 2 | 2018-06-24T17:40:31.000Z | 2020-07-30T19:19:55.000Z | def load_index(index_file):
"""Read the index file"""
index_dict = {}
with open(index_file) as f:
for line in f:
title, path = line.strip().split()
index_dict[title] = path
return index_dict
| 26.555556 | 46 | 0.577406 |
d88f79ecdee079a21c01f74ceacd272ecc060a15 | 965 | py | Python | src/Python/101-200/102.BinaryTreeLevelOrderTraversal.py | Peefy/PeefyLeetCode | 92156e4b48ba19e3f02e4286b9f733e9769a1dee | [
"Apache-2.0"
] | 2 | 2018-05-03T07:50:03.000Z | 2018-06-17T04:32:13.000Z | src/Python/101-200/102.BinaryTreeLevelOrderTraversal.py | Peefy/PeefyLeetCode | 92156e4b48ba19e3f02e4286b9f733e9769a1dee | [
"Apache-2.0"
] | null | null | null | src/Python/101-200/102.BinaryTreeLevelOrderTraversal.py | Peefy/PeefyLeetCode | 92156e4b48ba19e3f02e4286b9f733e9769a1dee | [
"Apache-2.0"
] | 3 | 2018-11-09T14:18:11.000Z | 2021-11-17T15:23:52.000Z |
class TreeNode:
    """Minimal binary-tree node: a payload plus optional left/right children."""

    def __init__(self, x):
        """Store the payload; both child links start out empty."""
        self.val = x
        # Children are wired up by the caller after construction.
        self.left = self.right = None
class Solution:
    """LeetCode 102: breadth-first (level order) traversal of a binary tree."""

    def levelOrder(self, root: TreeNode) -> list:
        """Return the node values grouped level by level, top to bottom."""
        if root is None:
            return []
        levels = []
        frontier = [root]
        while frontier:
            # Record the whole current level at once, then collect its
            # children as the next frontier.
            levels.append([node.val for node in frontier])
            next_frontier = []
            for node in frontier:
                if node.left is not None:
                    next_frontier.append(node.left)
                if node.right is not None:
                    next_frontier.append(node.right)
            frontier = next_frontier
        return levels
if __name__ == "__main__":
    # Ad-hoc smoke test: build the tree
    #       2
    #      / \
    #     1   3
    #        /
    #       4
    # and print its level-order traversal: [[2], [1, 3], [4]].
    solution = Solution()
    node = TreeNode(2)
    node.left = TreeNode(1)
    node.right = TreeNode(3)
    node.right.left = TreeNode(4)
    print(solution.levelOrder(node))
| 25.394737 | 49 | 0.495337 |
5e0f8c4a42b024ab6705526eb4b85c103f76aafd | 1,373 | py | Python | tests_bdd/_tools.py | Animatea/funchacks | 1589cdc3e042d96e8bb3e6b665111dc8d23208b1 | [
"Apache-2.0"
] | 6 | 2021-12-30T11:54:25.000Z | 2022-02-01T15:56:03.000Z | tests_bdd/_tools.py | Animatea/funchacks | 1589cdc3e042d96e8bb3e6b665111dc8d23208b1 | [
"Apache-2.0"
] | null | null | null | tests_bdd/_tools.py | Animatea/funchacks | 1589cdc3e042d96e8bb3e6b665111dc8d23208b1 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
__all__ = ["has_args", "function_template"]
from typing import TYPE_CHECKING, Any, Generator, Generic, Optional, TypeVar
from behave import fixture
from hamcrest.core.base_matcher import BaseMatcher
if TYPE_CHECKING:
from hamcrest.core.description import Description
from funchacks.typehints import AnyCallableT
ContextT = TypeVar("ContextT")
ItemT = TypeVar("ItemT")
class HasAttributes(BaseMatcher, Generic[ItemT]):
    """Hamcrest matcher asserting that an object exposes every named attribute."""

    def __init__(self, *attrs: str) -> None:
        self.attrs = attrs
        # Name of the first attribute found missing, kept for the report.
        self.failed: Optional[str] = None

    def _matches(self, item: ItemT) -> bool:
        """Succeed only if *item* has every attribute; remember the first miss."""
        missing = next((name for name in self.attrs if not hasattr(item, name)), None)
        if missing is None:
            return True
        self.failed = missing
        return False

    def describe_to(self, description: Description) -> None:
        """Explain the mismatch in terms of the attribute that failed."""
        description.append_text("failing on ")
        description.append_text(f"<{self.failed}> attribute")
def has_args(*attributes: str) -> HasAttributes[ItemT]:
    """Build a :class:`HasAttributes` matcher for the given attribute names."""
    return HasAttributes(*attributes)
@fixture
def function_template(_: ContextT) -> Generator[AnyCallableT, Any, None]:
    """Fixture that returns function object (template for many steps)."""
    # NOTE(review): the inner function's name, signature layout
    # (positional-only / keyword-only markers) and local variable names
    # (d, e) appear to be introspected by the BDD steps — do not rename
    # them; confirm against the feature files.
    def foo(a: int, /, b: int, *, c: int) -> None:
        # Some random code for testing locals and sig args.
        d = 1
        e = 2
        return None
    yield foo
9221f98ec7084e12accbe7ffa9f0d7c26217e3c0 | 19,643 | py | Python | tf2onnx/rewriter/loop_rewriter_base.py | jim-meyer/tensorflow-onnx | 2b3c23da102c875737362f858b78fa50ae48809f | [
"MIT"
] | null | null | null | tf2onnx/rewriter/loop_rewriter_base.py | jim-meyer/tensorflow-onnx | 2b3c23da102c875737362f858b78fa50ae48809f | [
"MIT"
] | null | null | null | tf2onnx/rewriter/loop_rewriter_base.py | jim-meyer/tensorflow-onnx | 2b3c23da102c875737362f858b78fa50ae48809f | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
tf2onnx.rewriter.loop_rewriter_base
"""
from __future__ import division
from __future__ import print_function
import logging
from collections import OrderedDict
from tf2onnx import utils
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx.rewriter.rnn_utils import is_loopcond_op, is_tensor_array_op
from tf2onnx.rewriter.rnn_utils import is_tensor_array_gather_op, is_tensor_array_write_op
from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT
from tf2onnx.utils import TensorValueInfo
logger = logging.getLogger(__name__)
INVALID_INPUT_ID = utils.make_name("invalid_input_id")
# todo(pengwa) remove protected-access with changes to Graph/Node later.
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,protected-access
class Context(object):
    """Mutable scratchpad describing one TF while-loop being rewritten."""

    def __init__(self):
        # Name-scope prefix of the while loop (filled in during parsing).
        self.while_context_scope = None
        # Loop variables (state + scan) discovered for this loop.
        self.loop_properties = LoopProperties()
        # The LoopCond node anchoring the loop.
        self.loop_cond = None
        # GraphInfo of the iteration-body (cell) subgraph.
        self.cell_graph = None
        # GraphInfo of the loop-condition subgraph.
        self.cond_graph = None
class GraphInfo(object):
    """Plain record bundling a subgraph's nodes with its ordered I/O tensors."""

    def __init__(self, ops, inputs, outputs):
        self.nodes = ops
        # Ordered lists of TensorValueInfo describing the subgraph boundary.
        self.inputs = inputs
        self.outputs = outputs
        # Loop variables a condition graph depends on; assigned later by the
        # rewriter and left as None for body graphs.
        self.dependent_vars = None
class LoopProperties(object):
    """Registry of a while-loop's variables, split into state and scan kinds.

    Variables are keyed by their Enter node's name; enter_input_id cannot
    serve as key because one tensor may feed several Enter nodes as an
    initial value.
    """

    def __init__(self):
        self.state_variables = OrderedDict()
        self.scan_variables = OrderedDict()
        # InputTensorArray records treated as scan inputs.
        self.tensor_array_inputs = []

    def add_variable(self, var):
        """Register *var* under its enter name, rejecting duplicates."""
        utils.make_sure(var.enter_name not in self.scan_variables,
                        "variable %s already exists as scan variable.", var.enter_name)
        utils.make_sure(var.enter_name not in self.state_variables,
                        "variable %s already exists as state variable.", var.enter_name)
        bucket = self.scan_variables if var.is_tensor_array else self.state_variables
        bucket[var.enter_name] = var

    def get_variables(self, checker):
        """Return all variables, or only those accepted by *checker*."""
        all_vars = self.all_variables.values()
        if not checker:
            return all_vars
        return [v for v in all_vars if checker(v)]

    @property
    def all_variables(self):
        """State variables followed by scan variables, as one merged dict."""
        merged = self.state_variables.copy()
        merged.update(self.scan_variables)
        return merged

    # State inputs and outputs come in pairs; an output that does not depend
    # on its corresponding input leaves the input id as None.
    @property
    def state_inputs(self):
        return [var.switch_true_identity_output
                for var in self.state_variables.values()]

    @property
    def state_inputs_initial_values(self):
        return [var.enter_input_id for var in self.state_variables.values()]

    @property
    def state_outputs(self):
        return [var.next_iteration_input for var in self.state_variables.values()]

    @property
    def state_outputs_exits(self):
        return [var.exit_output for var in self.state_variables.values()]

    # Scan outputs (tensor arrays) are never read back by the next iteration.
    @property
    def scan_outputs(self):
        return [var.next_iteration_input for var in self.scan_variables.values()]

    @property
    def scan_outputs_exits(self):
        return [var.exit_output for var in self.scan_variables.values()]

    def add_scan_input(self, input_tensor_array):
        """Treat an unstacked input tensor array as a scan input."""
        self.tensor_array_inputs.append(input_tensor_array)

    @property
    def scan_inputs(self):
        # Usually the TensorArrayReadV3 consumer of each input tensor array.
        return [ta.consumer for ta in self.tensor_array_inputs]

    @property
    def scan_inputs_initial_values(self):
        return [ta.data_input_id for ta in self.tensor_array_inputs]
class LoopVariable(object):
    """One loop variable of a TF while-loop, viewed from the body's outputs.

    In TensorFlow every loop variable appears both among the iteration
    body's inputs and its outputs:

        Loop (state variable 1, state variable 2) {
            # An updated state variable may depend on 0, 1 or more of the
            # incoming state variables.  When it depends on none of them,
            # switch_true_identity_output.id is None; conversion later
            # creates a fake, unconsumed input for the ONNX Loop body.
            return updated state vars..., scan vars...
        }

    Construction follows the body graph's outputs backwards:
    1. start from the body output (next_iteration_input_id);
    2. the body subgraph producing it lies between NextIteration and Switch;
    3. enter_input_id holds the variable's initial value;
    4. is_tensor_array records whether the variable is a tensor array;
    5. switch_true_identity_output_id is where the value re-enters the body
       on the next iteration (may be absent).
    """

    def __init__(self, enter_name, enter_input_id, next_iteration_input_id,
                 switch_true_identity_output_id, exit_output_id, is_tensor_array, ta_index_id, g):
        self.enter_name = enter_name
        self.enter_input_id = enter_input_id
        # The body graph's output for this variable; a loop variable must
        # always be produced by the body, so this id is mandatory.
        utils.make_sure(next_iteration_input_id, "next_iteration_input_id should not be None")
        self.next_iteration_input = TensorValueInfo(next_iteration_input_id, g)
        # Entry point of the value into the body graph; None when no body
        # node consumes the variable's (initial or previous) value.
        self.switch_true_identity_output = TensorValueInfo(switch_true_identity_output_id, g)
        # Exit on the switch_false branch (the loop boundary); None when the
        # variable's final value has no consumers.
        self.exit_output = TensorValueInfo(exit_output_id, g)
        # Tensor-array specifics.  TODO (carried from original): verify the
        # index is a scalar starting at 1 and incremented each iteration,
        # which would make this equivalent to ONNX scan-output behaviour.
        self.is_tensor_array = is_tensor_array
        self.ta_index_id = ta_index_id
class InputTensorArray(object):
    """A tensor array fed into the loop, modelled as a scan input.

    The array is unstacked (scattered) before the loop starts; ``consumer``
    is the body-graph node that reads one of its elements each iteration.
    """

    def __init__(self, data_input_id, index_input_id, consumer_id, g):
        self.index_input_id = index_input_id
        self.data_input_id = data_input_id
        self.consumer = TensorValueInfo(consumer_id, g)
class LoopRewriterBase(object):
    """Base class for rewriting TF while-loops (LoopCond constructs).

    Subclasses override create_context / need_rewrite / rewrite; this class
    supplies the shared machinery to parse loop variables, locate the body
    and condition subgraphs, and detach them from the TF control-flow
    plumbing (Enter / Merge / Switch / NextIteration / Exit nodes).
    """

    def __init__(self, g):
        # The graph being rewritten.
        self.g = g
        # Pattern matching an element read from an input tensor array that
        # was scattered (unstacked) before entering the loop.
        self.ta_read_input_pattern = \
            OpTypePattern("TensorArrayReadV3", name="ta_read", inputs=[
                OpTypePattern("Enter", name="ta_enter", inputs=[
                    OpTypePattern("TensorArrayV3")
                ]),
                OpTypePattern("Identity", name="ta_index"),
                OpTypePattern("Enter", name="ta_scatter_enter", inputs=[
                    OpTypePattern("TensorArrayScatterV3", name="ta_input_scatter")
                ]),
            ])

    def create_context(self):
        """Hook: build the per-loop context (subclasses may extend it)."""
        return Context()

    def need_rewrite(self, context):
        """Hook: return True when this rewriter handles the parsed loop."""
        return False

    def rewrite(self, context):
        """Hook: perform the rewrite; return a REWRITER_RESULT value."""
        return REWRITER_RESULT.FAIL

    def run_internal(self):
        """Find every LoopCond in the graph and try to rewrite its loop."""
        loopcond_ops = []
        for op in self.g.get_nodes():
            if is_loopcond_op(op):
                loopcond_ops.append(op)
        # self.g.get_nodes may change inside this loop so that we parse all LoopCond first
        for op in loopcond_ops:
            logger.debug("======================\n handling loop cond node called %s", op.name)
            context = self.create_context()
            context.loop_cond = op
            self._check_in_read_only_mode(context)
            if self.need_rewrite(context):
                # cut off connection between cell/cond graphs and useless nodes like Merge, NextIteration.
                self._cut_off_connection_for_cell(context)
                context.cell_graph = self._crop_loop_body_sub_graph(context)
                context.cond_graph = self._crop_loop_condition_sub_graph(context)
                _result = self.rewrite(context)
                if _result == REWRITER_RESULT.OK:
                    logger.debug("rewrite successfully")
                elif _result == REWRITER_RESULT.SKIP:
                    logger.debug("rewrite skipped for LoopCond called %s", op.name)
                    continue
                elif _result == REWRITER_RESULT.FAIL:
                    raise ValueError("rewrite failed, so just fast fail it")
        if self.g.outputs:
            # clean the graph based on output names.
            self.g.delete_unused_nodes(self.g.outputs)
        return self.g.get_nodes()

    def _check_in_read_only_mode(self, context):
        """Populate context.loop_properties without mutating the graph."""
        self._parse_loop_variables(context)
        self._parse_input_ta(context)

    def _parse_loop_variables(self, context):
        """Derive one LoopVariable per Switch fed by this loop's LoopCond."""
        loop_cond_op = context.loop_cond
        parts = loop_cond_op.name.split('/')
        context.while_context_scope = '/'.join(parts[0:-1]) + "/"
        logger.debug("found while loop scope %s", context.while_context_scope)
        switch_nodes = self.g.find_output_consumers(loop_cond_op.output[0])
        for s in switch_nodes:
            if s.type != 'Switch':
                raise ValueError("LoopCond's output node should be followed with a Switch node")
            loop_var = self._get_loop_var_from_switch(s)
            context.loop_properties.add_variable(loop_var)

    def _parse_input_ta(self, context):
        """Register tensor arrays feeding the loop body as scan inputs."""
        graph_inputs = [v.switch_true_identity_output.id for v in context.loop_properties.all_variables.values()
                        if v.switch_true_identity_output.id]
        matcher = GraphMatcher(self.ta_read_input_pattern, allow_reorder=False)
        match_results = matcher.match_ops(self.g.get_nodes())
        # Keep only reads whose index tensor is one of the loop's body inputs.
        match_results = [r for r in match_results if r.get_op("ta_index").output[0] in graph_inputs]
        for match in match_results:
            ta_input_scatter = match.get_op("ta_input_scatter")
            # the 3rd input of scatter is the value
            data_input_id = ta_input_scatter.input[2]
            ta_read_node = match.get_op("ta_read")
            # todo: need check ta's index variable is a scalar starting from 1, and increase by 1 each iteration.
            # then we can be sure this is equivalent to scan input behavior.
            index_input_id = ta_read_node.input[1]
            unstacked_ta_consumer = match.get_op("ta_read").output[0]
            ta = InputTensorArray(data_input_id, index_input_id, unstacked_ta_consumer, self.g)
            context.loop_properties.add_scan_input(ta)

    def _crop_loop_body_sub_graph(self, context):
        """Extract the iteration-body subgraph between the loop's inputs and outputs."""
        # according to input and output, find the body graph
        loop_props = context.loop_properties
        inputs = loop_props.state_inputs + loop_props.scan_inputs
        input_ids = [input_tensor_value_info.id for input_tensor_value_info in inputs]
        outputs = loop_props.state_outputs + loop_props.scan_outputs
        output_ids = [out_tensor_value_info.id for out_tensor_value_info in outputs]
        ops, enter_nodes, _ = self.find_subgraph(set(input_ids), set(output_ids), self.g, merge_as_end=False)
        for enter_node in enter_nodes:
            # connect Enter's output to Enter's input
            self.g.replace_all_inputs(ops, enter_node.output[0], enter_node.input[0])
        return GraphInfo(ops, inputs, outputs)

    def _crop_loop_condition_sub_graph(self, context):
        """Extract the condition subgraph and detach it from the loop plumbing."""
        input_ids = []
        output_ids = [context.loop_cond.input[0]]
        outputs = [TensorValueInfo(o, self.g) for o in output_ids]
        ops, enter_nodes, merge_nodes = self.find_subgraph(set(input_ids), set(output_ids), self.g, merge_as_end=True)
        for enter_node in enter_nodes:
            # connect Enter's output to Enter's input
            self.g.replace_all_inputs(ops, enter_node.output[0], enter_node.input[0])
        dependent_vars = []
        for merge_node in merge_nodes:
            enter_node = [n for n in merge_node.inputs if n.type == "Enter"][0]
            loop_var = context.loop_properties.all_variables[enter_node.name]
            # cut off connection between condition graph and Merge node.
            non_switch_consumers = [n for n in self.g.find_output_consumers(merge_node.output[0]) if n.type != "Switch"]
            self.g.replace_all_inputs(non_switch_consumers, merge_node.output[0],
                                      loop_var.switch_true_identity_output.id)
            dependent_vars.append(loop_var)
        # cut off connection between condition graph and LoopCond node.
        self.g.replace_all_inputs([context.loop_cond], context.loop_cond.output[0], INVALID_INPUT_ID)
        graph_info = GraphInfo(ops, [], outputs)
        graph_info.dependent_vars = dependent_vars
        return graph_info

    def _cut_off_connection_for_cell(self, context):
        """Disconnect the body graph from its Switch/NextIteration boundaries."""
        for val in context.loop_properties.all_variables.values():
            if val.switch_true_identity_output.id:
                # remove the node to cut off a starting node of the cell (e.g. loop body).
                n = self.g.get_node_by_output(val.switch_true_identity_output.id)
                self.g.remove_node(n.name)
            if val.is_tensor_array:
                # connect NextIteration to an invalid node, to cut off an ending node of the cell.
                ta_write_nodes = [n for n in self.g.get_nodes() if is_tensor_array_write_op(n)]
                self.g.replace_all_inputs(ta_write_nodes, val.next_iteration_input.id, INVALID_INPUT_ID)
            else:
                # connect NextIteration to an invalid node, to cut off an ending node of the cell.
                next_iter_nodes = [n for n in self.g.get_nodes() if n.type == "NextIteration"]
                self.g.replace_all_inputs(next_iter_nodes, val.next_iteration_input.id, INVALID_INPUT_ID)
        for scan_input in context.loop_properties.scan_inputs:
            # remove the node to cut off connection between scan_input and the cell.
            self.g.remove_node(self.g.get_node_by_output(scan_input.id).name)

    def _get_loop_var_from_switch(self, switch_node):
        """Build a LoopVariable from a Switch and its Merge/Enter/NextIteration ring.

        Returns None when switch_node does not match the expected pattern.
        """
        if switch_node.type != 'Switch':
            logger.error("not a switch node, skip")
            return None
        # the first input is data
        merge_node = switch_node.inputs[0]
        if merge_node.type != "Merge":
            logger.error("switch node does not has Merge as its first input")
            return None
        # find the output_true consumers
        switch_consumers = self.g.find_output_consumers(switch_node.output[1])
        switch_true_consumer_cnt = len(switch_consumers)
        if switch_true_consumer_cnt == 0:
            switch_true_identity_output = None
        elif switch_true_consumer_cnt == 1:
            if switch_consumers[0].type != "Identity":
                raise ValueError("switch has consumer that is not Identity")
            switch_true_identity_output = switch_consumers[0].output[0]
        else:
            raise ValueError("switch_true " + switch_node.name + " has unexpected count of consumers:",
                             [n.name for n in switch_consumers])
        target_node_input_id = None
        enter_node = [n for n in merge_node.inputs if n.type == 'Enter'][0]
        target_node_input_id = enter_node.input[0]
        logger.debug("a Switch >> Merge >> Enter is found called %s", enter_node.inputs[0].name)
        next_iteration_node = [n for n in merge_node.inputs if n.type == 'NextIteration'][0]
        last_iteration_output_id = next_iteration_node.input[0]
        # find the output_false consumers to see whether there is consumer for this var
        switch_false_consumers = self.g.find_output_consumers(switch_node.output[0])
        false_consumer_count = len(switch_false_consumers)
        exit_output_id = None
        if false_consumer_count == 1:
            exit_node = switch_false_consumers[0]
            if exit_node.type != "Exit":
                raise ValueError("switch false branch is followed by non-Exit")
            exit_output_id = exit_node.output[0]
        elif false_consumer_count == 0:
            # sometime, the variable output won't be used in the new iteration as input.
            exit_output_id = None
        else:
            raise ValueError("unexpected number of switch false consumers")
        is_ta = False
        ta_index_id = None
        if is_tensor_array_op(self.g.get_node_by_output(target_node_input_id)):
            is_ta = True
            ta_write_node = self.g.get_node_by_output(last_iteration_output_id)
            utils.make_sure(is_tensor_array_write_op(ta_write_node), "ta nextiteration is not following ta write op")
            last_iteration_output_id = ta_write_node.input[2]
            ta_index_id = ta_write_node.input[1]
            # here we parse patterns generated by
            # ta.write(), then ta.stack(), because this is the most frequent usage pattern.
            if exit_output_id:
                exit_consumers = self.g.find_output_consumers(exit_output_id)
                ta_gather_node = [n for n in exit_consumers if is_tensor_array_gather_op(n)][0]
                # update exit output id, treat the gather output as ta's output
                exit_output_id = ta_gather_node.output[0]
        loop_var = LoopVariable(enter_node.name, target_node_input_id, last_iteration_output_id,
                                switch_true_identity_output, exit_output_id, is_ta, ta_index_id, self.g)
        return loop_var

    @staticmethod
    def find_subgraph(input_ids, output_ids, g, merge_as_end=False):
        """Collect nodes reachable backwards from output_ids, stopping at
        loop boundaries (Enter, optionally Merge, constants, or input_ids).

        Returns a tuple (nodes, enter_nodes, merge_nodes).
        """
        logger.debug("input ids %s ", input_ids)
        logger.debug("output ids %s ", output_ids)
        enter_nodes = set()
        merge_nodes = set()
        def find_input_boundary(node):
            """Return False to stop the backward traversal at *node*."""
            if node.type == "Enter":
                enter_nodes.add(node)
                logger.debug("terminate the input search at %s", node.name)
                return False
            if merge_as_end is True and node.type == "Merge":
                merge_nodes.add(node)
                logger.debug("terminate the input search at %s", node.name)
                return False
            if node.is_const():
                logger.debug("terminate search at const node %s", node.name)
                return False
            for o in node.output:
                if o in input_ids:
                    return False
            return True
        nodes = g.extract_sub_graph_nodes(output_ids, input_checker=find_input_boundary)
        return nodes, enter_nodes, merge_nodes

    @staticmethod
    def construct_graph_from_nodes(parent_g, nodes, outputs):
        """Build a subgraph from *nodes* with the given output ids/shapes/dtypes."""
        return utils.construct_graph_from_nodes(
            parent_g,
            nodes,
            [out.id for out in outputs],
            [out.shape for out in outputs],
            [out.dtype for out in outputs]
        )
| 44.54195 | 120 | 0.664257 |
163808fdd7e60a1dde319bc9059aaf8b9b83ef29 | 470 | py | Python | model/project.py | DmitriyNeurov/python_training_mantis | db94fad4e01e7d29a962d80791c984ddcacf1033 | [
"Apache-2.0"
] | null | null | null | model/project.py | DmitriyNeurov/python_training_mantis | db94fad4e01e7d29a962d80791c984ddcacf1033 | [
"Apache-2.0"
] | null | null | null | model/project.py | DmitriyNeurov/python_training_mantis | db94fad4e01e7d29a962d80791c984ddcacf1033 | [
"Apache-2.0"
] | null | null | null | from sys import maxsize
class Project:
    """Mantis project record used by the test fixtures.

    Equality deliberately ignores the id when either side has not been
    assigned one yet (e.g. a project created locally vs. one read back
    from the application).
    """

    def __init__(self, id=None, name=None):
        self.name = name
        self.id = id

    def __repr__(self):
        return "%s:%s" % (self.id, self.name)

    def __eq__(self, other):
        ids_compatible = self.id is None or other.id is None or self.id == other.id
        return ids_compatible and self.name == other.name

    def id_or_max(self):
        """Sort key: the numeric id, or sys.maxsize for projects without one."""
        return int(self.id) if self.id else maxsize
87099c3f5d151a559810235117c56a27d0211d0f | 784 | py | Python | test_split.py | mf2199/BBT-temporary | 262eb2d1c9c9a2de491abec275238272c8d7f002 | [
"RSA-MD"
] | null | null | null | test_split.py | mf2199/BBT-temporary | 262eb2d1c9c9a2de491abec275238272c8d7f002 | [
"RSA-MD"
] | null | null | null | test_split.py | mf2199/BBT-temporary | 262eb2d1c9c9a2de491abec275238272c8d7f002 | [
"RSA-MD"
] | null | null | null | from apache_beam.io import iobase
from google.cloud.bigtable import Client
from beam_bigtable.bigtable import BigTableSource
from google.cloud.bigtable.row_set import RowRange
from apache_beam.io.range_trackers import LexicographicKeyRangeTracker
from google.cloud.bigtable.row_set import RowSet
from apache_beam.metrics import Metrics
import copy
import math
project_id = 'grass-clump-479'
instance_id = 'python-write-2'
table_id = 'testmillionb38c02c4'
#table_id = 'testmillioned113e20'
client = Client(project=project_id, admin=True)
instance = client.instance(instance_id)
table = instance.table(table_id)
bigtable = BigTableSource(project_id, instance_id,
table_id)
for i in bigtable.get_sample_row_keys():
print(i.row_key)
print(i.offset_bytes) | 31.36 | 70 | 0.799745 |
238ba187076431605334793543d080d4e246e0c1 | 1,089 | py | Python | img_utils.py | M4gicT0/GatePoseEstimator | e81b6313d82347cbaa610f8d1276cf37f2d74e88 | [
"MIT"
] | null | null | null | img_utils.py | M4gicT0/GatePoseEstimator | e81b6313d82347cbaa610f8d1276cf37f2d74e88 | [
"MIT"
] | null | null | null | img_utils.py | M4gicT0/GatePoseEstimator | e81b6313d82347cbaa610f8d1276cf37f2d74e88 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 theo <theo@not-arch-linux>
#
# Distributed under terms of the MIT license.
import numpy as np
from keras.preprocessing import image
from PIL import Image
"""
Several image manipulation utility functions
"""
'''
Crops the given image over the given bounding box coordinates, and applies
zero-padding to the rest of the image. The returned image in fact has the
same dimensions as the given image
'''
def crop_and_pad(img, corner_min, corner_max, centered=True):
cropped = np.zeros(img.shape, dtype=img.dtype)
crop = img[corner_min[1]:corner_max[1], corner_min[0]:corner_max[0],:]
if centered:
startW = int((img.shape[1] - crop.shape[1]) / 2)
startH = int((img.shape[0] - crop.shape[0]) / 2)
cropped[startH:startH+crop.shape[0], startW:startW+crop.shape[1],:] = crop
else:
cropped[corner_min[1]:corner_max[1], corner_min[0]:corner_max[0],:] = crop
assert cropped.shape == img.shape, "Cropped image has been resized!"
return cropped
| 30.25 | 82 | 0.682277 |
7d4523b162468ce3894881e49865248556451778 | 12,181 | py | Python | data/toy.py | jeffwillette/few_shot_meta_learning | 284abd392929f2cc9d89beb047bb89e89c3a1c2e | [
"MIT"
] | null | null | null | data/toy.py | jeffwillette/few_shot_meta_learning | 284abd392929f2cc9d89beb047bb89e89c3a1c2e | [
"MIT"
] | null | null | null | data/toy.py | jeffwillette/few_shot_meta_learning | 284abd392929f2cc9d89beb047bb89e89c3a1c2e | [
"MIT"
] | null | null | null | from typing import List, Tuple, Any
import os
import numpy as np # type: ignore
import random
import torch
from torch.utils.data import Dataset
from matplotlib.colors import to_rgba # type: ignore
from matplotlib import pyplot as plt # type: ignore
from matplotlib.lines import Line2D # type: ignore
from sklearn.datasets import make_moons, make_circles # type: ignore
T = torch.Tensor
def get_biased_sample_idx(x: Any, y: Any, k_shot: int) -> Tuple[Any, ...]:
classes = np.unique(y)
n_sections = 2 # (n-way + kshot) * classes needs to be equally divisible by n_sections
sx, sy, qx, qy = np.empty((0, 2)), np.empty((0,)), np.empty((0, 2)), np.empty((0,))
for c in classes:
class_idx = np.argwhere(y == c).squeeze(1)
class_x, class_y = x[class_idx], y[class_idx]
x_or_y = 0 if np.sign(np.random.rand() - 0.5) < 0 else 1 # choose x or y index randomly
section = np.random.permutation(n_sections) # which half of the data to get
x_idx = np.argsort(class_x[:, x_or_y])
def sec(n: int) -> int:
return int(n * (x_idx.shape[0] // n_sections))
# get the support and qeury sets for this class which are split by section (whichever biased section we chose)
spt_x = class_x[x_idx[sec(section[0]) : sec(section[0] + 1)]] # get the proper third
spt_y = class_y[x_idx[sec(section[0]) : sec(section[0] + 1)]] # get the proper third
qry_x = class_x[x_idx[sec(section[1]) : sec(section[1] + 1)]]
qry_y = class_y[x_idx[sec(section[1]) : sec(section[1] + 1)]]
# collect random k of the biased support sets into one and leave the rest for the qeury set
spt_perm = np.random.permutation(spt_x.shape[0])
sx = np.concatenate((sx, spt_x[spt_perm[:k_shot]]))
sy = np.concatenate((sy, spt_y[spt_perm[:k_shot]]))
qx = np.concatenate((qx, spt_x[spt_perm[k_shot:]], qry_x))
qy = np.concatenate((qy, spt_y[spt_perm[k_shot:]], qry_y))
return sx, sy, qx, qy
class ToyDataset(Dataset):
def __init__(self, seed: int = 0, k_shot: int = 10, total_tasks: int = 100, test_shots: int = 50):
self.seed = seed
self.k_shot = k_shot
self.total_tasks = total_tasks
self.test_shots = test_shots
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
def __len__(self) -> int:
return self.total_tasks
class MetaMoons(ToyDataset):
def __init__(
self,
seed: int = 0,
k_shot: int = 10,
total_tasks: int = 100,
test_shots: int = 50,
):
super().__init__(seed=seed, k_shot=k_shot, total_tasks=total_tasks, test_shots=test_shots)
self.n_way = 2
self.name = "moons"
self.path = os.path.join("toy-moons", "2-way", f"{k_shot}-shot", f"{test_shots}-testshot")
def __getitem__(self, i: int) -> Tuple[T, T, T, T]:
return self.gen_random_task()
def sample_uniform(self) -> T:
x = torch.linspace(-3, 3, 100)
return torch.stack(torch.meshgrid(x, x), dim=-1).view(-1, 2)
def gen_random_task(self) -> Tuple[T, T, T, T]:
noise = np.random.rand() * .25
x, y = make_moons(n_samples=self.n_way * (self.k_shot + self.test_shots), noise=noise, random_state=self.seed)
sx, sy, qx, qy = get_biased_sample_idx(x, y, self.k_shot)
sx, sy, qx, qy = torch.from_numpy(sx).float(), torch.from_numpy(sy).long(), torch.from_numpy(qx).float(), torch.from_numpy(qy).long()
return sx, sy, qx, qy
class MetaCircles(ToyDataset):
def __init__(
self,
seed: int = 0,
k_shot: int = 10,
total_tasks: int = 100,
test_shots: int = 50,
):
super().__init__(seed=seed, k_shot=k_shot, total_tasks=total_tasks, test_shots=test_shots)
self.n_way = 2
self.name = "circles"
self.path = os.path.join("toy-circles", "2-way", f"{k_shot}-shot", f"{test_shots}-testshot")
def __getitem__(self, i: int) -> Tuple[T, T, T, T]:
return self.gen_random_task()
def sample_uniform(self) -> T:
x = torch.linspace(-3, 3, 100)
return torch.stack(torch.meshgrid(x, x), dim=-1).view(-1, 2)
def gen_random_task(self) -> Tuple[T, T, T, T]:
noise = np.random.rand() * .25
scale = np.random.rand() * 0.8
x, y = make_circles(n_samples=self.k_shot + self.test_shots, noise=noise, factor=scale, random_state=self.seed)
sx, sy, qx, qy = get_biased_sample_idx(x, y, self.k_shot)
sx, sy, qx, qy = torch.from_numpy(sx).float(), torch.from_numpy(sy).long(), torch.from_numpy(qx).float(), torch.from_numpy(qy).long()
return sx, sy, qx, qy
class RandomGaussians(ToyDataset):
def __init__(
self,
seed: int = 0,
n_way: int = 5,
k_shot: int = 5,
total_tasks: int = 100,
test_shots: int = 15,
mu_rng: List[int] = [-5, 5],
var_rng: List[float] = [0.1, 1.0],
dim: int = 2
):
super().__init__(seed=seed, k_shot=k_shot, total_tasks=total_tasks, test_shots=test_shots)
self.name = "2d-gaussians"
self.mu_rng = mu_rng
self.n_way = n_way
self.var_rng = var_rng
self.var = var_rng
self.dim = dim
self.name = "gausian"
self.path = os.path.join("toy-gaussian", f"{n_way}-way", f"{k_shot}-shot", f"{test_shots}-testshot")
def sample_uniform(self) -> T:
x = torch.linspace(-3, 3, 100)
return torch.stack(torch.meshgrid(x, x), dim=-1).view(-1, self.dim)
def sample(self, N: torch.distributions.MultivariateNormal, variant: str = "uniform") -> Tuple[T, T]:
train, test = N.sample((self.k_shot,)).transpose(0, 1), N.sample((self.test_shots,)).transpose(0, 1)
return train, test
def gen_random_task(self) -> Tuple[T, T, T, T]:
# sample mus and sigmas uniformyl according to their range
mus = torch.rand((self.n_way, self.dim)) * (self.mu_rng[1] - self.mu_rng[0]) + self.mu_rng[0]
# decompose PSD sigma as O^TDO with orthogonal O's to make random PSD covariance
# https://stats.stackexchange.com/questions/2746/how-to-efficiently-generate-random-positive-semidefinite-correlation-matrices
O = torch.rand((self.n_way, self.dim, self.dim)) * 2 - 1
O = torch.qr(O)[0]
D = torch.stack([torch.eye(self.dim) * torch.rand(self.dim) for i in range(self.n_way)])
# make the eigenvectors be different lengths in order to make the direction elliptical ratio of 5:1
tmp = (torch.rand((self.n_way, self.dim)) * (self.var_rng[1] - self.var_rng[0]) + self.var_rng[0]).unsqueeze(1)
tmp[:, :, 1] = tmp[:, :, 0] / 5
D = D * tmp
sigmas = O.transpose(1, 2).bmm(D.bmm(O))
N = torch.distributions.MultivariateNormal(mus, sigmas)
labels = torch.randperm(self.n_way)
train_x, test_x = self.sample(N)
mu, sigma = train_x.mean(dim=(0, 1)), train_x.std(dim=(0, 1))
train_x = (train_x - mu) / sigma
test_x = (test_x - mu) / sigma
train_y = labels.unsqueeze(-1).repeat(1, self.k_shot)
test_y = labels.unsqueeze(-1).repeat(1, self.test_shots)
train_x, train_y, test_x, test_y = train_x.reshape(-1, self.dim).numpy(), train_y.reshape(-1).numpy(), test_x.reshape(-1, self.dim).numpy(), test_y.reshape(-1).numpy()
x, y = np.concatenate((train_x, test_x)), np.concatenate((train_y, test_y))
assert x.shape[0] % 2 == 0, f"x needs to be evenly divisible by 2 (got shape {x.shape}) for the toy Gaussian, if not you have to fix 'get biased sample function'"
sx, sy, qx, qy = get_biased_sample_idx(x, y, self.k_shot)
return torch.from_numpy(sx).float(), torch.from_numpy(sy).long(), torch.from_numpy(qx).float(), torch.from_numpy(qy).long()
def __getitem__(self, i: int) -> Tuple[T, T, T, T]:
return self.gen_random_task()
colors = [
"tab:blue", "tab:orange", "tab:green", "tab:red", "tab:purple",
"tab:brown", "tab:pink", "tab:gray", "tab:olive", "tab:cyan",
"mediumseagreen", "teal", "navy", "darkgoldenrod", "darkslateblue",
]
def get_color(i: int) -> Tuple[float, ...]:
if i < len(colors):
return to_rgba(colors[i]) # type: ignore
return (np.random.rand(), np.random.rand(), np.random.rand(), 1.0)
BATCH_SIZE = 3
SEED = 1
if __name__ == "__main__":
ds: Any
do_plots = ["moons", "circles", "gaussian"]
if "moons" in do_plots:
ds = MetaMoons(seed=SEED)
fig, axes = plt.subplots(nrows=1, ncols=BATCH_SIZE, figsize=(BATCH_SIZE * 7, 6))
for i, ax in enumerate(axes):
xtr, ytr, xte, yte = ds[0]
# this sample will be form a different task, but we are only taking the uniform noise so it is ok
ax.scatter(xtr[:, 0], xtr[:, 1], c=[get_color(v.item()) for v in ytr], s=50, edgecolors=(0, 0, 0, 0.5), linewidths=2.0)
ax.scatter(xte[:, 0], xte[:, 1], c=[get_color(v.item()) for v in yte], marker='*', s=20)
ax.set_title(f"task: {i}")
if i == BATCH_SIZE - 1:
legend_elements = [
Line2D([0], [0], marker='o', color='w', label='train', markerfacecolor='black', markersize=10),
Line2D([0], [0], marker='*', color='w', label='test', markerfacecolor='black', markersize=10),
]
ax.legend(handles=legend_elements)
path = os.path.join("data", "examples", "toy-moons")
os.makedirs(path, exist_ok=True)
fig.tight_layout()
fig.savefig(os.path.join(path, "metatrain-example.pdf"))
fig.savefig(os.path.join(path, "metatrain-example.png"))
if "circles" in do_plots:
ds = MetaCircles(seed=SEED)
fig, axes = plt.subplots(nrows=1, ncols=BATCH_SIZE, figsize=(BATCH_SIZE * 7, 6))
for i, ax in enumerate(axes):
xtr, ytr, xte, yte = ds[0]
# this sample will be form a different task, but we are only taking the uniform noise so it is ok
ax.scatter(xtr[:, 0], xtr[:, 1], c=[get_color(v.item()) for v in ytr], s=50, edgecolors=(0, 0, 0, 0.5), linewidths=2.0)
ax.scatter(xte[:, 0], xte[:, 1], c=[get_color(v.item()) for v in yte], marker='*', s=20)
ax.set_title(f"task: {i}")
if i == BATCH_SIZE - 1:
legend_elements = [
Line2D([0], [0], marker='o', color='w', label='train', markerfacecolor='black', markersize=10),
Line2D([0], [0], marker='*', color='w', label='test', markerfacecolor='black', markersize=10),
]
ax.legend(handles=legend_elements)
path = os.path.join("data", "examples", "toy-circles")
os.makedirs(path, exist_ok=True)
fig.tight_layout()
fig.savefig(os.path.join(path, "metatrain-example.pdf"))
fig.savefig(os.path.join(path, "metatrain-example.png"))
if "gaussian" in do_plots:
# RANDOM GAUSSIANS
ds = RandomGaussians(seed=SEED, k_shot=5, test_shots=15)
fig, axes = plt.subplots(nrows=1, ncols=BATCH_SIZE, figsize=(BATCH_SIZE * 7, 6))
for i, ax in enumerate(axes):
xtr, ytr, xte, yte = ds[0]
ax.scatter(xtr[:, 0], xtr[:, 1], c=[get_color(v.item()) for v in ytr], s=50, edgecolors=(0, 0, 0, 0.5), linewidths=2.0)
ax.scatter(xte[:, 0], xte[:, 1], c=[get_color(v.item()) for v in yte], marker='*', s=20)
ax.set_title(f"task: {i}")
if i == BATCH_SIZE - 1:
legend_elements = [
Line2D([0], [0], marker='o', color='w', label='train', markerfacecolor='black', markersize=10),
Line2D([0], [0], marker='*', color='w', label='test', markerfacecolor='black', markersize=10),
]
ax.legend(handles=legend_elements)
path = os.path.join("data", "examples", "toy-gaussian")
os.makedirs(path, exist_ok=True)
fig.tight_layout()
fig.savefig(os.path.join(path, "metatrain-example.pdf"))
fig.savefig(os.path.join(path, "metatrain-example.png"))
| 42.590909 | 175 | 0.596174 |
84803c49f8f161c17140b63e68f64dbcf462292e | 4,449 | py | Python | src/flowrect/simulations/pdes/finite_pde.py | jokteur/ASMA | 25ac8a0455c680232d56c18d31de62c3188b7153 | [
"MIT"
] | 2 | 2021-11-01T09:13:17.000Z | 2022-03-08T14:34:16.000Z | src/flowrect/simulations/pdes/finite_pde.py | jokteur/ASMA | 25ac8a0455c680232d56c18d31de62c3188b7153 | [
"MIT"
] | null | null | null | src/flowrect/simulations/pdes/finite_pde.py | jokteur/ASMA | 25ac8a0455c680232d56c18d31de62c3188b7153 | [
"MIT"
] | null | null | null | import numpy as np
import time
from numba import jit, prange
from ..util import f_SRM
# Not used
@jit(nopython=True, cache=True)
def _fast_pde(
time_end, dt, N, a_grid_size, exp_a, Gamma, c, tau, lambda_kappa, I_ext, I_ext_time, interaction
):
""""""
steps = int(time_end / dt)
dim = Gamma.shape[0]
# Init vectors
ts = np.linspace(0, time_end, steps)
rho_t = np.zeros((steps, a_grid_size))
rho2_t = np.zeros((steps, a_grid_size))
m_t = np.zeros((steps, dim))
x_t = np.zeros(steps)
noise = np.random.rand(steps)
A = np.zeros(steps)
S = np.zeros((steps, a_grid_size))
Abar = np.zeros(steps)
# Vector of indices that goes from a=0 to a=a_cutoff
a_indices = np.arange(a_grid_size)
# This vector is used to build a matrix of indices that are used in the S(t,a) fct
# Only the lower part of the matrix will be used
a_idx_matrix = -a_indices.reshape((a_grid_size, 1)) + a_indices.reshape((1, a_grid_size))
rho_t[0, 0] = 1 / dt
# interaction = J from our equations
J = interaction
da = dt
# Initial step
x_fixed = I_ext if I_ext_time == 0 else 0
m_t_sum = np.sum(exp_a * m_t[0], axis=1)
f = f_SRM(m_t_sum + x_t[0], tau=tau, c=c)
for s in range(1, steps):
x_fixed = I_ext if I_ext_time < dt * s else 0
m_t[s] = m_t[s - 1] + dt * np.sum(
(Gamma - (1 - exp_a) * m_t[s - 1]).T * f * rho_t[s - 1] * da, axis=1
)
x_t[s] = x_t[s - 1] + dt * (
-lambda_kappa * x_t[s - 1]
+ lambda_kappa * (np.sum(f * rho_t[s - 1] * da) * J + x_fixed)
)
m_t_sum = np.sum(exp_a * m_t[s], axis=1)
f = f_SRM(m_t_sum + x_t[s], c=c)
# Copy previous density
rho_t[s] = rho_t[s - 1]
S_f_idx = s + a_idx_matrix # t_n - a_i + a_k'
# For s < a_cutoff, we do not want to look past ages greater than the current time
# of the simulation
for a in range(min(s, a_grid_size)):
idx = S_f_idx[a, :a]
m_t_sum = np.sum(exp_a[:a] * m_t[idx], axis=1)
S[s, a] = np.exp(-np.sum(f_SRM(m_t_sum + x_t[idx])) * da)
# Once S has been determined, we can calculate Abar
intensity = np.clip(f * dt, 0, 1) # Limit intensity to 1
S_int_vec = (1 - S[s]) * rho_t[s]
S_sum = np.sum(S_int_vec)
Abar[s] = np.sum(rho_t[s] * intensity)
if S_sum > 0:
correction_factor = np.sum(S_int_vec * f) / S_sum # da can be simplified
Abar[s] += correction_factor * (1 - np.sum(rho_t[s] * da))
# Calculate the activity A
p = Abar[s] * dt
p = 1 if p > 1 else p
K = np.random.binomial(N, p)
A[s] = 1 / N * K
# Mass loss on each cell
intensity = np.clip(f * dt, 0, 1) # Limit transfer
mass_transfer = rho_t[s] * intensity
rho_t[s] -= mass_transfer
lass_cell_mass = rho_t[s, -1] # Last cell necessarely spikes
# Linear transport
rho_t[s, 1:] = rho_t[s, :-1]
# Mass insertion
rho_t[s, 0] = A[s] * dt
return ts, rho_t, m_t, x_t, A, Abar, S
def FR_finite_fluctuations(
time_end,
dt,
Lambda,
Gamma,
c,
lambda_kappa,
I_ext=0,
I_ext_time=0,
interaction=0,
N=500,
tau=1,
a_cutoff=5,
epsilon=1e-8,
):
if isinstance(Gamma, (float, int)):
Gamma = [Gamma]
if isinstance(Lambda, (float, int)):
Lambda = [Lambda]
Gamma = np.array(Gamma)
Lambda = np.array(Lambda)
dim = Gamma.shape[0]
# Need dt = da
a_grid_size = int(a_cutoff / dt)
a_grid = np.linspace(0, a_cutoff, a_grid_size)
a_d_grid = np.vstack((a_grid,) * dim).T
# Shape must be in order: len, d, d
exp_a = np.exp(-Lambda * a_d_grid)
# Simulation
ts, rho_t, m_t, x_t, A, Abar, S = _fast_pde(
time_end,
dt,
N,
a_grid_size,
exp_a,
Gamma,
c,
tau,
lambda_kappa,
I_ext,
I_ext_time,
interaction,
)
energy_conservation = np.sum(rho_t * dt, axis=-1)
activity = rho_t[:, 0]
return ts, a_grid, rho_t, m_t, x_t, energy_conservation, activity, A, Abar, S
# Trigger compilation
# print("Compilation of flowrect with finite size fluctuations")
# ret = FR_finite_fluctuations(
# time_end=0.5, dt=0.5, Lambda=[1, 1], Gamma=[-1, -1], c=1, lambda_kappa=1, a_cutoff=1
# ) | 28.519231 | 100 | 0.564621 |
f1f6912520076dc46ef727d81f9a73b632dbcb4e | 3,876 | py | Python | program3solution/restaurant.py | dpocheng/Python-Data-Structure-of-Python | 66b99fa41f831e8c3089e4e2fd664a7ef3268e5e | [
"Apache-2.0"
] | null | null | null | program3solution/restaurant.py | dpocheng/Python-Data-Structure-of-Python | 66b99fa41f831e8c3089e4e2fd664a7ef3268e5e | [
"Apache-2.0"
] | null | null | null | program3solution/restaurant.py | dpocheng/Python-Data-Structure-of-Python | 66b99fa41f831e8c3089e4e2fd664a7ef3268e5e | [
"Apache-2.0"
] | null | null | null | __author__ = 'dgk'
# RESTAURANT COLLECTION PROGRAM
# ICS 31, UCI, David G. Kay, Fall 2012
# Implement Restaurant as a namedtuple, collection as a list
##### MAIN PROGRAM (CONTROLLER)
def restaurants(): # nothing -> interaction
""" Main program
"""
print("Welcome to the restaurants program!")
our_rests = Collection_new()
our_rests = handle_commands(our_rests)
print("\nThank you. Good-bye.")
MENU = """
Restaurant Collection Program --- Choose one
a: Add a new restaurant to the collection
r: Remove a restaurant from the collection
s: Search the collection for selected restaurants
p: Print all the restaurants
q: Quit
"""
def handle_commands(C: list) -> list:
""" Display menu, accept and process commands.
"""
while True:
response = input(MENU)
if response=="q":
return C
elif response=='a':
r = Restaurant_get_info()
C = Collection_add(C, r)
elif response=='r':
n = input("Please enter the name of the restaurant to remove: ")
C = Collection_remove_by_name(C, n)
elif response=='p':
print(Collection_str(C))
elif response=='s':
n = input("Please enter the name of the restaurant to search for: ")
for r in Collection_search_by_name(C, n):
print(Restaurant_str(r))
else:
invalid_command(response)
def invalid_command(response): # string -> interaction
""" Print message for invalid menu command.
"""
print("Sorry; '" + response + "' isn't a valid command. Please try again.")
##### Restaurant
from pcollections import pnamedtuple
Restaurant = pnamedtuple('Restaurant', 'name cuisine phone dish price')
# Constructor: r1 = Restaurant('Taillevent', 'French', '01-11-22-33-44', 'Escargots', 23.50)
def Restaurant_str(self: Restaurant) -> str:
return (
"Name: " + self.name + "\n" +
"Cuisine: " + self.cuisine + "\n" +
"Phone: " + self.phone + "\n" +
"Dish: " + self.dish + "\n" +
"Price: ${:2.2f}".format(self.price) + "\n\n")
def Restaurant_get_info() -> Restaurant:
""" Prompt user for fields of Restaurant; create and return.
"""
return Restaurant(
input("Please enter the restaurant's name: "),
input("Please enter the kind of food served: "),
input("Please enter the phone number: "),
input("Please enter the name of the best dish: "),
float(input("Please enter the price of that dish: ")))
#### COLLECTION
# A collection is a list of restaurants
def Collection_new() -> list:
''' Return a new, empty collection
'''
return [ ]
def Collection_str(C: list) -> str:
''' Return a string representing the collection
'''
s = ""
for r in C:
s = s + Restaurant_str(r)
return s
def Collection_search_by_name(C: list, name: str) -> list:
""" Return list of Restaurants in input list whose name matches input string.
"""
result = [ ]
for r in C:
if r.name == name:
result.append(r)
return result
# alternative (using a list comprehension):
# return [r for r in C if r.name == name]
def Collection_add(C: list, R: Restaurant) -> list:
""" Return list of Restaurants with input Restaurant added at end.
"""
C.append(R)
return C
def Collection_remove_by_name(C: list, name: str) -> list:
""" Given name, remove all Restaurants with that name from collection.
"""
result = [ ]
for r in C:
if r.name != name:
result.append(r)
return result
# Alternative:
# return [r for r in self.rests if r.name != name]
restaurants()
| 30.761905 | 95 | 0.586945 |
bcc673dd7b26b0e2da58cfc4d7b6485f74fa69e7 | 32,107 | py | Python | distributed_social_network/api/tests.py | leevtori/CMPUT404-project | 52214288855ae4b3f05b8d17e67a2686debffb19 | [
"Apache-2.0"
] | null | null | null | distributed_social_network/api/tests.py | leevtori/CMPUT404-project | 52214288855ae4b3f05b8d17e67a2686debffb19 | [
"Apache-2.0"
] | 51 | 2019-03-22T00:31:06.000Z | 2021-06-10T21:17:30.000Z | distributed_social_network/api/tests.py | leevtori/CMPUT404-project | 52214288855ae4b3f05b8d17e67a2686debffb19 | [
"Apache-2.0"
] | 1 | 2019-08-03T14:41:22.000Z | 2019-08-03T14:41:22.000Z | """
Notes:
- Unauthenticated requests are not tested because it's a check done by
DjangoRestFramework (default permission set in settings.py). The response
will always be 401. There is no reason to check this.
- Serializer and paginator generated response is not checked.
- Most response formatting is not checked. The content is checked, but
format is not, unless it's a response not created by a serializer.
"""
from rest_framework.test import APITestCase
from rest_framework.test import APIRequestFactory
from posts.models import Post, Comment
from users.models import User, Node
from posts.utils import Visibility
import api.views as views
from rest_framework.test import force_authenticate
from django.urls import reverse
from django.conf import settings
import uuid
from urllib.parse import quote, urljoin
def create_test_user(username="test", active=True):
return User.objects.create_user(
username=username,
email="test@test.com",
bio="Hello world",
password="aNewPw019",
is_active=active
)
def create_foreign_user(username="alien", hostname="http://testhost.com/", prefix="api/", api_user="apiUser"):
node_usr = User.objects.create_user(
username=api_user,
email="api@user.com",
password="aNewPw019",
is_active=True
)
node = Node.objects.create(
hostname=hostname,
prefix=prefix,
user_auth=node_usr,
send_username="test",
send_password="test",
active=True
)
user = User.objects.create_user(
host=node,
username=username,
password="aNewPw019",
local=False,
is_active=True
)
return user
def create_friend(username, usr):
friend = User.objects.create_user(
username=username,
email="friend@test.com",
bio="Chicken",
password="aNewPw019",
is_active=True
)
friend.friends.add(usr)
return friend
def create_comment(post, author, comment='Test Comment'):
return Comment.objects.create(
author=author,
post=post,
comment=comment
)
def create_test_post(author, title="test post", content="test", visibility=Visibility.PUBLIC, visbleTo=[]):
post = Post.objects.create(
title=title,
content=content,
author=author,
visibility=visibility
)
post.origin = urljoin(settings.HOSTNAME, f"/api/posts/{post.id}")
post.source = urljoin(settings.HOSTNAME, f"/api/posts/{post.id}")
post.save()
post.visible_to.add(*visbleTo)
return post
class TestPostEndpoints(APITestCase):
"""
Tests for the endpoints
- /posts (GET)
- /posts/post_id (GET)
"""
@classmethod
def setUpTestData(cls):
cls.user = create_test_user()
cls.friend = create_friend("friend", cls.user)
cls.foaf = create_friend("foaf", cls.friend)
cls.post = create_test_post(cls.user, title="public")
cls.comment = create_comment(cls.post, cls.friend)
cls.private_post = create_test_post(cls.user, title="private", visibility=Visibility.PRIVATE)
cls.foaf_post = create_test_post(cls.user, title="foaf", visibility=Visibility.FOAF)
cls.factory = APIRequestFactory()
def test_get_post_list(self):
"""
Tests getting all public posts on server.
endpoint: /posts
"""
request = self.factory.get("api/posts")
force_authenticate(request, user=self.user)
response = views.PostViewSet.as_view({"get": "list"})(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data["posts"]), 1)
# check that the only post is public (there only is one post!)
self.assertEqual(response.data["posts"][0]["visibility"], "Public")
def test_get_post_detail_dne(self):
"""
Try to get details for a post that doesn't exist
"""
# some uuid (chances that it's uuid is an actual post are slim)
pid = uuid.uuid4()
request = self.factory.get(reverse('api-posts-detail', args=(pid,)),)
force_authenticate(request, user=self.user)
response = views.PostViewSet.as_view({"get": "retrieve"})(request, pk=pid)
self.assertEqual(response.status_code, 404)
def test_get_post_detail(self):
"""
Get post detail of a post that exists.
"""
request = self.factory.get(reverse('api-posts-detail', args=(self.post.id,)))
force_authenticate(request, user=self.user)
response = views.PostViewSet.as_view({"get": "retrieve"})(request, pk=self.post.id)
self.assertEqual(str(self.post.id), response.data["post"]["id"])
self.assertIn("getPost", response.data["query"])
self.assertIn(str(self.comment.id), response.data["post"]["comments"][0]["id"])
class TestAuthorPostEndpoints(APITestCase):
"""
Test for the endpoints
- /author/posts (GET and POST)
- /author/<author_id>/posts (GET)
"""
@classmethod
def setUpTestData(cls):
cls.user = create_test_user()
cls.friend = create_friend("friend", cls.user)
cls.foaf = create_friend("foaf", cls.friend)
cls.alien = create_foreign_user()
cls.alien.friends.add(cls.user)
cls.post = create_test_post(cls.user)
cls.usr_private = create_test_post(cls.user, title='private to user', visibility=Visibility.PRIVATE)
cls.visibleToAlien = create_test_post(cls.user, title='visible to alient', visibility=Visibility.PRIVATE, visbleTo=[cls.alien])
cls.foaf = create_test_post(cls.friend, title="foaf", visibility=Visibility.FOAF)
cls.private = create_test_post(cls.friend, visibility=Visibility.PRIVATE)
cls.factory = APIRequestFactory()
def test_author_posts_no_header_get(self):
"""
Test /author/posts GET without X-User header
"""
request = self.factory.get(reverse("api-author-post"))
force_authenticate(request, self.user)
response = views.AuthorPostView.as_view()(request)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data, {"detail": "Missing X-User Header Field"})
def test_author_posts_get(self):
"""
Test /author/posts GET with the X-User header, where user is recognized
"""
request = self.factory.get(
reverse("api-author-post"),
HTTP_X_USER=self.alien.get_url())
force_authenticate(request, self.alien.host.user_auth)
response = views.AuthorPostView.as_view()(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data["posts"]), 3)
# some formatting checks
self.assertEqual(response.data["query"], "posts")
self.assertDictContainsSubset({"count": 3}, response.data)
# grab the post ids from the response
post_ids = [p["id"] for p in response.data["posts"]]
# Check that the correct posts are in the response.
self.assertIn(str(self.post.id), post_ids)
self.assertIn(str(self.foaf.id), post_ids)
self.assertIn(str(self.visibleToAlien.id), post_ids)
self.assertNotIn(str(self.private), post_ids)
def test_author_posts_no_header_post(self):
"""
Test author/posts POST without X-User header
"""
request = self.factory.post(
reverse("api-author-post"),
{
"query": "createPost",
"post": {
"id": uuid.uuid4()
}
},
format="json"
)
force_authenticate(request, self.alien.host.user_auth)
response = views.AuthorPostView.as_view()(request)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data, {"detail": "Missing X-User Header Field"})
def test_author_posts_non_match(self):
"""
Test author/posts POST with header, where the author in post does not
match the header user
"""
request = self.factory.post(
reverse("api-author-post"),
{
"query": "createPost",
"post": {
"id": "069cc2ae-d9da-45e5-9604-9f133a2184fa",
"title": "Some post about chickens",
"source": "http://testhost.com/api/posts/069cc2ae-d9da-45e5-9604-9f133a2184fa",
"origin": "http://testhost.com/api/posts/069cc2ae-d9da-45e5-9604-9f133a2184fa",
"description": "ducks",
"contentType": "text/plain",
"content": "quack",
"author": {
"id": "http://testhost.com/api/author/bce6b38b-591d-4ca7-9c8e-96b50bf58cce",
"host": "http://testhost.com/api/",
"firstName": "",
"lastName": "",
"displayName": "alien",
"url": "http://testhost.com/api/author/bce6b38b-591d-4ca7-9c8e-96b50bf58cce",
"github": None
},
"categories": [],
"comments": [],
"published": "2019-04-04T03:38:39.886652Z",
"visibility": "Public",
"visibleTo": [],
"unlisted": False
}
},
format="json",
HTTP_X_USER=self.alien.get_url()
)
force_authenticate(request, self.alien.host.user_auth)
response = views.AuthorPostView.as_view()(request)
self.assertEqual(response.status_code, 400)
self.assertDictContainsSubset(response.data, {'message': 'Author of post does not match authenticated user.', 'query': 'createPost', 'type': False})
def test_author_posts_invalid(self):
"""
Test author/pots POST with header, where data is not invalid
"""
request = self.factory.post(
reverse("api-author-post"),
{
"id": "069cc2ae-d9da-45e5-9604-9f133a2184fa",
"title": "Some post about chickens",
"source": "http://testhost.com/api/posts/069cc2ae-d9da-45e5-9604-9f133a2184fa",
"origin": "http://testhost.com/api/posts/069cc2ae-d9da-45e5-9604-9f133a2184fa",
"description": "ducks",
"contentType": "text/plain",
"content": "quack",
"author": {
"id": self.alien.get_url(),
"host": "http://testhost.com/api/",
"firstName": "",
"lastName": "",
"displayName": "alien",
"url": self.alien.get_url(),
"github": None
},
"categories": [],
"comments": [],
"unlisted": False
},
format="json",
HTTP_X_USER=self.alien.get_url()
)
force_authenticate(request, self.alien.host.user_auth)
response = views.AuthorPostView.as_view()(request)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data, {'message': 'Malformed request', 'query': 'createPost', 'type': False})
def test_author_posts_valid(self):
"""
author/posts POST with header and valid post body
"""
request = self.factory.post(
reverse("api-author-post"),
{
"query": "createPost",
"post": {
"id": "069cc2ae-d9da-45e5-9604-9f133a2184fa",
"title": "Some post about chickens",
"source": "http://testhost.com/api/posts/069cc2ae-d9da-45e5-9604-9f133a2184fa",
"origin": "http://testhost.com/api/posts/069cc2ae-d9da-45e5-9604-9f133a2184fa",
"description": "ducks",
"contentType": "text/plain",
"content": "quack",
"author": {
"id": self.alien.get_url(),
"host": "http://testhost.com/api/",
"firstName": "",
"lastName": "",
"displayName": "alien",
"url": self.alien.get_url(),
"github": None
},
"categories": [],
"comments": [],
"published": "2019-04-04T03:38:39.886652Z",
"visibility": "Public",
"visibleTo": [],
"unlisted": False
}
},
format="json",
HTTP_X_USER=self.alien.get_url()
)
force_authenticate(request, self.alien.host.user_auth)
response = views.AuthorPostView.as_view()(request)
self.assertEqual(response.status_code, 200)
self.assertDictContainsSubset(response.data, {'message': 'Post created', 'query': 'createPost', 'type': True})
self.assertTrue(Post.objects.filter(id="069cc2ae-d9da-45e5-9604-9f133a2184fa").exists())
def test_author_id_posts_no_header(self):
"""
Tests /author/<id>/posts endpoint without the X-User header
"""
request = self.factory.get(
reverse("api-author-posts", args=(str(self.user.id), )),
)
force_authenticate(request, self.alien.host.user_auth)
response = views.AuthorViewset.as_view({'get': 'posts'})(request, pk=str(self.user.id))
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data, {"detail": "Missing X-User Header Field"})
def test_author_id_posts_get(self):
"""
Tests /author/<id>/posts with the X-User header, and a existant user
"""
request = self.factory.get(
reverse("api-author-posts", args=(str(self.user.id), )),
HTTP_X_USER=self.alien.get_url()
)
force_authenticate(request, self.alien.host.user_auth)
response = views.AuthorViewset.as_view({'get': 'posts'})(request, pk=str(self.user.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["count"], 2)
# get the ids of returned posts
pids = [p["id"] for p in response.data["posts"]]
self.assertIn(str(self.post.id), pids)
self.assertIn(str(self.visibleToAlien.id), pids)
class TestCommentEndpoints(APITestCase):
    """
    Tests for the endpoint /posts/<post_id>/comments (GET and POST).

    Fixtures: a local user with a public post and a PRIVATE post, a foreign
    (alien) user, one comment per post. The comment on the private post
    must never be exposed to the foreign viewer.
    """
    @classmethod
    def setUpTestData(cls):
        cls.user = create_test_user()
        cls.post = create_test_post(cls.user)
        # Second post is PRIVATE: used to verify comment visibility rules.
        cls.post2 = create_test_post(cls.user, content="test post 2", visibility=Visibility.PRIVATE)
        cls.alien = create_foreign_user()
        cls.comment = create_comment(cls.post, cls.alien)
        cls.comment2 = create_comment(cls.post2, cls.user)
        cls.factory = APIRequestFactory()

    def test_comment_get(self):
        """
        Test getting comments of a post via /posts/<id>/comments.

        Only the single comment on the public post should be returned.
        """
        request = self.factory.get(
            reverse('api-comments', args=(str(self.post.id), ))
        )
        force_authenticate(request, self.alien.host.user_auth)
        response = views.CommentView.as_view()(request, pk=str(self.post.id))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["count"], 1)
        cids = [c["id"] for c in response.data["comments"]]
        self.assertEqual(len(cids), 1)
        self.assertIn(str(self.comment.id), cids)

    def test_comment_post_unknown_foreign_author(self):
        """
        Test /posts/<id>/comments POST with an unknown foreign author.

        The server is expected to accept the comment and (implicitly)
        create a record for the previously unseen foreign author.
        """
        c_id = "de305d54-75b4-431b-adb2-eb6b9e546013"
        request = self.factory.post(
            reverse('api-comments', args=(str(self.post.id), )),
            {
                "query": "addComment",
                "post": {
                    "id": c_id,
                    "contentType": "text/plain",
                    "comment": "Let's be frands!",
                    "published": "2019-03-09T13:07:04+00:00",
                    "author": {
                        # Author id not present in our database yet.
                        "id": "http://testhost.com/e2c0c9ad-c518-42d4-9eb6-87c40f2ca151",
                        "email": "unknown@test.com",
                        "bio": "test",
                        "host": "http://testhost.com",
                        "firstName": "",
                        "lastName": "",
                        "displayName": "unknown",
                        "url": "http://testhost.com/e2c0c9ad-c518-42d4-9eb6-87c40f2ca151",
                        "github": None
                    }
                }
            },
            format="json"
        )
        force_authenticate(request, self.alien.host.user_auth)
        response = views.CommentView.as_view()(request, pk=str(self.post.id))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, {"query": "addComment", "type": True, "message": "Comment added"})
        self.assertTrue(Comment.objects.filter(id=c_id).exists())

    def test_comment_post_403(self):
        """
        Test posting a comment on a private post: must be rejected with 403
        and no Comment row created.
        """
        c_id = "de305d54-75b4-431b-adb2-eb6b9e546013"
        request = self.factory.post(
            reverse('api-comments', args=(str(self.post2.id), )),
            {
                "query": "addComment",
                "post": {
                    "id": c_id,
                    "contentType": "text/plain",
                    "comment": "Let's be frands!",
                    "published": "2019-03-09T13:07:04+00:00",
                    "author": {
                        "id": self.alien.get_url(),
                        "email": self.alien.email,
                        "bio": self.alien.bio,
                        "host": str(self.alien.host),
                        "firstName": "",
                        "lastName": "",
                        "displayName": self.alien.username,
                        "url": self.alien.get_url(),
                        "github": None
                    }
                }
            },
            format="json"
        )
        force_authenticate(request, self.alien.host.user_auth)
        response = views.CommentView.as_view()(request, pk=str(self.post2.id))
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.data, {"query": "addComment", "type": False, "message": "Comment not allowed"})
        self.assertFalse(Comment.objects.filter(id=c_id).exists())

    def test_comment_post_known_author(self):
        """
        Test /posts/<id>/comments POST with a known author of the comment
        (foreign or local doesn't really matter here).
        """
        c_id = "de305d54-75b4-431b-adb2-eb6b9e546013"
        request = self.factory.post(
            reverse('api-comments', args=(str(self.post.id), )),
            {
                "query": "addComment",
                "post": {
                    "id": c_id,
                    "contentType": "text/plain",
                    "comment": "Let's be frands!",
                    "published": "2019-03-09T13:07:04+00:00",
                    "author": {
                        "id": self.alien.get_url(),
                        "email": self.alien.email,
                        "bio": self.alien.bio,
                        "host": str(self.alien.host),
                        "firstName": "",
                        "lastName": "",
                        "displayName": self.alien.username,
                        "url": self.alien.get_url(),
                        "github": None
                    }
                }
            },
            format="json"
        )
        force_authenticate(request, self.alien.host.user_auth)
        response = views.CommentView.as_view()(request, pk=str(self.post.id))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, {"query": "addComment", "type": True, "message": "Comment added"})
        self.assertTrue(Comment.objects.filter(id=c_id).exists())

    def test_comment_post_invalid(self):
        """
        Test /posts/<id>/comments POST with malformed (empty) data:
        should be rejected with 400.
        """
        request = self.factory.post(
            reverse('api-comments', args=(str(self.post.id), )),
            {
                "query": "addComment",
                "post": {
                }
            },
            format="json"
        )
        force_authenticate(request, self.alien.host.user_auth)
        response = views.CommentView.as_view()(request, pk=str(self.post.id))
        self.assertEqual(response.status_code, 400)
class TestFriendsEndpoints(APITestCase):
    """
    Tests for the endpoints:
    - /author/<author_id>/friends (GET and POST)
    - /author/<author1_id>/friends/<author2_id> (GET)

    Fixtures: a local user with two friends, a local non-friend, and a
    foreign (alien) user who performs the (node-authenticated) requests.
    """
    @classmethod
    def setUpTestData(cls):
        cls.user = create_test_user()
        cls.friend = create_friend("friend1", cls.user)
        cls.friend2 = create_friend("friend2", cls.user)
        cls.not_friend = create_test_user("unfrandly")
        cls.alien = create_foreign_user()
        cls.factory = APIRequestFactory()

    def test_friends_get_exists(self):
        """
        Get friends of an existing author on the local server.

        endpoint: /user/<userid>/friends — must list exactly the two
        friends, by URL.
        """
        request = self.factory.get(reverse('api-author-friends', args=(str(self.user.id),)))
        force_authenticate(request, self.alien.host.user_auth)
        response = views.FriendsView.as_view()(request, pk=str(self.user.id),)
        self.assertEqual(response.status_code, 200)
        self.assertIn(self.friend.get_url(), response.data["authors"])
        self.assertIn(self.friend2.get_url(), response.data["authors"])
        self.assertEqual(len(response.data["authors"]), 2)

    def test_friends_get_unknown(self):
        """
        Get friends of a non-existent author on the local server:
        expects a 404 response.
        """
        uid = str(uuid.uuid4())  # random id: virtually guaranteed unknown
        request = self.factory.get(reverse('api-author-friends', args=(uid,)))
        force_authenticate(request, self.alien.host.user_auth)
        response = views.FriendsView.as_view()(request, pk=uid,)
        self.assertEqual(response.status_code, 404)

    def test_friends_post_malformed(self):
        """
        Test a malformed POST to /user/<id>/friends: the "authors"
        candidate list is missing, so the view must return 400.
        """
        uid = str(self.user.id)
        request = self.factory.post(
            reverse('api-author-friends', args=(uid,)),
            {
                "query": "friends",
                "author": "author_id"
            },
            format="json",)
        force_authenticate(request, self.alien.host.user_auth)
        response = views.FriendsView.as_view()(request, pk=uid)
        self.assertEqual(response.status_code, 400)

    def test_friends_post_valid(self):
        """
        Test POST to /user/<id>/friends: given a candidate list of authors,
        the response filters it down to the actual friends only.
        """
        uid = str(self.user.id)
        request = self.factory.post(
            reverse('api-author-friends', args=(uid,)),
            {
                "query": "friends",
                "author": "author_id",
                # Candidate list: one real friend, two non-friends.
                "authors": [
                    self.friend.get_url(),
                    self.alien.get_url(),
                    self.not_friend.get_url()
                ]
            },
            format="json",)
        force_authenticate(request, self.alien.host.user_auth)
        response = views.FriendsView.as_view()(request, pk=uid)
        self.assertEqual(response.status_code, 200)
        self.assertIn(self.friend.get_url(), response.data["authors"])
        self.assertEqual(len(response.data["authors"]), 1)

    def test_author_friend_id_true(self):
        """
        Test GET to /author/<id1>/friends/<id2>, where id2 is a friend.
        """
        pk1 = str(self.user.id)
        pk2 = self.friend.get_url()
        # pk2 is a full URL, so it must be percent-encoded to travel in a path.
        request = self.factory.get(
            reverse("api-check-friends", args=(pk1, quote(pk2, safe="~()*!.'")))
        )
        force_authenticate(request, self.alien.host.user_auth)
        view = views.AreFriendsView.as_view()
        response = view(request, pk1, quote(pk2, safe="~()*!.'"))
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.data, {
            "query": "friends",
            "friends": True,
            "authors": [
                self.user.get_url(),
                self.friend.get_url()
            ]
        })

    def test_author_friend_id_false(self):
        """
        Test GET to /author/<id1>/friends/<id2>, where id2 is not a friend.
        """
        pk1 = str(self.user.id)
        pk2 = self.not_friend.get_url()
        request = self.factory.get(
            reverse("api-check-friends", args=(pk1, quote(pk2, safe="~()*!.'")))
        )
        force_authenticate(request, self.alien.host.user_auth)
        view = views.AreFriendsView.as_view()
        response = view(request, pk1, quote(pk2, safe="~()*!.'"))
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.data, {
            "query": "friends",
            "friends": False,
            "authors": [
                self.user.get_url(),
                self.not_friend.get_url()
            ]
        })
class TestFriendRequestEndpoint(APITestCase):
    """
    Tests for the endpoint /friendrequest (POST).

    Covers three cases: the local "friend" target does not exist (404),
    the foreign author is already known (request recorded on both sides),
    and the foreign author is new (a local stub user is created for them).
    """
    @classmethod
    def setUpTestData(cls):
        cls.user = create_test_user()
        cls.user2 = create_test_user(username="test2")
        cls.alien = create_foreign_user()
        cls.factory = APIRequestFactory()

    def test_friend_does_not_exist(self):
        """
        The local "friend" parameter does not exist: should return 404.
        """
        request = self.factory.post(
            reverse('api-friend-request'),
            {
                "query": "friendrequest",
                "author": {
                    "id": "http://testhost.com/author/7a51bda7-00ca-4689-a58a-6711a07a828c",
                    "host": "http://testhost.com",
                    "displayName": "Jane Doe",
                    "url": "http://testhost.com/author/7a51bda7-00ca-4689-a58a-6711a07a828c"
                },
                "friend": {
                    # Local-looking URL, but the uuid matches no fixture user.
                    "id": urljoin(settings.HOSTNAME, "/api/author/ae09b70a-1030-4e05-bb56-e9336325d93a"),
                    "host": settings.HOSTNAME,
                    "displayName": "Jane Doe",
                    "url": urljoin(settings.HOSTNAME, "/api/author/ae09b70a-1030-4e05-bb56-e9336325d93a")
                }
            },
            format="json"
        )
        force_authenticate(request, self.alien.host.user_auth)
        response = views.FriendRequestView.as_view()(request)
        self.assertEqual(response.status_code, 404)

    def test_known_remote_author(self):
        """
        The requesting author is a foreign user we have seen before:
        the request must be recorded on both users' request relations.
        """
        request = self.factory.post(
            reverse('api-friend-request'),
            {
                "query": "friendrequest",
                "author": {
                    "id": self.alien.get_url(),
                    "host": str(self.alien.host),
                    "displayName": self.alien.username,
                    "url": self.alien.get_url()
                },
                "friend": {
                    "id": self.user.get_url(),
                    "host": settings.HOSTNAME,
                    "displayName": self.user.username,
                    "url": self.user.get_url()
                }
            },
            format="json"
        )
        force_authenticate(request, self.alien.host.user_auth)
        response = views.FriendRequestView.as_view()(request)
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.data, {
            "query": "friendrequest",
            "success": True,
            "message": "Friend request sent"
        })
        # Both directions of the pending request must be persisted.
        self.assertTrue(self.user.incomingRequests.filter(pk=self.alien.id))
        self.assertTrue(self.alien.outgoingRequests.filter(pk=self.user.id))

    def test_unknown_remote_author(self):
        """
        Test with an unknown author from a (known) foreign host.
        The author does exist on the foreign host; a local record for
        them should be created as a side effect of the request.
        """
        rid = uuid.uuid4()
        request = self.factory.post(
            reverse('api-friend-request'),
            {
                "query": "friendrequest",
                "author": {
                    "id": f"http://testhost.com/author/{rid}",
                    "host": str(self.alien.host),
                    "displayName": "JaneDoe",
                    "url": f"http://testhost.com/author/{rid}"
                },
                "friend": {
                    "id": self.user.get_url(),
                    "host": settings.HOSTNAME,
                    "displayName": self.user.username,
                    "url": self.user.get_url()
                }
            },
            format="json"
        )
        force_authenticate(request, self.alien.host.user_auth)
        response = views.FriendRequestView.as_view()(request)
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(response.data, {
            "query": "friendrequest",
            "success": True,
            "message": "Friend request sent"
        })
        self.assertTrue(self.user.incomingRequests.filter(username="JaneDoe"))
        # A stub User row must have been created for the new foreign author.
        self.assertTrue(User.objects.filter(username="JaneDoe").exists())
class TestAuthorEndpoint(APITestCase):
    """
    Tests for the endpoints /author/<author_id> (retrieve) and the
    author list, which must contain only active local authors.
    """
    @classmethod
    def setUpTestData(cls):
        cls.user = create_test_user()
        cls.alien = create_foreign_user()
        # Inactive users must be excluded from the author list.
        cls.inactive_user = create_test_user(username="ded", active=False)
        cls.friend = create_friend("frand", cls.user)
        cls.factory = APIRequestFactory()

    def test_author_id_get_exists(self):
        """
        Retrieve an author whose id exists: 200 plus profile fields.
        """
        # NOTE(review): ``factory.get`` expects a URL path as its first
        # positional argument; 'api-author-detail' with ``args=`` looks like
        # a missing ``reverse(...)`` call. Harmless here because the view is
        # invoked directly with ``pk`` — confirm before relying on the path.
        request = self.factory.get('api-author-detail', args=(str(self.user.id),))
        force_authenticate(request, self.alien.host.user_auth)
        response = views.AuthorViewset.as_view({'get': 'retrieve'})(request, pk=str(self.user.id))
        self.assertEqual(response.status_code, 200)
        self.assertDictContainsSubset({
            "id": self.user.get_url(),
            "host": settings.HOSTNAME,
            "displayName": self.user.username,
            "url": self.user.get_url()
        }, response.data,)

    def test_author_id_get_dne(self):
        """
        Author with the given id does not exist: should return 404.
        """
        uid = str(uuid.uuid4())
        request = self.factory.get('api-author-detail', args=(uid,))
        force_authenticate(request, self.alien.host.user_auth)
        response = views.AuthorViewset.as_view({'get': 'retrieve'})(request, pk=uid)
        self.assertEqual(response.status_code, 404)

    def test_author_list(self):
        """
        Tests that all (and only) active local authors are listed:
        inactive users and foreign users are excluded.
        """
        request = self.factory.get('api-author-list')
        force_authenticate(request, self.alien.host.user_auth)
        response = views.AuthorViewset.as_view({'get': 'list'})(request)
        self.assertEqual(response.status_code, 200)
        uids = [u["id"] for u in response.data["authors"]]
        self.assertIn(self.user.get_url(), uids)
        self.assertNotIn(self.inactive_user.get_url(), uids)
        # Fix: removed dataset-extraction residue that was fused onto this
        # final line and broke the file's syntax.
        self.assertNotIn(self.alien.get_url(), uids)
from __future__ import division, print_function
import os
import sys
import six
import xpsi
def write(filename, module):
    """ Write a generated module string to ``filename``.

    :param str filename: Path of the file to (over)write.
    :param str module: Full module source as a single string; trailing
                       whitespace is stripped from every line and lines
                       are re-joined with single newlines (no trailing
                       newline is appended).
    """
    with open(filename, 'w') as mod:
        # str.join is linear-time and replaces the original quadratic
        # string-append loop (and its odd ``''''''`` empty-string literal).
        mod.write('\n'.join(_line.rstrip() for _line in module.splitlines()))
nindent = '\n '  # newline followed by one indentation unit, for assembling generated code
indent = ' '  # single indentation unit used when assembling generated code
import argparse
import re
class ArgumentParserCustom(argparse.ArgumentParser):
    """ Argument parser that reads ``@``-prefixed configuration files and
    records argument metadata for later config-file template generation.

    NOTE: a near-verbatim copy of this class is embedded in the generated
    module template below; keep the two in sync.
    """

    def convert_arg_line_to_args(self, arg_line):
        """ Convert one configuration-file line into zero or one arguments.

        Comment-only and blank lines yield no arguments; inline trailing
        ``#`` comments are stripped from the remainder of the line.
        """
        if (re.match(r'^[\s]*#', arg_line) or # look for any number of whitespace characters up to a `#` character
            re.match(r'^[\s]*$', arg_line)): # look for lines containing nothing or just whitespace
            return []
        else:
            try:
                _idx = arg_line.index('#')
            except ValueError:
                pass
            else:
                # strip an inline trailing comment
                arg_line = arg_line[:_idx].rstrip()
            if xpsi._verbose:
                print(arg_line)
            return [arg_line]

    def add_argument(self, *args, **kwargs):
        """ Register an argument, additionally capturing the presentation
        metadata (``comment_line_above``, ``empty_lines_below``, ``comment``,
        ``inline_comment``) consumed by :class:`GenerateConfigAction`.

        The custom kwargs MUST be popped before delegating to the base
        class, which does not recognise them.
        """
        if kwargs.pop('destined_for_config_file', True) and args[0] != '-h':
            # Tuple layout is relied upon positionally by
            # GenerateConfigAction._typeset: (flag, default, nargs,
            # comment_line_above, empty_lines_below, comment,
            # inline_comment, action).
            _ = (args[0],
                 kwargs.get('default', None),
                 kwargs.get('nargs', 1) if kwargs.get('action') != 'store_true' else 0,
                 kwargs.pop('comment_line_above', None),
                 kwargs.pop('empty_lines_below', 0),
                 kwargs.pop('comment', False),
                 kwargs.pop('inline_comment', None),
                 kwargs.get('action', None))
            try:
                self._config_file_args
            except AttributeError:
                # first registered argument: create the list lazily
                self._config_file_args = [_]
            else:
                self._config_file_args.append(_)
        else:
            # Custom kwargs must still be removed even when the argument is
            # excluded from the config file.
            _ = kwargs.pop('comment_line_above', None)
            _ = kwargs.pop('empty_lines_below', 0)
            _ = kwargs.pop('comment', False)
            _ = kwargs.pop('inline_comment', None)
        super(ArgumentParserCustom, self).add_argument(*args, **kwargs)
class GenerateConfigAction(argparse.Action):
    """ argparse action that typesets the recorded argument metadata into a
    ``generate.ini`` configuration-file template, writes it, and exits.

    NOTE: a near-verbatim copy of this class (with escaped braces) is
    embedded in the generated module template below; keep the two in sync.
    """

    def __init__(self, option_strings, dest, **kwargs):
        # nargs=0: the flag consumes no values.
        super(GenerateConfigAction, self).__init__(option_strings, dest, nargs=0, **kwargs)

    @staticmethod
    def _typeset(arg, default, nargs, comment_line_above, empty_lines_below, comment, inline_comment, action, newline=True):
        """ Typeset one config-file entry from the metadata tuple recorded
        by :meth:`ArgumentParserCustom.add_argument`.

        :returns: the formatted entry text (str).
        """
        entry = '\n' if newline else ''
        if comment_line_above is not None:
            if comment_line_above == 'rule':
                # a full-width horizontal rule
                entry += '#' + '-'*78 + '#\n'
            else:
                # a boxed section heading
                _ = '## {} ##'.format(comment_line_above)
                entry += '##' + '-' * (len(_) - 4) + '##\n' + _ + '\n##' + '-' * (len(_) - 4) + '##\n'
        _ = ' ## {}'.format(inline_comment) if inline_comment is not None else ''
        if not _ and isinstance(nargs, int) and nargs > 1:
            _ = ' ## enter {} values below, one per empty line'.format(nargs)
        elif not _ and not isinstance(nargs, int):
            # non-integer nargs (e.g. '*') implies free-form code statements
            _ = ' ## enter code below, one statement per line'
        if isinstance(default, list):
            # one line per default element; flag name only on the first line
            for i, _default in enumerate(default):
                entry += '{5}{0}{1}{2}{3}{4}'.format('#' if comment else '',
                                                     '' if (i > 0 and nargs != 1) else arg,
                                                     '' if nargs != 1 else '=',
                                                     '' if (i == 0 and nargs != 1) else str(_default),
                                                     (_ if i == 0 else '') + ('\n{0}{1}'.format('#' if comment else '', str(_default)) if i == 0 and nargs != 1 else ''),
                                                     '\n' if i > 0 else '')
        else:
            entry += '{0}{1}{2}{3}{4}'.format('#' if comment else '',
                                              arg,
                                              '=' if nargs == 1 else '',
                                              _ if nargs != 1 else (str(default) if default is not None else ''),
                                              ('\n' + str(default) if default is not None else '') if nargs != 1 else _)
        if action == 'append':
            # show a commented-out template for an additional use of the flag
            entry += '\n#{0}='.format(arg)
        if isinstance(nargs, int) and nargs > 1:
            entry += '\n' * nargs
        elif isinstance(nargs, str):
            entry += '\n' * 3
        entry += '\n' * empty_lines_below
        return entry

    def __call__(self, parser, namespace, values, option_string=None):
        # Concatenate the typeset entries in registration order.
        for _ in parser._config_file_args:
            try:
                config_file
            except NameError:
                # first entry: no leading newline
                config_file = self._typeset(*_, newline=False)
            else:
                config_file += self._typeset(*_)
        with open('./generate.ini', 'w') as file:
            file.write(config_file)
        print('Configuration file generated.')
        parser.exit()
# Top-level parser for this generator script; arguments may be supplied via
# an @-prefixed configuration file (see fromfile_prefix_chars).
parser = ArgumentParserCustom(
    description='''
Script for automated generation of X-PSI model module set.
Usage: python %(prog)s [-h] @<generate.ini>
''',
    fromfile_prefix_chars='@')
# Meta flag: emit the configuration-file template and exit.
parser.add_argument('--generate-config-file', default=argparse.SUPPRESS, action=GenerateConfigAction, help='Generate the meta configuration file template.',
                    destined_for_config_file=False)

## telescope / instrument flags ##
parser.add_argument('--telescope',
                    type=str,
                    action='append',
                    help='Telescope name, e.g., NICER. Use argument once per telescope name, and no whitespaces.',
                    comment_line_above='telescope instrument flags')

parser.add_argument('--instrument',
                    type=lambda x: str(x).replace(' ', '_').replace('-', '_'),
                    action='append',
                    help='Name of an instrument on-board a telescope, e.g., XTI. Can use one or more instrument names per telescope name, and no whitespaces.',
                    empty_lines_below=2)

## target source flags ##
parser.add_argument('--source',
                    type=str,
                    help='The name of the star, e.g., PSR J0740+6620.',
                    comment_line_above='target source flags')

parser.add_argument('--frequency',
                    type=float,
                    required=True,
                    help='The coordinate spin frequency of the star (Hz).',
                    empty_lines_below=2)

## model flags ##
parser.add_argument('--model',
                    type=str,
                    help='A custom model name, e.g., ST-U + NSX-H, otherwise the model name is constructed from other arguments.',
                    comment_line_above='model flags',
                    comment=True)

parser.add_argument('--hot-region-model',
                    type=str,
                    action='append',
                    choices=['ST', 'CST', 'EST', 'PST', 'CDT', 'EDT', 'PDT'],
                    required=True,
                    help='The name of the hot-region model, e.g., ST. Maximum of two argument uses.')

parser.add_argument('--antipodal-reflection-symmetry',
                    action='store_true',
                    help='Are the two hot regions related via antipodal reflection symmetry? E.g., ST-S.',
                    comment=True)

parser.add_argument('--break-hot-region-exchange-degeneracy-with',
                    type=str,
                    default='super_colatitude',
                    help='Hot region parameter name to break hot-region exchange degeneracy with when there are two hot-regions of the same type that are not antipodally reflection-symmetric, e.g., ST+ST (ST-U). An example is e.g., "super_temperature".',
                    comment=True)
def str_to_bool(x):
    """ Map the literal strings 'True'/'False' to booleans.

    :raises ValueError: for any other input (argparse ``type`` callback).
    """
    _mapping = {'False': False, 'True': True}
    if x in _mapping:
        return _mapping[x]
    raise ValueError('Invalid argument where boolean ``True`` or ``False`` is required.')
parser.add_argument('--is-antiphased',
                    type=str_to_bool,
                    action='append',
                    help='Specify whether the hot regions are anti-phased w.r.t to Earth.')

parser.add_argument('--prefix',
                    type=str,
                    action='append',
                    help='Specify the prefixes for hot region parameter naming.')

## atmosphere flags ##
parser.add_argument('--hot-atmosphere-model',
                    type=str,
                    help='Name of atmosphere model within hot regions, e.g., blackbody or NSX-H.')

parser.add_argument('--hot-atmosphere-load',
                    action='store_true',
                    help='Does a numeric atmosphere table need to be loaded from disk for the hot regions?',
                    comment=True)

parser.add_argument('--elsewhere-atmosphere-model',
                    type=str,
                    help='Name of atmosphere model elsewhere, e.g., blackbody or NSX-H.')

parser.add_argument('--elsewhere-atmosphere-load',
                    action='store_true',
                    help='Does a numeric atmosphere table need to be loaded from disk for elsewhere?',
                    comment=True)

parser.add_argument('--attenuation-model',
                    type=str,
                    help='Name of interstellar attenuation model, e.g., tbnew.',
                    empty_lines_below=2)

## background flags ##
parser.add_argument('--background-model',
                    action='store_true',
                    help='Include an incident background component?',
                    comment=True)

parser.add_argument('--background-shared-instance',
                    action='store_true',
                    help='Do all instruments share the same background model instance?')

parser.add_argument('--background-shared-class',
                    action='store_true',
                    help='Do all instrument models share a background class?')

parser.add_argument('--background-parameters',
                    type=lambda x: ( str(x).replace(' ', '_') ).replace('-', '_'),
                    nargs='*',
                    default=['powerlaw_index', 'powerlaw_normalization'],
                    help='Background model parameter names.',
                    comment=True,
                    inline_comment='enter one name per line below',
                    empty_lines_below=2)

## miscellaneous flags ##
parser.add_argument('--print-MPI-rank',
                    action='store_true',
                    help='Print MPI rank from main module?',
                    comment_line_above='miscellaneous flags',
                    empty_lines_below=2)

## write flags: where to place the generated module set ##
parser.add_argument('--config-path',
                    type=str,
                    help='If main module is imported, use this argument to specify the relative or absolute path to the configuration file.',
                    comment_line_above='write flags')

parser.add_argument('--module-directory-path',
                    type=str,
                    help='Absolute path to directory to write module files to.')

parser.add_argument('--main-module',
                    type=str,
                    default='main',
                    help='Name of the main module.')

parser.add_argument('--custom-signal-module',
                    type=str,
                    default='CustomSignal',
                    help='Name of the module containing the CustomSignal subclass.')

parser.add_argument('--custom-instrument-module',
                    type=str,
                    default='CustomInstrument',
                    help='Name of the module containing the CustomInstrument subclass.')

parser.add_argument('--custom-photosphere-module',
                    type=str,
                    default='CustomPhotosphere',
                    help='Name of the module containing the CustomPhotosphere subclass.')

parser.add_argument('--custom-interstellar-module',
                    type=str,
                    default='CustomInterstellar',
                    help='Name of the module containing the CustomInterstellar subclass.')

parser.add_argument('--custom-prior-module',
                    type=str,
                    default='CustomPrior',
                    help='Name of the module containing the CustomPrior subclass.')

parser.add_argument('--custom-background-module',
                    type=str,
                    default='CustomBackground',
                    help='Name of the module containing the CustomBackground subclass(es).')
# When run as a script, parse command-line arguments (possibly from an
# @-file); when imported as a module, fall back to the default config file.
if __name__ == '__main__':
    if xpsi._verbose:
        print('Parsing configuration file...')
    args, _ = parser.parse_known_args()
    if xpsi._verbose:
        print('Configuration file parsed.')
else:
    if xpsi._verbose:
        print('Parsing configuration file...')
    # import-time parse: read the local generate.ini
    args, _ = parser.parse_known_args(['@generate.ini'])
    if xpsi._verbose:
        print('Configuration file parsed.')
# Validate the hot-region specification and, if no custom model name was
# supplied, construct one from the hot-region and atmosphere choices
# (e.g. "ST-U + NSX-H").
if len(args.hot_region_model) > 2:
    raise ValueError('A maximum of two hot regions are permitted for module autogeneration.')

# Telescope names joined with ' x ' for the module docstring.
_telescopes = args.telescope[0]
for _x in args.telescope[1:]:
    _telescopes += ' x {}'.format(_x)

if args.model is None:
    if len(args.hot_region_model) == 2:
        if args.hot_region_model[0] == args.hot_region_model[1]:
            if args.antipodal_reflection_symmetry:
                # identical regions, antipodally symmetric: e.g. ST-S
                _tmp = '{}-S'.format(args.hot_region_model[0])
            else:
                # identical but unlinked regions: e.g. ST-U
                _tmp = '{}-U'.format(args.hot_region_model[0])
        else:
            if args.antipodal_reflection_symmetry:
                raise ValueError('Hot region models are not identical, so antipodal reflection symmetry cannot be imposed.')
            # two distinct region models: e.g. ST+PDT
            _tmp = '{}+{}'.format(args.hot_region_model[0],
                                  args.hot_region_model[1])
    else:
        _tmp = args.hot_region_model[0]

    args.model = '{} + {}'.format(_tmp,
                                  args.hot_atmosphere_model)

    if args.elsewhere_atmosphere_model is not None:
        args.model += ' + {}'.format(args.elsewhere_atmosphere_model)
# Begin the generated main module with a one-line docstring identifying the
# telescopes, source, X-PSI version, and model.
module = (
'''""" Main module for {} {} <- X-PSI {} {}"""'''.format(_telescopes,
                                                         args.source,
                                                         xpsi.__version__,
                                                         args.model)
)

# Re-join telescope names with ' & ' for use in the generated parser text.
_telescopes = args.telescope[0]
for _x in args.telescope[1:]:
    _telescopes += ' & {}'.format(_x)
module += (
'''
from __future__ import print_function, division
import os
import six
import argparse
import re
class ArgumentParserCustom(argparse.ArgumentParser):
def convert_arg_line_to_args(self, arg_line):
if (re.match(r'^[\s]*#', arg_line) or # look for any number of whitespace characters up to a `#` character
re.match(r'^[\s]*$', arg_line)): # look for lines containing nothing or just whitespace
return []
else:
try:
_idx = arg_line.index('#')
except ValueError:
pass
else:
arg_line = arg_line[:_idx].rstrip()
if xpsi._verbose:
print(arg_line)
return [arg_line]
def add_argument(self, *args, **kwargs):
if kwargs.pop('destined_for_config_file', True) and args[0] != '-h':
_ = (args[0],
kwargs.get('default', None),
kwargs.get('nargs', 1) if kwargs.get('action') != 'store_true' else 0,
kwargs.pop('comment_line_above', None),
kwargs.pop('empty_lines_below', 0),
kwargs.pop('comment', False),
kwargs.pop('inline_comment', None),
kwargs.get('action', None))
try:
self._config_file_args
except AttributeError:
self._config_file_args = [_]
else:
self._config_file_args.append(_)
else:
_ = kwargs.pop('comment_line_above', None)
_ = kwargs.pop('empty_lines_below', 0)
_ = kwargs.pop('comment', False)
_ = kwargs.pop('inline_comment', None)
super(ArgumentParserCustom, self).add_argument(*args, **kwargs)
class CompileAction(argparse._StoreAction):
""" Compile arguments for dynamic evaluation. """
def __call__(self, parser, namespace, values, option_string=None):
if isinstance(values, list):
for i, value in enumerate(values):
values[i] = compile(value, '<string>', 'eval')
setattr(namespace, self.dest, values)
else:
setattr(namespace, self.dest, compile(values, '<string>', 'eval'))
class GenerateConfigAction(argparse.Action):
def __init__(self, option_strings, dest, **kwargs):
super(GenerateConfigAction, self).__init__(option_strings, dest, nargs=0, **kwargs)
@staticmethod
def _typeset(arg, default, nargs, comment_line_above, empty_lines_below, comment, inline_comment, action, newline=True):
entry = '\\n' if newline else ''
if comment_line_above is not None:
if comment_line_above == 'rule':
entry += '#' + '-'*78 + '#\\n'
else:
_ = '## {{}} ##'.format(comment_line_above)
entry += '##' + '-' * (len(_) - 4) + '##\\n' + _ + '\\n##' + '-' * (len(_) - 4) + '##\\n'
_ = ' ## {{}}'.format(inline_comment) if inline_comment is not None else ''
if not _ and isinstance(nargs, int) and nargs > 1:
_ = ' ## enter {{}} values below, one per empty line'.format(nargs)
elif not _ and not isinstance(nargs, int):
_ = ' ## enter code below, one statement per line'
if isinstance(default, list):
for i, _default in enumerate(default):
entry += '{{5}}{{0}}{{1}}{{2}}{{3}}{{4}}'.format('#' if comment else '',
'' if (i > 0 and nargs != 1) else arg,
'' if nargs != 1 else '=',
'' if (i == 0 and nargs != 1) else str(_default),
(_ if i == 0 else '') + ('\\n{{0}}{{1}}'.format('#' if comment else '', str(_default)) if i == 0 and nargs != 1 else ''),
'\\n' if i > 0 else '')
else:
entry += '{{0}}{{1}}{{2}}{{3}}{{4}}'.format('#' if comment else '',
arg,
'=' if nargs == 1 else '',
_ if nargs != 1 else (str(default) if default is not None else ''),
('\\n' + str(default) if default is not None else '') if nargs != 1 else _)
if action == 'append':
entry += '\\n#{{0}}='.format(arg)
if isinstance(nargs, int) and nargs > 1:
entry += '\\n' * nargs
elif isinstance(nargs, str):
entry += '\\n' * 3
entry += '\\n' * empty_lines_below
return entry
def __call__(self, parser, namespace, values, option_string=None):
for _ in parser._config_file_args:
try:
config_file
except NameError:
config_file = self._typeset(*_, newline=False)
else:
config_file += self._typeset(*_)
with open('{3}', 'w') as file:
file.write(config_file)
print('Configuration file generated.')
parser.exit()
class NullAction(argparse.Action):
""" Do not store value in namespace. """
def __call__(self, parser, namespace, values, option_string=None):
pass
parser = ArgumentParserCustom(
description="""
Main module for X-PSI {0} modelling of {1} {2} event data.
You can run this module as a script and launch a sampler, optionally
with a world of MPI processes.
Alternate usage: mpiexec -n 4 python -m mpi4py %(prog)s [-h] @<config.ini>
""",
fromfile_prefix_chars='@')
'''.format(args.model,
_telescopes,
args.source,
args.config_path)
)
module += (
'''\ndef str_to_bool(x):
if x == 'False':
return False
elif x == 'True':
return True
raise ValueError('Invalid argument where boolean ``True`` or ``False`` is required.')
'''
)
module += (
'''
parser.add_argument('--generate-config-file', default=argparse.SUPPRESS, action=GenerateConfigAction, help='Generate the configuration file template.',
destined_for_config_file=False)
'''
)
module += (
'''
parser.add_argument('--main-import-statements',
type=str,
nargs='*',
default=['from xpsi.global_imports import gravradius', 'import math'],
help='Custom import statements needed for main module. Each statement is executed with the ``exec(...)`` builtin function. Note that if you pass statements, the default statements are deleted unless you uncomment the defaults in the configuration file.',
comment=True,
comment_line_above='import statements needed for main module',
empty_lines_below=2,
inline_comment='e.g., from ... import ... as ...')
parser.add_argument('--main-global-statements',
type=str,
nargs='*',
help='Custom assignment statements to be evaluated on the global level in the main module. Each statement is executed with the ``exec(...)`` builtin function. Note that if you pass statements, the default statements are deleted unless you uncomment the defaults in the configuration file.',
comment=True,
comment_line_above='global statements needed for main module',
empty_lines_below=2,
inline_comment='e.g., global_variable = math.pi')
parser.add_argument('--prior-import-statements',
type=str,
nargs='*',
default=['from scipy.stats import truncnorm', 'import math'],
action=NullAction,
help='Custom import statements needed for evaluation of prior CDFs. Each statement is executed with the ``exec(...)`` builtin function. Note that if you pass statements, the default statements are deleted unless you uncomment the defaults in the configuration file.',
comment=True,
comment_line_above='import statements needed for prior',
empty_lines_below=2,
inline_comment='e.g., from ... import ... as ...')
parser.add_argument('--prior-global-statements',
type=str,
nargs='*',
action=NullAction,
help='Custom assignment statements to be evaluated on the global level that are useful, e.g., for evaluation of prior CDFs. Each statement is executed with the ``exec(...)`` builtin function. Note that if you pass statements, the default statements are deleted unless you uncomment the defaults in the configuration file.',
comment=True,
comment_line_above='global statements needed for prior',
empty_lines_below=2,
inline_comment='e.g., global_variable = math.pi')
'''
)
# Boilerplate help-text fragments reused across many of the per-instrument
# flags generated in the loop below.
_path = 'Absolute or relative path to'
_bounds_default_notice = 'If no bounds are given (``None``), and no value is given (``None``), bounds default to the source code strict bounds.'
_value_notice = 'No value means the parameter is free, whilst a value means the parameter is fixed (and the prior may need to be modified manually). If you want the parameter to be derived from other parameters in a complex way, manual modification of the main module is necessary, but we support functions of one parameter in the configuration file.'
_derived_notice = 'If the parameter is derived from one other parameter, e.g., the temperature of a hot region is derived from the temperature of the other hot region, then the value needs to be written using the following template: lambda x: f(x), "parameter", "space". In this template: f(x) is a function of the parameter x from which the value is derived; "parameter" is the name of the parameter x as a string; and "space" is an object in the global namespace, with name written as a string, which is a (sub)space of parameters from which the current value of parameter x can be accessed via getitem magic using "parameter".'
_CDF_notice = 'Supply a function of one variable (the probability mass ``x``), in the form of an expression that can be evaluated with the ``eval(...)`` builtin function, i.e., scipy.stats.truncnorm(x, ...). Note that the prior default PDF is uniform (with compact support), so do not supply a CDF if a uniform prior is desired, or to be explicit, use: DEFAULT UNIFORM. You must use DEFAULT UNIFORM to overwrite a default CDF shown in the auto-generated configuration file, unless the parameter is fixed/derived in which case the prior flag is silently ignored. You can also use the flag more than once: the last usage must be an expression that will be dynamically evaluated using the ``eval(...)`` builtin and must return a float to set as the parameter value; the other usages can be helper statements executed with the ``exec(...)`` builtin, e.g., to set temporary local variables to make the code (and configuration file more readable).'
# Emit the per-instrument command-line flags (exposure, data files, response
# files, scaling/phase-shift parameters, background files) into the generated
# module. NOTE(review): ``comment``/``comment_line_above``/``inline_comment``/
# ``empty_lines_below`` are not stdlib argparse keywords -- presumably the
# generated ``parser`` is a project-specific argparse wrapper; confirm.
for instrument in args.instrument:
    module += (
        '''
parser.add_argument('--{0}-exposure-time',
                    type=float,
                    help='{0} exposure time in seconds.',
                    comment_line_above='{0} configuration flags')
parser.add_argument('--{0}-count-matrix-path', type=str, help='{1} {0} channel-phase count matrix. If the data is a spectrum (phase-averaged), then the file must contain a vector. This path is written to if the file does not exist by processing the event files.')
parser.add_argument('--{0}-count-matrix-type', type=str, default='double', help='{0} count matrix NumPy data type.',
                    comment=True)
parser.add_argument('--{0}-event-path', type=str, help='{1} {0} event list file.')
parser.add_argument('--{0}-number-phase-bins', type=int, help='Number of phases bins for binning {0} event list file.')
parser.add_argument('--{0}-event-file-channel-column', type=int, default=1, help='Channel column in {0} event list file.',
                    comment=True)
parser.add_argument('--{0}-event-file-phase-column', type=int, default=2, help='Phase column in {0} event list file.',
                    comment=True)
parser.add_argument('--{0}-event-file-skiprows', type=int, default=3, help='Number of top rows to skip when loading {0} event list file.',
                    comment=True)
parser.add_argument('--{0}-events-in-eV', action='store_true', help='{0} event list file lists events by energy in eV?',
                    comment=True)
parser.add_argument('--{0}-arf-path', type=str, help='{1} {0} ARF file.',
                    comment_line_above='rule')
parser.add_argument('--{0}-effective-area-scaling-factor', type=str, default='1.0',
                    help='Factor by which to scale the nominal effective area model, as a mathematical expression, e.g., a ratio of integers such as 51.0/52.0.',
                    comment=True)
parser.add_argument('--{0}-arf-skiprows', type=int, default=3,
                    help='Number of header rows to skip when loading ARF file.',
                    comment=True)
parser.add_argument('--{0}-arf-low-column', type=int, default=1,
                    help='Column (zero-indexed) containing the low energy edges in the ARF file.',
                    comment=True)
parser.add_argument('--{0}-arf-high-column', type=int, default=2,
                    help='Column (zero-indexed) containing the high energy edges in the ARF file.',
                    comment=True)
parser.add_argument('--{0}-arf-area-column', type=int, default=3,
                    help='Column (zero-indexed) containing the effective area in the ARF file.',
                    comment=True)
parser.add_argument('--{0}-rmf-path', type=str, help='{1} {0} RMF file.',
                    comment_line_above='rule')
parser.add_argument('--{0}-rmf-skiprows', type=int, default=3,
                    help='Number of header rows to skip when loading RMF file.',
                    comment=True)
parser.add_argument('--{0}-rmf-usecol', type=int, default=-1,
                    help='Column (zero-indexed) containing the flattened redistribution matrix elements in the RMF file.',
                    comment=True)
parser.add_argument('--{0}-channels-path', type=str, help='{1} {0} channel-energy mapping file.',
                    comment_line_above='rule')
parser.add_argument('--{0}-channel-energies-skiprows', type=int, default=0,
                    help='Number of header rows to skip when loading channel-energy mapping file.',
                    comment=True)
parser.add_argument('--{0}-channel-energies-low-column', type=int, default=0,
                    help='Column (zero-indexed) containing the low energy edges in the channel-energy mapping file.',
                    comment=True)
parser.add_argument('--{0}-input-bounds',
                    type=int,
                    nargs=2,
                    help='{0} bounding input energy intervals of instrument response submatrix for use with NumPy slice notation.',
                    comment_line_above='rule')
parser.add_argument('--{0}-channel-bounds',
                    type=int,
                    nargs=2,
                    help='{0} bounding channels of instrument response submatrix for use with NumPy slice notation.')
parser.add_argument('--{0}-energy-independent-effective-area-scaling-factor-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds for {0} energy-independent effective area scaling factor parameter. If no bounds are given (``None``), and no value is given (``None``), the parameter value is fixed at unity, and the instrument response model is locked to the nominal response model (unless a custom model is implemented).',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-energy-independent-effective-area-scaling-factor-prior',
                    type=str,
                    nargs='*',
                    default=['truncnorm.ppf(x, -3.0, 3.0, loc=1.0, scale=0.1)'],
                    action=NullAction,
                    help='Prior inverse CDF of the energy-independent effective area scaling factor. {5}',
                    comment=True,
                    inline_comment='Normal distribution with std. dev. 10%, truncated at +/- 3 std. dev.')
parser.add_argument('--{0}-energy-independent-effective-area-scaling-factor-value',
                    type=str,
                    action=CompileAction,
                    help='Value for {0} energy-independent effective area scaling parameter. Either the name of an instrument to share the parameter with, as a string, or a float. {3} {4}',
                    comment=True,
                    empty_lines_below=2)
parser.add_argument('--{0}-phase-shift-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds for {0} phase-shift parameter. If no bounds are given (``None``), and no value is given (``None``), the parameter value is fixed at zero, and is therefore locked to the phase of the signal specified by the hot region phases. For one phase-resolving instrument, this default behaviour is advised, and additional phase-resolving instruments can in principle have a different fixed, derived, or free phase-shift parameter. For instruments that phase-average, the phase-shift can be arbitrarily fixed or derived, but not free because the likelihood is not a function of it.',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-phase-shift-value',
                    type=str,
                    action=CompileAction,
                    help='Value for {0} phase-shift parameter. Either the name of an instrument to share the parameter with, as a string, or a float. {3} {4}',
                    comment=True,
                    empty_lines_below=2)
parser.add_argument('--{0}-background-prior-support-path', type=str, help='{1} {0} background prior support file. The channel-by-channel lower count-rate limits in the zeroth column, and the upper count-rate limits in the first column. The channels must already match the data.',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-background-skiprows', type=int, default=0, help='Number of top rows to skip when loading {0} background file (prior support or spectrum file).',
                    comment=True)
parser.add_argument('--{0}-background-path', type=str, help='{1} {0} background spectrum file (for imaging telescope).',
                    comment=True)
parser.add_argument('--{0}-background-usecol', type=int, help='Column to use when loading {0} background spectrum file (for imaging telescope).',
                    comment=True)
parser.add_argument('--{0}-background-prior-support-half-width', type=float, help='{0} background prior support half-width (for imaging telescope). The half-width is in units of standard deviation of background count number per instrument channel.',
                    comment=True)
parser.add_argument('--{0}-background-exposure-time',
                    type=float,
                    help='{0} background exposure time in seconds (for imaging telescope).',
                    comment=True)
parser.add_argument('--{0}-background-scaling-factor',
                    type=str,
                    help='{0} background scaling factor, nominally the ratio of on-source CCD extraction area to background CCD extraction area (ideally on same CCD) for imaging telescope. Supply an expression for evaluation by the ``eval(...)`` builtin function.',
                    comment=True,
                    empty_lines_below=2)
        '''.format(instrument.replace('_','-'),
                   _path,
                   _bounds_default_notice,
                   _value_notice,
                   _derived_notice,
                   _CDF_notice)
        )
# Emit one bounds/prior/value flag triple per background-model parameter.
# NOTE(review): for non-'powerlaw_norm' parameters, ``default='{5}'`` and
# ``inline_comment='{6}'`` interpolate the *string* 'None' (not the None
# object) -- presumably the wrapper/CompileAction handles this; confirm.
if args.background_model:
    for i, _parameter in enumerate(args.background_parameters):
        module += (
            '''
parser.add_argument('--{0}-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of the ``{0}`` parameter. {1}',
                    comment=True,
                    comment_line_above='{4}')
parser.add_argument('--{0}-prior',
                    type=str,
                    nargs='*',
                    action=NullAction,
                    help='Prior inverse CDF of the ``{0}`` parameter. {3}',
                    comment=True)
parser.add_argument('--{0}-value',
                    type=str,
                    action=CompileAction,
                    default='{5}',
                    help='Value of the ``{0}`` parameter. {2}',
                    comment=True,
                    inline_comment='{6}',
                    empty_lines_below=2)
            '''.format(_parameter.replace('_', '-'),
                       _bounds_default_notice,
                       _value_notice,
                       _CDF_notice,
                       'background flags' if i == 0 else 'rule',
                       str(1.0) if 'powerlaw_norm' in _parameter else None,
                       'to allow parameter to be free, uncomment and use: None' if 'powerlaw_norm' in _parameter else None)
            )
# Emit the interstellar-attenuation flags. Fixed: the prior flag was spelled
# '--neural-hydrogen-column-density-prior', inconsistent with the
# '--neutral-hydrogen-column-density-bounds/-value' flags of the same
# parameter (the typo produced an unrelated ``args.neural_...`` attribute).
module += (
    '''
parser.add_argument('--attenuation-path', type=str, help='{0} attenuation file.',
                    comment_line_above='attenuation flags')
parser.add_argument('--attenuation-energy-column', type=int, default=0,
                    help='Column (zero-indexed) containing the energies in the attenuation file.',
                    comment=True)
parser.add_argument('--attenuation-column', type=int, default=1,
                    help='Column (zero-indexed) containing the attenuation factors in the attenuation file.',
                    comment=True)
parser.add_argument('--neutral-hydrogen-column-density-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of the neutral hydrogen column density parameter. {1}',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--neutral-hydrogen-column-density-prior',
                    type=str,
                    nargs='*',
                    action=NullAction,
                    help='Prior inverse CDF of ratio of interstellar neutral hydrogen column density to the fiducial density. {3}',
                    comment=True)
parser.add_argument('--neutral-hydrogen-column-density-value',
                    type=str,
                    action=CompileAction,
                    help='Value of the neutral hydrogen column density parameter. {2}',
                    comment=True,
                    empty_lines_below=2)
    '''.format(_path,
               _bounds_default_notice,
               _value_notice,
               _CDF_notice)
    )
module += (
'''
parser.add_argument('--mass-bounds',
type=str,
nargs=2,
action=CompileAction,
help='Bounds of gravitational mass (solar masses). {1}',
comment=True,
comment_line_above='spacetime flags')
parser.add_argument('--mass-prior',
type=str,
nargs='*',
action=NullAction,
help='Prior inverse CDF of gravitation mass (solar masses). {4}',
comment=True)
parser.add_argument('--mass-value',
type=str,
action=CompileAction,
help='Value of gravitational mass (solar masses). {2} {3}',
comment=True,
empty_lines_below=2)
parser.add_argument('--radius-bounds',
type=str,
nargs=2,
action=CompileAction,
help='Bounds of coordinate equatorial radius (km). {1}',
comment=True,
comment_line_above='rule')
parser.add_argument('--radius-value',
type=str,
action=CompileAction,
help='Value of coordinate equatorial radius (km). {2} {3}',
comment=True,
empty_lines_below=2)
parser.add_argument('--cos-inclination-bounds',
type=str,
nargs=2,
action=CompileAction,
help='Bounds of cosine of Earth colatitude (inclination) w.r.t to stellar rotation axis. {1}',
comment=True,
comment_line_above='rule')
parser.add_argument('--cos-inclination-prior',
type=str,
nargs='*',
action=NullAction,
help='Prior inverse CDF of cosine of Earth inclination to stellar spin axis. {4}',
comment=True)
parser.add_argument('--cos-inclination-value',
type=str,
action=CompileAction,
help='Value of cosine of Earth colatitude (inclination) w.r.t to stellar rotation axis. {2}',
comment=True,
empty_lines_below=2)
parser.add_argument('--distance-bounds',
type=str,
nargs=2,
action=CompileAction,
help='Bounds of distance to source (kpc). {1}',
comment=True,
comment_line_above='rule')
parser.add_argument('--distance-prior',
type=str,
nargs='*',
action=NullAction,
help='Prior inverse CDF of distance to source (kpc). {4}',
comment=True)
parser.add_argument('--distance-value',
type=str,
action=CompileAction,
help='Value of distance to source (kpc). {2} {3}',
comment=True,
empty_lines_below=2)
'''.format(_path,
_bounds_default_notice,
_value_notice,
_derived_notice,
_CDF_notice)
)
# Emit super-member flags for one hot region per prefix; only the first
# region's flags are emitted when there is a single region or when
# antipodal reflection symmetry ties the second region to the first.
# Fixed: ``zip(...)`` returns a non-subscriptable iterator on Python 3,
# so it must be materialised with ``list(...)`` before slicing.
for _h, _m in list(zip(args.prefix, args.hot_region_model))[:1 if (len(args.hot_region_model) == 1 or args.antipodal_reflection_symmetry) else 2]:
    module += (
        '''
parser.add_argument('--{0}-super-colatitude-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of hot region "{0}" super-member colatitude w.r.t stellar spin axis (radians). {1}',
                    comment_line_above='"{0}" hot region parameter flags',
                    comment=True)
parser.add_argument('--{0}-super-colatitude-value',
                    type=str,
                    action=CompileAction,
                    help='Value of hot region "{0}" super-member colatitude w.r.t stellar spin axis (radians). {2} {3}',
                    comment=True,
                    empty_lines_below=2)
parser.add_argument('--{0}-super-radius-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of hot region "{0}" super-member angular radius (radians). {1}',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-super-radius-value',
                    type=str,
                    action=CompileAction,
                    help='Value of hot region "{0}" super-member angular radius (radians). {2} {3}',
                    comment=True,
                    empty_lines_below=2)
parser.add_argument('--{0}-super-temperature-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of hot region "{0}" super-member log10(temperature [K]). {1}',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-super-temperature-prior',
                    type=str,
                    nargs='*',
                    action=NullAction,
                    help='Prior inverse CDF of hot-region {0} superseding region log10(temperature [K]). {4}',
                    comment=True)
parser.add_argument('--{0}-super-temperature-value',
                    type=str,
                    action=CompileAction,
                    help='Value of hot region "{0}" super-member log10(temperature [K]). {2} {3}',
                    comment=True,
                    empty_lines_below=2)
        '''.format(_h,
                   _bounds_default_notice,
                   _value_notice,
                   _derived_notice,
                   _CDF_notice)
        )
    # Model-dependent member flags: omit-member flags for *ST models,
    # cede-member flags for *DT models, plus the region phase-shift flags.
    # (Body of the hot-region loop; _h is the region prefix, _m its model.)
    if _m in ['CST', 'EST', 'PST']:
        module += (
            '''
parser.add_argument('--{0}-omit-radius-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of hot region "{0}" omit-member angular radius (radians). {1}',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-omit-radius-value',
                    type=str,
                    action=CompileAction,
                    help='Value of hot region "{0}" omit-member angular radius (radians). {2} {3}',
                    comment=True,
                    empty_lines_below=2)
            '''.format(_h,
                       _bounds_default_notice,
                       _value_notice,
                       _derived_notice)
            )
        # EST/PST additionally free the omit-member centre (colatitude/azimuth).
        if _m in ['EST', 'PST']:
            module += (
                '''
parser.add_argument('--{0}-omit-colatitude-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of hot region "{0}" omit-member colatitude w.r.t stellar spin axis (radians). {1}',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-omit-colatitude-value',
                    type=str,
                    action=CompileAction,
                    help='Value of hot region "{0}" omit-member colatitude w.r.t stellar spin axis (radians). {2} {3}',
                    comment=True,
                    empty_lines_below=2)
parser.add_argument('--{0}-omit-azimuth-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of hot region "{0}" omit-member azimuth relative to super-member (radians). {1}',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-omit-azimuth-value',
                    type=str,
                    action=CompileAction,
                    help='Value of hot region "{0}" omit-member azimuth relative to super-member (radians). {2} {3}',
                    comment=True,
                    empty_lines_below=2)
                '''.format(_h,
                           _bounds_default_notice,
                           _value_notice,
                           _derived_notice)
                )
    if _m in ['CDT', 'EDT', 'PDT']:
        module += (
            '''
parser.add_argument('--{0}-cede-radius-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of hot region "{0}" cede-member angular radius (radians). {1}',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-cede-radius-value',
                    type=str,
                    action=CompileAction,
                    help='Value of hot region "{0}" cede-member angular radius (radians). {2} {3}',
                    comment=True,
                    empty_lines_below=2)
parser.add_argument('--{0}-cede-temperature-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of hot region "{0}" cede-member log10(temperature [K]). {1}',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-cede-temperature-prior',
                    type=str,
                    nargs='*',
                    action=NullAction,
                    help='Prior inverse CDF of hot-region {0} ceding region log10(temperature [K]). {4}',
                    comment=True)
parser.add_argument('--{0}-cede-temperature-value',
                    type=str,
                    action=CompileAction,
                    help='Value of hot region "{0}" cede-member log10(temperature [K]). {2} {3}',
                    comment=True,
                    empty_lines_below=2)
            '''.format(_h,
                       _bounds_default_notice,
                       _value_notice,
                       _derived_notice,
                       _CDF_notice)
            )
        # EDT/PDT additionally free the cede-member centre (colatitude/azimuth).
        if _m in ['EDT', 'PDT']:
            module += (
                '''
parser.add_argument('--{0}-cede-colatitude-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of hot region "{0}" cede-member colatitude w.r.t stellar spin axis (radians). {1}',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-cede-colatitude-value',
                    type=str,
                    action=CompileAction,
                    help='Value of hot region "{0}" cede-member colatitude w.r.t stellar spin axis (radians). {2} {3}',
                    comment=True,
                    empty_lines_below=2)
parser.add_argument('--{0}-cede-azimuth-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of hot region "{0}" cede-member azimuth relative to super-member (radians). {1}',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-cede-azimuth-value',
                    type=str,
                    action=CompileAction,
                    help='Value of hot region "{0}" cede-member azimuth relative to super-member (radians). {2} {3}',
                    comment=True,
                    empty_lines_below=2)
                '''.format(_h,
                           _bounds_default_notice,
                           _value_notice,
                           _derived_notice)
                )
    module += (
        '''
parser.add_argument('--{0}-phase-shift-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of hot region "{0}" phase shift (cycles). {1}',
                    comment=True,
                    comment_line_above='rule')
parser.add_argument('--{0}-phase-shift-value',
                    type=str,
                    action=CompileAction,
                    help='Value of hot region "{0}" phase shift (cycles). {2} {3}',
                    comment=True,
                    empty_lines_below=2)
        '''.format(_h,
                   _bounds_default_notice,
                   _value_notice,
                   _derived_notice)
        )
# Emit mesh/ray resolution flags for every hot region prefix (note: *all*
# prefixes here, unlike the parameter flags above which may emit only one).
for _h in args.prefix:
    module += (
        '''
parser.add_argument('--{0}-sqrt-num-cells',
                    type=int,
                    default=32,
                    help='Target square-root of the number of cells spanning (raditing subset of) hot region "{0}".',
                    comment_line_above='"{0}" hot region resolution flags')
parser.add_argument('--{0}-min-sqrt-num-cells',
                    type=int,
                    default=10,
                    help='Minimum square-root of the number of cells constituting hot region "{0}" mesh.')
parser.add_argument('--{0}-max-sqrt-num-cells',
                    type=int,
                    default=80,
                    help='Maximum square-root of the number of cells constituting hot region "{0}" mesh.')
parser.add_argument('--{0}-num-leaves',
                    type=int,
                    default=64,
                    help='Number of phases on unit interval at which to compute hot region "{0}" signal.')
parser.add_argument('--{0}-num-rays',
                    type=int,
                    default=512,
                    help='Number of rays per iso-latitude mesh subset to trace when computing hot region "{0}" signal.',
                    empty_lines_below=2)
        '''.format(_h)
        )
# Numeric hot-atmosphere table flags, only when a table is to be loaded.
if args.hot_atmosphere_load:
    module += (
        '''
parser.add_argument('--hot-atmosphere-path', type=str, help='{0} hot atmosphere file.',
                    comment_line_above='hot atmosphere flags')
parser.add_argument('--hot-atmosphere-size',
                    type=int,
                    nargs=4,
                    action=NullAction,
                    help='Size of each of the four dimensions of the numeric atmosphere table for the hot regions.',
                    empty_lines_below=2)
        '''.format(_path)
        )
# Emit the elsewhere-region flags when an elsewhere atmosphere is modelled.
# Fixed: the atmosphere-path help placeholder was formatted with
# ``_bounds_default_notice`` instead of ``_path`` (the path-format notice
# used by every other '... file.' help string, cf. the hot-atmosphere flag).
if args.elsewhere_atmosphere_model is not None:
    module += (
        '''
parser.add_argument('--elsewhere-temperature-bounds',
                    type=str,
                    nargs=2,
                    action=CompileAction,
                    help='Bounds of log10(temperature [K]) elsewhere. {0}',
                    comment_line_above='elsewhere flags',
                    comment=True)
parser.add_argument('--elsewhere-temperature-value',
                    type=str,
                    action=CompileAction,
                    help='Value of log10(temperature [K]) elsewhere. {1} {2}',
                    comment=True,
                    empty_lines_below=2)
parser.add_argument('--elsewhere-sqrt-num-cells',
                    type=int,
                    default=64,
                    help='Target square-root of the number of cells spanning (raditing subset of) the elsewhere region.')
parser.add_argument('--elsewhere-num-rays',
                    type=int,
                    default=1024,
                    help='Number of rays per iso-latitude mesh subset to trace when computing the signal from elsewhere.',
                    empty_lines_below={3:d})
        '''.format(_bounds_default_notice,
                   _value_notice,
                   _derived_notice,
                   0 if args.elsewhere_atmosphere_load else 2)
        )
    if args.elsewhere_atmosphere_load:
        module += (
            '''
parser.add_argument('--elsewhere-atmosphere-path', type=str, help='{0} Elsewhere atmosphere file.')
parser.add_argument('--elsewhere-atmosphere-size',
                    type=int,
                    nargs=4,
                    action=NullAction,
                    help='Size of each of the four dimensions of the numeric atmosphere table for elsewhere.',
                    empty_lines_below=2)
            '''.format(_path)
            )
module += (
'''
parser.add_argument('--image-order-limit',
type=int,
default=3,
help='The highest-order image to sum over. Either a positive integer, or do not pass an argument if a hard limit is not desired.',
comment_line_above='global resolution flags')
'''
)
module += (
'''
parser.add_argument('--number-energies',
type=int,
default=128,
help='Number of energies, distributed over instrument wavebands, to compute incident photon specific flux at.')
parser.add_argument('--maximum-energy-ray-tracing',
type=int,
help='Maximum energy for ray tracing. Useful if there is a background component such as a powerlaw that is jointly modelled with higher-energy event data using a subset of instruments.',
comment=True,
empty_lines_below=2)
parser.add_argument('--openmp-threads',
type=int,
default=1,
help='Number of OpenMP threads to spawn during likelihood function calls.',
comment_line_above='miscellaneous flags')
parser.add_argument('--parameters-externally-updated',
type=str_to_bool,
default=True,
help='Are the parameters updated before calling the likelihood object, e.g., in the prior object?',
empty_lines_below=2)
'''
)
module += (
'''
parser.add_argument('--multinest', action='store_true', help='Launch MultiNest sampler if module is executed.',
comment_line_above='runtime flags')
parser.add_argument('--resume', action='store_true', help='Resume sampling if module is executed.')
parser.add_argument('--multimodal', action='store_true', help='Activate the mode-separation algorithm variant of MultiNest.',
comment=True)
parser.add_argument('--sample-files-directory-path',
type=str,
default='samples/',
help='{} sample file directory. If no path is provided, the default (relative) path is "samples/".')
parser.add_argument('--sample-files-root',
type=str,
help='The root name of the sample files (i.e., without a file extension) to be generated or already generated by MultiNest. If no path is provided, the sample file root name will be constructed automatically from other sampling process settings.',
comment=True)
parser.add_argument('--number-iterations-per-write',
type=int,
default=100,
help='Number of nested replacements per write to disk of the sampling process to enable resumption. Posterior files are generated at a cadence of 10x this number.')
parser.add_argument('--number-live-points',
type=int,
default=1000,
help='Number of live points in nested sampling process.')
parser.add_argument('--hypervolume-expansion-factor',
type=float,
default=10.0,
help='Factor by which to expand the hyperellisoid union that approximately minimally bounds the set of live points.')
parser.add_argument('--constant-efficiency-variant',
action='store_true',
help='Activate MultiNest constant efficiency sampling variant? Warning: only use this option if computational resources are limited.',
comment=True)
parser.add_argument('--mode-separation-variant',
action='store_true',
help='Activate mode-separation sampling variant? Live point threads (an initial live point and the chain of subsequent replacements) do not migrate between threads by default.',
comment=True)
parser.add_argument('--estimated-remaining-log-evidence',
type=float,
default=0.1,
help='Estimated remaining log-evidence for sampling process termination.')
parser.add_argument('--maximum-number-nested-replacement-iterations',
type=int,
default=-1,
help='Maximum number of nested replacements for termination of the nested sampling process. Use negative one (the default) to terminate based on estimated remaining log-evidence instead.')
'''.format(_path)
)
# Emit the generated module's own configuration parsing: when executed as a
# script it parses the command line; when imported it reads the configuration
# file (the '@' prefix is argparse's fromfile syntax).
module += (
    '''
import xpsi
if __name__ == '__main__':
    if xpsi._verbose:
        print('Parsing configuration file...')
    args, _ = parser.parse_known_args()
    if xpsi._verbose:
        print('Configuration file parsed.')
else:
    if xpsi._verbose:
        print('Parsing configuration file...')
    args, _ = parser.parse_known_args(['@{}'])
    if xpsi._verbose:
        print('Configuration file parsed.')
    '''.format(args.config_path)
    )
# Standard imports for the generated module.
module += (
    '''
import os
import numpy as np
import math
from xpsi.Parameter import Derive
from xpsi import HotRegions
    '''
    )
if args.print_MPI_rank:
    module += '''\nprint('Rank reporting: %d' % xpsi._rank) '''
module += (
'''
from {0} import CustomInstrument
from {1} import CustomSignal
from {2} import CustomInterstellar
try:
from {3} import CustomPhotosphere
except ImportError:
from xpsi import Photosphere as CustomPhotosphere
from {4} import CustomPrior
'''.format(args.custom_instrument_module,
args.custom_signal_module,
args.custom_interstellar_module,
args.custom_photosphere_module,
args.custom_prior_module)
)
# Normalise the background-sharing flags: sharing an instance implies sharing
# the class, which in turn implies sharing the parameters.
if args.background_shared_instance:
    args.background_shared_class = True
    args.background_shared_parameters = True
elif args.background_shared_class:
    args.background_shared_parameters = True
else:
    args.background_shared_parameters = False
if args.background_shared_class:
    module += (
        '''
from {0} import CustomBackground
        '''.format(args.custom_background_module)
        )
else:
    # One background class per instrument. Fixed: this branch previously
    # repeated ``elif args.background_shared_class:`` (making it unreachable)
    # and iterated over nonexistent ``args.instruments`` instead of
    # ``args.instrument``.
    for _instrument in args.instrument:
        module += (
            '''
from {0} import {1}_CustomBackground
            '''.format(args.custom_background_module,
                       _instrument)
            )
module += (
'''
if args.main_import_statements is not None:
for import_statement in args.main_import_statements:
exec(import_statement)
if args.main_global_statements is not None:
for global_statement in args.main_global_statements:
exec(global_statement)
'''
)
module += (
'''
class namespace():
pass
'''
)
module += (
'''
def parse_bounds(bounds, value, default_to_free=True):
if bounds is not None:
bounds[0] = eval(bounds[0])
bounds[1] = eval(bounds[1])
return tuple(bounds)
elif default_to_free:
return None if value is not None else (None, None)
return None
bounds = dict(neutral_hydrogen_column_density = parse_bounds(args.neutral_hydrogen_column_density_bounds,
args.neutral_hydrogen_column_density_value))
values = dict(neutral_hydrogen_column_density = args.neutral_hydrogen_column_density_value)
interstellar = CustomInterstellar.load(args.attenuation_path,
args.attenuation_energy_column,
args.attenuation_column,
bounds = bounds,
values = values)
'''
)
module += (
'''
signals = [[],]
'''
)
module += (
'''
def derived_parameter(func, parameter, space='caller'):
class derive(Derive):
def __init__(self):
self.space = compile(space, '<string>', 'eval')
def __call__(self, boundto, caller=None):
return func(eval(self.space)[parameter])
return derive()
def parse_value(value):
if value is not None:
try:
return float(eval(value))
except ValueError:
return derived_parameter(*eval(value))
else:
return None
'''
)
# Background construction and per-instrument instrument/data/background/signal
# emission for the generated module.
# NOTE(review): the instrument loop below was nested under
# ``if args.background_model:``, which made the ``if not args.background_model:``
# branch inside it unreachable and skipped all instrument/data/signal setup
# when no background model was requested; the loop is now top-level.
if args.background_model:
    if args.background_shared_instance:
        # Single background instance shared by every instrument.
        module += (
            '''
bounds = {}
values = {}
            '''
            )
        for _parameter in args.background_parameters:
            module += (
                '''
bounds['{0}'] = parse_bounds(args.{0}_bounds, args.{0}_value)
values['{0}'] = parse_value(args.{0}_value)
                '''.format(_parameter)
                )
        # Instantiate once, after all parameters are gathered (fixed: this
        # statement was emitted inside the loop above, once per parameter).
        module += (
            '''
background = CustomBackground(bounds=bounds, values=values)
            '''
            )
for instrument in args.instrument:
    # Instrument loading, event/count-matrix handling and background
    # prior-support construction for this instrument.
    module += (
        '''
{0} = namespace()
if args.{0}_{2}_value is not None:
    if eval(args.{0}_{2}_value) in {1}:
        values = dict({2} = derived_parameter(lambda x: x,
                      '{2}',
                      eval(args.{0}_{2}_value) + '.instrument'))
    else:
        values = dict({2} = parse_value(args.{0}_{2}_value))
else:
    values = {{}}
bounds = dict({2} = parse_bounds(args.{0}_{2}_bounds,
              args.{0}_{2}_value,
              default_to_free = False))
{0}.instrument = CustomInstrument.{0}(bounds=bounds,
                                      values=values,
                                      ARF=args.{0}_arf_path,
                                      RMF=args.{0}_rmf_path,
                                      channel_energies=args.{0}_channels_path,
                                      max_input=args.{0}_input_bounds[1],
                                      max_channel=args.{0}_channel_bounds[1],
                                      min_input=args.{0}_input_bounds[0],
                                      min_channel=args.{0}_channel_bounds[0],
                                      effective_area_scaling_factor=eval(args.{0}_effective_area_scaling_factor),
                                      ARF_skiprows=args.{0}_arf_skiprows,
                                      ARF_low_column=args.{0}_arf_low_column,
                                      ARF_high_column=args.{0}_arf_high_column,
                                      ARF_area_column=args.{0}_arf_area_column,
                                      RMF_skiprows=args.{0}_rmf_skiprows,
                                      RMF_usecol=args.{0}_rmf_usecol,
                                      channel_energies_skiprows=args.{0}_channel_energies_skiprows,
                                      channel_energies_low_column=args.{0}_channel_energies_low_column)
try:
    counts = np.loadtxt(args.{0}_count_matrix_path, dtype=np.double)
except ValueError:
    {0}.data = xpsi.Data.bin__event_list(args.{0}_event_path,
                                         channels={0}.instrument.channels,
                                         phases=np.linspace(0.0, 1.0, args.{0}_number_phase_bins + 1),
                                         channel_column=args.{0}_event_file_channel_column,
                                         phase_column=args.{0}_event_file_phase_column if args.{0}_number_phase_bins > 1 else None,
                                         phase_averaged=True if args.{0}_number_phase_bins == 1 else False,
                                         channel_edges={0}.instrument.channel_edges,
                                         skiprows=args.{0}_event_file_skiprows,
                                         eV=True if args.{0}_events_in_eV else False,
                                         dtype=getattr(np, args.{0}_count_matrix_type),
                                         first=0,
                                         last=len({0}.instrument.channels) - 1,
                                         exposure_time=args.{0}_exposure_time)
    np.savetxt(args.{0}_event_path.replace('.txt','_converted_to_counts.txt'), {0}.data.counts)
    print('Counts file saved as: '+args.{0}_event_path.replace('.txt','_converted_to_counts.txt'))
    print('Update configuration file to take in counts file to save computation time.')
else:
    if counts.ndim == 1:
        counts = counts.reshape(-1,1)
    {0}.data = xpsi.Data(counts,
                         channels={0}.instrument.channels,
                         phases=np.linspace(0.0, 1.0, args.{0}_number_phase_bins + 1),
                         first=0,
                         last=len({0}.instrument.channels) - 1,
                         exposure_time=args.{0}_exposure_time)
if args.{0}_background_prior_support_path:
    support = np.loadtxt(args.{0}_background_prior_support_path,
                         skiprows=args.{0}_background_skiprows,
                         dtype=np.double)
elif args.{0}_background_path:
    spectrum = np.loadtxt(args.{0}_background_path,
                          skiprows=args.{0}_background_skiprows,
                          usecols=args.{0}_background_usecol,
                          dtype=np.double)[{0}.instrument.channels]
    support = np.zeros((len(spectrum), 2), dtype=np.double)
    support[:,0] = spectrum - args.{0}_background_prior_support_half_width * np.sqrt(spectrum)
    support[support[:,0] < 0.0, 0] = 0.0
    support[:,1] = spectrum + args.{0}_background_prior_support_half_width * np.sqrt(spectrum)
    for i in range(support.shape[0]):
        if support[i,1] == 0.0:
            for j in range(i, support.shape[0]):
                if support[j,1] > 0.0:
                    support[i,1] = support[j,1]
                    break
    support *= ({0}.data.exposure_time / args.{0}_background_exposure_time) * float(eval(args.{0}_background_scaling_factor)) # exposure ratio * scaling
    support /= {0}.data.exposure_time # need count rate, so divide by exposure time
else:
    support = None
        '''.format(instrument,
                   args.instrument,
                   'energy_independent_effective_area_scaling_factor')
        )
    # Fixed above: the prior support was loaded from
    # ``args.{0}_background_path`` instead of
    # ``args.{0}_background_prior_support_path``.
    if not args.background_model:
        module += (
            '''
{0}.background = None
            '''.format(instrument)
            )
    else:
        if args.background_shared_instance:
            module += (
                '''
{0}.background = background
                '''.format(instrument)
                )
        elif args.background_shared_class:
            if instrument == args.instrument[0]:
                module += (
                    '''
bounds = {}
values = {}
                    '''
                    )
                for _parameter in args.background_parameters:
                    module += (
                        '''
bounds['{0}'] = parse_bounds(args.{0}_bounds, args.{0}_value)
values['{0}'] = parse_value(args.{0}_value)
                        '''.format(_parameter)
                        )
            else:
                module += (
                    '''
bounds = None
values = {}
                    '''
                    )
                for _parameter in args.background_parameters:
                    # Derive each parameter from the first instrument's
                    # background. Fixed: the subspace argument was emitted
                    # unquoted, so the generated module would pass an object
                    # (not a source string) to compile(...) inside
                    # derived_parameter; siblings pass a string.
                    module += (
                        '''
values['{0}'] = derived_parameter(lambda x: x,
                                  '{0}',
                                  '{1}.background')
                        '''.format(_parameter, args.instrument[0])
                        )
            module += (
                '''
{0}.background = CustomBackground(bounds=bounds, values=values)
                '''.format(instrument)
                )
        else:
            # Instrument-specific background classes.
            # NOTE(review): the instrument prefix is stripped from the
            # parameter name before the ``args.{0}_bounds`` lookup -- confirm
            # the flags are registered under the stripped names.
            module += (
                '''
bounds = {}
values = {}
                '''
                )
            for _parameter in args.background_parameters:
                if instrument in _parameter:
                    _parameter = _parameter[len(instrument):].lstrip('_')
                    module += (
                        '''
bounds['{0}'] = parse_bounds(args.{0}_bounds, args.{0}_value)
values['{0}'] = parse_value(args.{0}_value)
                        '''.format(_parameter)
                        )
            # Fixed: this statement previously lacked ``.format(instrument)``
            # (leaving literal '{0}' placeholders in the generated module)
            # and misspelled the ``bounds`` keyword as ``bound``.
            module += (
                '''
{0}.background = {0}_CustomBackground(bounds=bounds, values=values)
                '''.format(instrument)
                )
    # Phase-shift parameter and signal object for this instrument.
    module += (
        '''
if args.{0}_phase_shift_value is not None:
    if eval(args.{0}_phase_shift_value) in {1}:
        values = dict(phase_shift = derived_parameter(lambda x: x,
                      'phase_shift',
                      eval(args.{0}_phase_shift_value) + '.signal'))
    else:
        values = dict(phase_shift = parse_value(args.{0}_phase_shift_value))
else:
    values = {{}}
bounds = dict(phase_shift = parse_bounds(args.{0}_phase_shift_bounds,
              args.{0}_phase_shift_value,
              default_to_free=False))
{0}.signal = CustomSignal(data = {0}.data,
                          instrument = {0}.instrument,
                          interstellar = interstellar,
                          background = {0}.background,
                          cache = False if __name__ == '__main__' else True,
                          bounds = bounds,
                          values = values,
                          workspace_intervals = 1000,
                          epsrel = 1.0e-8,
                          epsilon = 1.0e-3,
                          sigmas = 10.0,
                          support = support,
                          prefix = '{0}')
signals[0].append({0}.signal)
        '''.format(instrument,
                   args.instrument,
                   'energy_independent_effective_area_scaling_factor')
        )
module += (
'''
bounds = dict(mass = parse_bounds(args.mass_bounds,
args.mass_value),
radius = parse_bounds(args.radius_bounds,
args.radius_value),
distance = parse_bounds(args.distance_bounds,
args.distance_value),
cos_inclination = parse_bounds(args.cos_inclination_bounds,
args.cos_inclination_value))
values = dict(mass = parse_value(args.mass_value),
radius = parse_value(args.radius_value),
distance = parse_value(args.distance_value),
cos_inclination = parse_value(args.cos_inclination_value),
frequency = {0})
spacetime = xpsi.Spacetime(bounds, values)
'''.format(str(args.frequency))
)
def get_bounds_and_values(prefix, model):
    """ Append bounds/values dict construction for one hot region to ``module``.

    :param str prefix: CLI prefix identifying the hot region (used to select
        the ``args.<prefix>_*`` command-line options in the generated code).
    :param str model: Hot-region model name (e.g. ``ST``, ``CST``, ``EST``,
        ``PST``, ``CDT``, ``EDT``, ``PDT``); determines which omit/cede
        parameters are added beyond the common superseding-region set.
    """
    global module  # the generated-module source string being accumulated

    # Parameters common to every hot-region model: superseding-region
    # geometry, phase shift, and temperature.
    module += (
'''
bounds = dict(super_colatitude = parse_bounds(args.{0}_super_colatitude_bounds,
args.{0}_super_colatitude_value),
super_radius = parse_bounds(args.{0}_super_radius_bounds,
args.{0}_super_radius_value),
phase_shift = parse_bounds(args.{0}_phase_shift_bounds,
args.{0}_phase_shift_value),
super_temperature = parse_bounds(args.{0}_super_temperature_bounds,
args.{0}_super_temperature_value))
values = dict(super_colatitude = parse_value(args.{0}_super_colatitude_value),
super_radius = parse_value(args.{0}_super_radius_value),
phase_shift = parse_value(args.{0}_phase_shift_value),
super_temperature = parse_value(args.{0}_super_temperature_value))
'''.format(prefix)
    )
    # Models with an omission (masked) region need its angular radius.
    if model in ['CST','EST','PST']:
        module += (
'''
bounds['omit_radius'] = parse_bounds(args.{0}_omit_radius_bounds,
args.{0}_omit_radius_value)
values['omit_radius'] = parse_value(args.{0}_omit_radius_value)
'''.format(prefix)
        )
    # Non-concentric omission regions additionally need centre coordinates.
    if model in ['EST', 'PST']:
        module += (
'''
bounds['omit_colatitude'] = parse_bounds(args.{0}_omit_colatitude_bounds,
args.{0}_omit_colatitude_value)
values['omit_colatitude'] = parse_value(args.{0}_omit_colatitude_value)
bounds['omit_azimuth'] = parse_bounds(args.{0}_omit_azimuth_bounds,
args.{0}_omit_azimuth_value)
values['omit_azimuth'] = parse_value(args.{0}_omit_azimuth_value)
'''.format(prefix)
        )
    # Dual-temperature models ('CDT', 'EDT', 'PDT') have a ceding region.
    if 'DT' in model:
        module += (
'''
bounds['cede_radius'] = parse_bounds(args.{0}_cede_radius_bounds,
args.{0}_cede_radius_value)
values['cede_radius'] = parse_value(args.{0}_cede_radius_value)
bounds['cede_temperature'] = parse_bounds(args.{0}_cede_temperature_bounds,
args.{0}_cede_temperature_value)
values['cede_temperature'] = parse_value(args.{0}_cede_temperature_value)
'''.format(prefix)
        )
    # Non-concentric ceding regions additionally need centre coordinates.
    if model in ['EDT', 'PDT']:
        module += (
'''
bounds['cede_colatitude'] = parse_bounds(args.{0}_cede_colatitude_bounds,
args.{0}_cede_colatitude_value)
values['cede_colatitude'] = parse_value(args.{0}_cede_colatitude_value)
bounds['cede_azimuth'] = parse_bounds(args.{0}_cede_azimuth_bounds,
args.{0}_cede_azimuth_value)
values['cede_azimuth'] = parse_value(args.{0}_cede_azimuth_value)
'''.format(prefix)
        )
def get_member_settings(model):
    """ Append ``xpsi.HotRegion`` constructor flag assignments to ``module``.

    Maps a hot-region model name to the (symmetry, omit, cede, concentric)
    flags consumed by the subsequent ``xpsi.HotRegion(...)`` template.

    NOTE(review): a model name outside the handled set appends nothing, so
    the generated module would reference undefined (or stale, on a second
    call) flag variables — presumably upstream argument validation prevents
    this; TODO confirm.
    """
    global module  # the generated-module source string being accumulated
    if model == 'ST':
        # Single temperature, no masked or ceding region.
        module += (
'''
symmetry = True
omit = False
cede = False
concentric = False
'''
        )
    elif model == 'CST':
        # Concentric omission region.
        module += (
'''
symmetry = True
omit = True
cede = False
concentric = True
'''
        )
    elif model in ['EST','PST']:
        # Eccentric/protruding omission region (non-concentric).
        module += (
'''
symmetry = True
omit = True
cede = False
concentric = False
'''
        )
    elif model == 'CDT':
        # Concentric ceding region (dual temperature).
        module += (
'''
symmetry = True
omit = False
cede = True
concentric = True
'''
        )
    elif model in ['EDT', 'PDT']:
        # Eccentric/protruding ceding region (dual temperature,
        # non-concentric).
        module += (
'''
symmetry = True
omit = False
cede = True
concentric = False
'''
        )
# Configure the primary hot region from the first model/prefix pair:
# emit the constructor flags, then the bounds/values dictionaries.
get_member_settings(args.hot_region_model[0])
get_bounds_and_values(args.prefix[0], args.hot_region_model[0])
module += (
'''
primary = xpsi.HotRegion(bounds=bounds,
values=values,
symmetry=symmetry,
omit=omit,
cede=cede,
concentric=concentric,
sqrt_num_cells=args.{1}_sqrt_num_cells,
min_sqrt_num_cells=args.{1}_min_sqrt_num_cells,
max_sqrt_num_cells=args.{1}_max_sqrt_num_cells,
num_leaves=args.{1}_num_leaves,
num_rays=args.{1}_num_rays,
is_antiphased={0},
image_order_limit=args.image_order_limit,
prefix='{1}')
'''.format(str(args.is_antiphased[0]), args.prefix[0])
)
if len(args.hot_region_model) == 2:
if args.antipodal_reflection_symmetry:
bounds = {}
module += (
'''
values = dict(super_colatitude = derived_parameter(lambda x: math.pi - x, '{0}__super_colatitude', 'primary'),
super_radius = derived_parameter(lambda x: x, '{0}__super_radius', 'primary'),
phase_shift = derived_parameter(lambda x: x, '{0}__phase_shift', 'primary'),
super_temperature = derived_parameter(lambda x: x, '{0}__super_temperature', 'primary'),
omit_colatitude = derived_parameter(lambda x: math.pi - x, '{0}__omit_colatitude', 'primary'),
omit_radius = derived_parameter(lambda x: x, '{0}__omit_radius', 'primary'),
omit_azimuth = derived_parameter(lambda x: x, '{0}__omit_azimuth', 'primary'),
cede_colatitude = derived_parameter(lambda x: math.pi - x, '{0}__cede_colatitude', 'primary'),
cede_radius = derived_parameter(lambda x: x, '{0}__cede_radius', 'primary'),
cede_azimuth = derived_parameter(lambda x: x, '{0}__cede_azimuth', 'primary'),
cede_temperature = derived_parameter(lambda x: x, '{0}__cede_temperature', 'primary'))
'''.format(args.prefix[0])
)
else:
get_member_settings(args.hot_region_model[1])
get_bounds_and_values(args.prefix[1], args.hot_region_model[1])
module += (
'''
secondary = xpsi.HotRegion(bounds=bounds,
values=values,
symmetry=symmetry,
omit=omit,
cede=cede,
concentric=concentric,
sqrt_num_cells=args.{1}_sqrt_num_cells,
min_sqrt_num_cells=args.{1}_min_sqrt_num_cells,
max_sqrt_num_cells=args.{1}_max_sqrt_num_cells,
num_leaves=args.{1}_num_leaves,
num_rays=args.{1}_num_rays,
is_antiphased={0},
image_order_limit=args.image_order_limit,
prefix='{1}')
'''.format(str(not args.is_antiphased[0] if args.antipodal_reflection_symmetry else args.is_antiphased[1]), args.prefix[1])
)
module += (
'''
hot = HotRegions((primary, secondary))
'''
)
else:
module += (
'''
hot = primary
'''
)
if args.elsewhere_atmosphere_model is not None:
module += (
'''
elsewhere = xpsi.Elsewhere(bounds=dict(elsewhere_temperature = parse_bounds(args.elsewhere_temperature_bounds,
args.elsewhere_temperature_value)),
values=dict(elsewhere_temperature = parse_value(args.elsewhere_temperature_value)),
sqrt_num_cells=args.elsewhere_sqrt_num_cells,
num_rays=args.elsewhere_num_rays,
image_order_limit=args.image_order_limit)
'''
)
else:
module += (
'''
elsewhere = None
'''
)
module += (
'''
photosphere = CustomPhotosphere(hot = hot,
elsewhere = elsewhere,
values = dict(mode_frequency = spacetime['frequency']))
'''
)
if args.hot_atmosphere_load:
module += (
'''
photosphere.hot_atmosphere = args.hot_atmosphere_path
'''
)
if args.elsewhere_atmosphere_model is not None and args.elsewhere_atmosphere_load:
module += (
'''
photosphere.elsewhere_atmosphere = args.elsewhere_atmosphere_path
'''
)
module += (
'''
star = xpsi.Star(spacetime = spacetime, photospheres = photosphere)
prior = CustomPrior()
likelihood = xpsi.Likelihood(star = star,
signals = signals,
num_energies = args.number_energies,
threads = args.openmp_threads,
externally_updated = args.parameters_externally_updated if args.parameters_externally_updated is not None else True,
prior = prior,
max_energy = args.maximum_energy_ray_tracing)
'''
)
# Generated entry point: run MultiNest when the module is executed directly.
module += (
'''
if __name__ == '__main__':

    if args.multinest:
        wrapped_params = [0] * len(likelihood)
        for name in likelihood.names:
            # BUG FIX: the original test was ``if 'phase_shift' or 'azimuth' in name``,
            # which is always True (a non-empty string literal is truthy), so
            # *every* parameter was flagged as periodic; test each substring
            # against the name explicitly.
            if 'phase_shift' in name or 'azimuth' in name:
                wrapped_params[likelihood.index(name)] = 1

        if args.sample_files_root is None:
            args.sample_files_root = 'nlive{:d}_expf{:.1f}_{}_{}_tol{:.1g}'.format(args.number_live_points,
                                                                                  args.hypervolume_expansion_factor,
                                                                                  'noCONST' if not args.constant_efficiency_variant else 'CONST',
                                                                                  'noMM' if not args.mode_separation_variant else 'MM',
                                                                                  args.estimated_remaining_log_evidence)

        runtime_params = {'resume': args.resume,
                          'importance_nested_sampling': False, # incompatible with xpsi likelihood function
                          'multimodal': args.mode_separation_variant, # this variant, if activated is incompatible with nestcheck
                          'n_clustering_params': None,
                          'outputfiles_basename': os.path.join(args.sample_files_directory_path, args.sample_files_root),
                          'n_iter_before_update': args.number_iterations_per_write,
                          'n_live_points': args.number_live_points,
                          'sampling_efficiency': 1.0 / args.hypervolume_expansion_factor,
                          'const_efficiency_mode': args.constant_efficiency_variant,
                          'wrapped_params': wrapped_params,
                          'evidence_tolerance': args.estimated_remaining_log_evidence,
                          'max_iter': args.maximum_number_nested_replacement_iterations,
                          'verbose': True}

        xpsi.Sample.nested(likelihood, prior, **runtime_params)
    else:
        pass
'''
)
if not os.path.isdir(args.module_directory_path):
os.mkdir(args.module_directory_path)
write(r'{}.py'.format(os.path.join(args.module_directory_path,
args.main_module)), module)
write(r'{}.py'.format(os.path.join(args.module_directory_path, '__init__')), '')
module = (
'''""" Signal module for X-PSI {0} modelling of {1} {2} event data. """
from __future__ import print_function, division
import numpy as np
import math
import xpsi
from xpsi.likelihoods.default_background_marginalisation import eval_marginal_likelihood
from xpsi.likelihoods.default_background_marginalisation import precomputation
class CustomSignal(xpsi.Signal):
""" A subclass of the :class:`xpsi.Signal.Signal` class to make it callable.
"""
def __init__(self, workspace_intervals = 1000, epsabs = 0, epsrel = 1.0e-8,
epsilon = 1.0e-3, sigmas = 10.0, support = None, *args, **kwargs):
""" Perform precomputation. """
super(CustomSignal, self).__init__(*args, **kwargs)
try:
self._precomp = precomputation(self._data.counts.astype(np.int32))
except AttributeError:
print('No data... can synthesise data but cannot evaluate a '
'likelihood function.')
else:
self._workspace_intervals = workspace_intervals
self._epsabs = epsabs
self._epsrel = epsrel
self._epsilon = epsilon
self._sigmas = sigmas
if support is not None:
self._support = support
else:
self._support = -1.0 * np.ones((self._data.counts.shape[0],2))
self._support[:,0] = 0.0
@property
def support(self):
return self._support
@support.setter
def support(self, obj):
self._support = obj
def __call__(self, *args, **kwargs):
self.loglikelihood, self.expected_counts, self.background_signal, self.background_signal_given_support = \\
eval_marginal_likelihood(self._data.exposure_time,
self._data.phases,
self._data.counts,
self._signals,
self._phases,
self._shifts,
self._precomp,
self._support,
self._workspace_intervals,
self._epsabs,
self._epsrel,
self._epsilon,
self._sigmas,
kwargs.get('llzero'),
background={3})
'''.format(args.model,
_telescopes,
args.source,
'None' if not args.background_model else 'self.background.registered_background')
)
write(r'{}.py'.format(os.path.join(args.module_directory_path, args.custom_signal_module)), module)
module = (
'''""" Photosphere module for X-PSI {0} modelling of {1} {2} event data. """
from __future__ import print_function, division
import argparse
import re
class ArgumentParserCustom(argparse.ArgumentParser):
def convert_arg_line_to_args(self, arg_line):
if (re.match(r'^[\s]*#', arg_line) or # look for any number of whitespace characters up to a `#` character
re.match(r'^[\s]*$', arg_line)): # look for lines containing nothing or just whitespace
return []
else:
try:
_idx = arg_line.index('#')
except ValueError:
pass
else:
arg_line = arg_line[:_idx].rstrip()
return [arg_line]
parser = ArgumentParserCustom(
description="""
Photosphere module for X-PSI {0} modelling of {1} {2} event data.
You should import this module.
For help: python %(prog)s -h
""",
fromfile_prefix_chars='@')
'''.format(args.model,
_telescopes,
args.source)
)
if args.hot_atmosphere_load:
module += (
'''
parser.add_argument('--hot-atmosphere-size',
type=int,
nargs=4,
help='Size of each of the four dimensions of the numeric atmosphere table for the hot regions.')
'''
)
if args.elsewhere_atmosphere_model is not None:
if args.elsewhere_atmosphere_load:
module += (
'''
parser.add_argument('--elsewhere-atmosphere-size',
type=int,
nargs=4,
help='Size of each of the four dimensions of the numeric atmosphere table for elsewhere.')
'''
)
module += (
'''
if __name__ == '__main__':
args, _ = parser.parse_known_args()
else:
args, _ = parser.parse_known_args(['@{}'])
'''.format(args.config_path)
)
module += (
'''
import numpy as np
import math
import xpsi
class CustomPhotosphere(xpsi.Photosphere):
'''
)
if args.hot_atmosphere_load:
module += (
'''
@xpsi.Photosphere.hot_atmosphere.setter
def hot_atmosphere(self, path):
'''
)
if ('NSX' in args.hot_atmosphere_model or 'nsx' in args.hot_atmosphere_model):
module += (
'''
table = np.loadtxt(path, dtype=np.double)
logT = np.zeros(args.hot_atmosphere_size[0])
logg = np.zeros(args.hot_atmosphere_size[1])
mu = np.zeros(args.hot_atmosphere_size[2])
logE = np.zeros(args.hot_atmosphere_size[3])
reorder_buf = np.zeros((args.hot_atmosphere_size[0],
args.hot_atmosphere_size[1],
args.hot_atmosphere_size[2],
args.hot_atmosphere_size[3],))
index = 0
for i in range(reorder_buf.shape[0]):
for j in range(reorder_buf.shape[1]):
for k in range(reorder_buf.shape[3]):
for l in range(reorder_buf.shape[2]):
logT[i] = table[index,3]
logg[j] = table[index,4]
logE[k] = table[index,0]
mu[reorder_buf.shape[2] - l - 1] = table[index,1]
reorder_buf[i,j,reorder_buf.shape[2] - l - 1,k] = 10.0**(table[index,2])
index += 1
buf = np.zeros(np.prod(reorder_buf.shape))
bufdex = 0
for i in range(reorder_buf.shape[0]):
for j in range(reorder_buf.shape[1]):
for k in range(reorder_buf.shape[2]):
for l in range(reorder_buf.shape[3]):
buf[bufdex] = reorder_buf[i,j,k,l]; bufdex += 1
self._hot_atmosphere = (logT, logg, mu, logE, buf)
'''
)
else:
module += (
'''
raise NotImplementedError('You need to implement the setter that loads the hot atmosphere table.')
'''
)
if args.elsewhere_atmosphere_model is not None:
if args.elsewhere_atmosphere_load:
module += (
'''
@xpsi.Photosphere.elsewhere_atmosphere.setter
def elsewhere_atmosphere(self, path):
'''
)
if ('NSX' in args.elsewhere_atmosphere_model or 'nsx' in args.elsewhere_atmosphere_model):
module += (
'''
table = np.loadtxt(path, dtype=np.double)
logT = np.zeros(args.elsewhere_atmosphere_size[0])
logg = np.zeros(args.elsewhere_atmosphere_size[1])
mu = np.zeros(args.elsewhere_atmosphere_size[2])
logE = np.zeros(args.elsewhere_atmosphere_size[3])
reorder_buf = np.zeros((args.elsewhere_atmosphere_size[0],
args.elsewhere_atmosphere_size[1],
args.elsewhere_atmosphere_size[2],
args.elsewhere_atmosphere_size[3]))
index = 0
for i in range(reorder_buf.shape[0]):
for j in range(reorder_buf.shape[1]):
for k in range(reorder_buf.shape[3]):
for l in range(reorder_buf.shape[2]):
logT[i] = table[index,3]
logg[j] = table[index,4]
logE[k] = table[index,0]
mu[reorder_buf.shape[2] - l - 1] = table[index,1]
reorder_buf[i,j,reorder_buf.shape[2] - l - 1,k] = 10.0**(table[index,2])
index += 1
buf = np.zeros(np.prod(reorder_buf.shape))
bufdex = 0
for i in range(reorder_buf.shape[0]):
for j in range(reorder_buf.shape[1]):
for k in range(reorder_buf.shape[2]):
for l in range(reorder_buf.shape[3]):
buf[bufdex] = reorder_buf[i,j,k,l]; bufdex += 1
self._elsewhere_atmosphere = (logT, logg, mu, logE, buf)
'''
)
else:
module += (
'''
raise NotImplementedError('You need to implement the setter that loads the elsewhere atmosphere table.')
'''
)
module += (
'''
@property
def global_variables(self):
""" For interfacing with the image-plane signal simulator.
The extension module compiled is surface_radiation_field/archive/local_variables/PDT_U.pyx,
which replaces the contents of surface_radiation_field/local_variables.pyx.
"""
'''
)
if len(args.hot_region_model) == 2:
module += (
'''
ref_p = self.hot.objects[0]
ref_s = self.hot.objects[1]
return np.array([ref_p['{0}_colatitude'],
(ref_p['phase_shift'] + {4}) * 2.0 * math.pi,
ref_p['{0}_radius'],
ref_p['{1}_colatitude'],
(ref_p['phase_shift'] + {4}) * 2.0 * math.pi {3} ref_p['{2}_azimuth'],
ref_p['{1}_radius'],
ref_s['{5}_colatitude'],
(ref_s['phase_shift'] + {9}) * 2.0 * math.pi,
ref_s['{5}_radius'],
ref_s['{6}_colatitude'],
(ref_s['phase_shift'] + {9}) * 2.0 * math.pi {8} ref_p['{7}_azimuth'],
ref_s['{6}_radius'],
{10},
{11},
{12},
{13}])
'''.format('super' if 'DT' in args.hot_region_model[0] else 'omit',
'cede' if 'DT' in args.hot_region_model[0] else 'super',
'cede' if 'DT' in args.hot_region_model[0] else 'omit',
'+' if 'DT' in args.hot_region_model[0] else '-',
str(0.0) if not args.is_antiphased[0] else str(0.5),
'super' if 'DT' in args.hot_region_model[1] else 'omit',
'cede' if 'DT' in args.hot_region_model[1] else 'super',
'cede' if 'DT' in args.hot_region_model[1] else 'omit',
'+' if 'DT' in args.hot_region_model[1] else '-',
str(0.0) if not args.is_antiphased[1] else str(0.5),
"ref_p['super_temperature']" if 'DT' in args.hot_region_model[0] else 0.0,
"ref_p['cede_temperature']" if 'DT' in args.hot_region_model[0] else "ref_p['super_temperature']",
"ref_s['super_temperature']" if 'DT' in args.hot_region_model[1] else 0.0,
"ref_s['cede_temperature']" if 'DT' in args.hot_region_model[1] else "ref_s['super_temperature']")
)
else:
module += (
'''
ref = self.hot
return np.array([ref['{0}_colatitude'],
(ref['phase_shift'] + {4}) * 2.0 * math.pi,
ref['{0}_radius'],
ref['{1}_colatitude'],
(ref['phase_shift'] + {4}) * 2.0 * math.pi {3} ref['{2}_azimuth'],
ref['{1}_radius'],
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
{5},
{6},
0.0,
0.0])
'''.format('super' if 'DT' in args.hot_region_model[0] else 'omit',
'cede' if 'DT' in args.hot_region_model[0] else 'super',
'cede' if 'DT' in args.hot_region_model[0] else 'omit',
'+' if 'DT' in args.hot_region_model[0] else '-',
str(0.0) if not args.is_antiphased[0] else str(0.5),
"ref_p['super_temperature']" if 'DT' in args.hot_region_model[0] else 0.0,
"ref_p['cede_temperature']" if 'DT' in args.hot_region_model[0] else "ref_p['super_temperature']")
)
write(r'{}.py'.format(os.path.join(args.module_directory_path, args.custom_photosphere_module)), module)
module = (
'''""" Prior module for X-PSI {0} modelling of {1} {2} event data. """
from __future__ import print_function, division
import argparse
import re
class ArgumentParserCustom(argparse.ArgumentParser):
def convert_arg_line_to_args(self, arg_line):
if (re.match(r'^[\s]*#', arg_line) or # look for any number of whitespace characters up to a `#` character
re.match(r'^[\s]*$', arg_line)): # look for lines containing nothing or just whitespace
return []
else:
try:
_idx = arg_line.index('#')
except ValueError:
pass
else:
arg_line = arg_line[:_idx].rstrip()
return [arg_line]
parser = ArgumentParserCustom(
description="""
Prior module for X-PSI {0} modelling of {1} {2} event data.
You should import this module.
For help: python %(prog)s -h
""",
fromfile_prefix_chars='@')
class CompileAction(argparse._StoreAction):
""" Compile arguments for dynamic evaluation. """
def __call__(self, parser, namespace, values, option_string=None):
if isinstance(values, list):
if 'DEFAULT UNIFORM' in values:
setattr(namespace, self.dest, None)
return None
elif values == 'DEFAULT UNIFORM':
setattr(namespace, self.dest, None)
return None
if isinstance(values, list):
for i, value in enumerate(values[:-1]):
values[i] = compile(value, '<string>', 'exec')
values[-1] = compile(values[-1], '<string>', 'eval')
setattr(namespace, self.dest, values)
else:
setattr(namespace, self.dest, compile(values, '<string>', 'eval'))
parser.add_argument('--prior-import-statements',
type=str,
nargs='*',
default=['from scipy.stats import truncnorm', 'import math'],
help='Custom import statements needed for evaluation of prior CDFs. Each statement is executed with the ``exec(...)`` builtin function.')
parser.add_argument('--prior-global-statements',
type=str,
nargs='*',
help='Custom assignment statements to be evaluated on the global level that are useful, e.g., for evaluation of prior CDFs. Each statement is executed with the ``exec(...)`` builtin function.')
'''.format(args.model,
_telescopes,
args.source)
)
module += (
'''
parser.add_argument('--mass-prior',
type=str,
nargs='*',
action=CompileAction,
help='Prior inverse CDF of gravitation mass (solar masses). {1}')
parser.add_argument('--distance-prior',
type=str,
nargs='*',
action=CompileAction,
help='Prior inverse CDF of Earth distance (kpc). {1}')
parser.add_argument('--cos-inclination-prior',
type=str,
nargs='*',
action=CompileAction,
help='Prior inverse CDF of cosine of Earth inclination to stellar spin axis. {1}')
parser.add_argument('--neutral-hydrogen-column-density-prior',
type=str,
nargs='*',
action=CompileAction,
help='Prior inverse CDF of ratio of interstellar neutral hydrogen column density to the fiducial density. {1}')
parser.add_argument('--{0}-super-temperature-prior',
type=str,
nargs='*',
action=CompileAction,
help='Prior inverse CDF of hot-region {0} superseding region log10(temperature [K]). {1}')
'''.format(args.prefix[0],
_CDF_notice)
)
if args.background_model:
for _parameter in args.background_parameters:
module += (
'''
parser.add_argument('--{0}-prior',
type=str,
nargs='*',
action=CompileAction,
help='Prior inverse CDF of the ``{0}`` parameter. {1}')
'''.format(_parameter, _CDF_notice)
)
for instrument in args.instrument:
module += (
'''
parser.add_argument('--{0}-energy-independent-effective-area-scaling-factor-prior',
type=str,
nargs='*',
action=CompileAction,
default=[compile('truncnorm.ppf(x, -5.0, 5.0, loc=1.0, scale=0.104)', '<string>', 'eval')],
help='Prior inverse CDF of the energy-independent effective area scaling factor. {1}')
'''.format(instrument,
_CDF_notice)
)
def add_cede_temperature_prior(i):
    """ Append a ``--<prefix>-cede-temperature-prior`` CLI option to ``module``.

    :param int i: Hot-region index into ``args.hot_region_model`` /
        ``args.prefix``. Only dual-temperature models (name containing
        ``'DT'``) have a ceding region, so this is a no-op otherwise.
    """
    global args
    global module  # the generated-module source string being accumulated
    if 'DT' in args.hot_region_model[i]:
        module += (
'''
parser.add_argument('--{0}-cede-temperature-prior',
type=str,
nargs='*',
action=CompileAction,
help='Prior inverse CDF of hot-region {0} ceding region log10(temperature [K]). {1}')
'''.format(args.prefix[i],
_CDF_notice)
        )
# Ceding-region temperature prior option for the first (primary) hot region
# (a no-op unless the model is dual-temperature).
add_cede_temperature_prior(0)

# When two distinct hot regions are modelled (no antipodal reflection
# symmetry), the secondary region gets its own super-temperature prior
# option, plus a cede-temperature prior option if it is dual-temperature.
if len(args.hot_region_model) == 2 and not args.antipodal_reflection_symmetry:
    module += (
'''
parser.add_argument('--{0}-super-temperature-prior',
type=str,
nargs='*',
action=CompileAction,
help='Prior inverse CDF of hot-region {0} superseding region log10(temperature [K]). {1}')
'''.format(args.prefix[1],
_CDF_notice)
    )
    add_cede_temperature_prior(1)
module += (
'''
if __name__ == '__main__':
args, _ = parser.parse_known_args()
else:
args, _ = parser.parse_known_args(['@{}'])
'''.format(args.config_path)
)
# Prior-module preamble: imports plus execution of any user-supplied
# import/global statements. NOTE: the original chained
# ``.format(args.model, _telescopes, args.source)`` onto this template even
# though it contains no replacement fields; the vestigial call is dropped.
module += (
'''
import numpy as np
import math
import xpsi
from xpsi.global_imports import _2pi, gravradius, _dpr
from xpsi import Parameter
from xpsi.cellmesh.mesh_tools import eval_cedeCentreCoords as eval_coords_under_rotation
from scipy.interpolate import Akima1DInterpolator

if args.prior_import_statements is not None:
    for import_statement in args.prior_import_statements:
        exec(import_statement)

if args.prior_global_statements is not None:
    for global_statement in args.prior_global_statements:
        exec(global_statement)
'''
)
module += (
'''
class CustomPrior(xpsi.Prior):
""" A joint prior PDF. """
__derived_names__ = ['compactness']
__draws_from_support__ = 4
def __init__(self):
super(CustomPrior, self).__init__()
'''
)
if ( 'CST' in args.hot_region_model
or 'CDT' in args.hot_region_model
or 'EST' in args.hot_region_model
or 'EDT' in args.hot_region_model ):
module += (
'''
self.a_f = 0.001
self.b_f = 1.0
self.a_zeta = 0.001
self.b_zeta = math.pi/2.0 - self.a_zeta
vals = np.linspace(0.0, self.b_zeta, 1000)
self._interpolator_super_smaller = Akima1DInterpolator(self._vector_super_smaller_radius_mass(vals), vals)
self._interpolator_super_smaller.extrapolate = True
'''
)
if 'PST' in args.hot_region_model or 'PDT' in args.hot_region_model:
module += (
'''
self.c_f = 0.001
self.d_f = 2.0
self.a_xi = 0.001
self.b_xi = math.pi/2.0 - self.a_xi
vals = np.linspace(0.0, self.b_xi, 1000)
self._interpolator = Akima1DInterpolator(self._vector_super_radius_mass(vals), vals)
self._interpolator.extrapolate = True
'''
)
module += (
'''
def __call__(self, p = None):
""" Evaluate distribution at point ``p``.
:param list p: Model parameter values.
:returns: Logarithm of the distribution evaluated at point ``p``.
"""
temp = super(CustomPrior, self).__call__(p)
if not np.isfinite(temp):
return temp
ref = self.parameters.star.spacetime # shortcut
# check the prior PDF support conditions below and comment out the exception throw
# if you want to condition on those model assumptions
#raise NotImplementedError('Implement the prior __call__ method.')
# based on contemporary EOS theory
if not ref['radius'] <= 16.0:
return -np.inf
# limit polar radius to be just outside the Schwarzschild photon sphere
R_p = 1.0 + ref.epsilon * (-0.788 + 1.030 * ref.zeta)
if R_p < 1.505 / ref.R_r_s:
return -np.inf
mu = math.sqrt(-1.0 / (3.0 * ref.epsilon * (-0.788 + 1.030 * ref.zeta)))
# 2-surface cross-section have a single maximum in |z|
# i.e., an elliptical surface; minor effect on support, if any,
# only for high spin frequencies
if mu < 1.0:
return -np.inf
# check effective gravity at pole (where it is maximum) and
# at equator (where it is minimum) are in NSX limits
grav = xpsi.surface_radiation_field.effective_gravity(np.array([1.0, 0.0]),
np.array([ref.R] * 2 ),
np.array([ref.zeta] * 2),
np.array([ref.epsilon] * 2))
'''
)
# BUG FIX: the original condition was ``if 'NSX' or 'nsx' in
# args.hot_atmosphere_model``, which is always True ('NSX' is a truthy
# literal), so the NSX effective-gravity table check was emitted for every
# atmosphere model; test each substring for membership explicitly.
if 'NSX' in args.hot_atmosphere_model or 'nsx' in args.hot_atmosphere_model:
    module += (
'''
        for g in grav:
            if not 13.7 <= g <= 15.0: # check that these NSX effective gravity table limits are correct
                return -np.inf
'''
    )
module += (
'''
ref = self.parameters # redefine shortcut
'''
)
if len(args.hot_region_model) == 2:
if args.hot_region_model[0] == args.hot_region_model[1] and not args.antipodal_reflection_symmetry:
module += (
'''
# hot regions can exchange to yield the exact same configuration
# break degeneracy:
if ref['{0}__{2}'] > ref['{1}__{2}']:
return -np.inf
'''.format(args.prefix[0],
args.prefix[1],
args.break_hot_region_exchange_degeneracy_with)
)
if len(args.hot_region_model) == 2 and not args.antipodal_reflection_symmetry:
_ST_idx = None
if args.hot_region_model[0] == 'ST':
_ST_idx = 0
elif args.hot_region_model[1] == 'ST':
_ST_idx = 1
module += (
'''
# require that hot regions do not overlap within the prior support'''
)
if _ST_idx is not None:
if args.hot_region_model[1 - _ST_idx] == 'ST':
module += (
'''
if self._overlap(ref, '{0}', '{1}', 'super', 'super', {2:.1f}, {3:.1f}):
return -np.inf
'''.format(args.prefix[_ST_idx],
args.prefix[1 - _ST_idx],
0.5 if args.is_antiphased[_ST_idx] else 0.0,
0.5 if args.is_antiphased[1 - _ST_idx] else 0.0)
)
elif args.hot_region_model[1 - _ST_idx] in ['CDT', 'EDT']:
module += (
'''
if self._overlap(ref, '{0}', '{1}', 'super', 'cede', {2:.1f}, {3:.1f}):
return -np.inf
'''.format(args.prefix[_ST_idx],
args.prefix[1 - _ST_idx],
0.5 if args.is_antiphased[_ST_idx] else 0.0,
0.5 if args.is_antiphased[1 - _ST_idx] else 0.0)
)
elif args.hot_region_model[1 - _ST_idx] == 'PDT':
module += (
'''
if self._overlap(ref, '{0}', '{1}', 'super', 'super', {2:.1f}, {3:.1f}):
return -np.inf
if self._overlap(ref, '{0}', '{1}', 'super', 'cede', {2:.1f}, {3:.1f}):
return -np.inf
'''.format(args.prefix[_ST_idx],
args.prefix[1 - _ST_idx],
0.5 if args.is_antiphased[_ST_idx] else 0.0,
0.5 if args.is_antiphased[1 - _ST_idx] else 0.0)
)
elif args.hot_region_model[1 - _ST_idx] in ['CST', 'EST']:
module += (
'''
if self._overlap(ref, '{0}', '{1}', 'super', 'cede', {2:.1f}, {3:.1f}):
if not self._overlap(ref, '{0}', '{1}', 'super', 'omit', {2:.1f}, {3:.1f}, superset='{1}'):
return -np.inf
'''.format(args.prefix[_ST_idx],
args.prefix[1 - _ST_idx],
0.5 if args.is_antiphased[_ST_idx] else 0.0,
0.5 if args.is_antiphased[1 - _ST_idx] else 0.0)
)
elif args.hot_region_model[1 - _ST_idx] == 'PST':
module += (
'''
if self._overlap_outside_mask(ref, '{0}', '{1}', 'super', {2:.1f}, {3:.1f}):
return -np.inf
'''.format(args.prefix[_ST_idx],
args.prefix[1 - _ST_idx],
0.5 if args.is_antiphased[_ST_idx] else 0.0,
0.5 if args.is_antiphased[1 - _ST_idx] else 0.0)
)
_DT_idx = None
if _ST_idx is None:
if args.hot_region_model[0] in ['CDT','EDT']:
_DT_idx = 0
elif args.hot_region_model[1] in ['CDT','EDT']:
_DT_idx = 1
if _ST_idx is None and _DT_idx is not None:
if args.hot_region_model[1 - _DT_idx] in ['CDT', 'EDT']:
module += (
'''
if self._overlap(ref, '{0}', '{1}', 'cede', 'cede', {2:.1f}, {3:.1f}):
return -np.inf
'''.format(args.prefix[_DT_idx],
args.prefix[1 - _DT_idx],
0.5 if args.is_antiphased[_DT_idx] else 0.0,
0.5 if args.is_antiphased[1 - _DT_idx] else 0.0)
)
elif args.hot_region_model[1 - _DT_idx] == 'PDT':
module += (
'''
if self._overlap(ref, '{0}', '{1}', 'cede', 'super', {2:.1f}, {3:.1f}):
return -np.inf
if self._overlap(ref, '{0}', '{1}', 'cede', 'cede', {2:.1f}, {3:.1f}):
return -np.inf
'''.format(args.prefix[_DT_idx],
args.prefix[1 - _DT_idx],
0.5 if args.is_antiphased[_DT_idx] else 0.0,
0.5 if args.is_antiphased[1 - _DT_idx] else 0.0)
)
elif args.hot_region_model[1 - _DT_idx] in ['CST', 'EST']:
module += (
'''
if self._overlap(ref, '{0}', '{1}', 'cede', 'super', {2:.1f}, {3:.1f}):
if not self._overlap(ref, '{0}', '{1}', 'cede', 'omit', {2:.1f}, {3:.1f}, superset='{1}'):
return -np.inf
'''.format(args.prefix[_DT_idx],
args.prefix[1 - _DT_idx],
0.5 if args.is_antiphased[_DT_idx] else 0.0,
0.5 if args.is_antiphased[1 - _DT_idx] else 0.0)
)
elif args.hot_region_model[1 - _DT_idx] == 'PST':
module += (
'''
if self._overlap_outside_mask(ref, '{0}', '{1}', 'cede', {2:.1f}, {3:.1f}):
return -np.inf
'''.format(args.prefix[_DT_idx],
args.prefix[1 - _DT_idx],
0.5 if args.is_antiphased[_DT_idx] else 0.0,
0.5 if args.is_antiphased[1 - _DT_idx] else 0.0)
)
# --- Hot-region pair priors: PDT / CST / EST companions -------------------
# Identify which hot region (if any) uses the PDT model; only consulted when
# no ST/DT region was matched by the branches above.
_PDT_idx = None
if _ST_idx is None and _DT_idx is None:
if args.hot_region_model[0] == 'PDT':
_PDT_idx = 0
elif args.hot_region_model[1] == 'PDT':
_PDT_idx = 1
# Append overlap-rejection code (log-prior -> -inf) for a PDT region paired
# with each possible companion model. The appended text becomes part of the
# generated CustomPrior.__call__ body.
if _ST_idx is None and _DT_idx is None and _PDT_idx is not None:
if args.hot_region_model[1 - _PDT_idx] == 'PDT':
module += (
'''
if self._overlap(ref, '{0}', '{1}', 'super', 'super', {2:.1f}, {3:.1f}):
return -np.inf
if self._overlap(ref, '{0}', '{1}', 'super', 'cede', {2:.1f}, {3:.1f}):
return -np.inf
if self._overlap(ref, '{0}', '{1}', 'cede', 'super', {2:.1f}, {3:.1f}):
return -np.inf
if self._overlap(ref, '{0}', '{1}', 'cede', 'cede', {2:.1f}, {3:.1f}):
return -np.inf
'''.format(args.prefix[_PDT_idx],
args.prefix[1 - _PDT_idx],
0.5 if args.is_antiphased[_PDT_idx] else 0.0,
0.5 if args.is_antiphased[1 - _PDT_idx] else 0.0)
)
elif args.hot_region_model[1 - _PDT_idx] in ['CST', 'EST']:
module += (
'''
if self._overlap(ref, '{0}', '{1}', 'super', 'super', {2:.1f}, {3:.1f}):
if not self._overlap(ref, '{0}', '{1}', 'super', 'omit', {2:.1f}, {3:.1f}, superset='{1}'):
return -np.inf
if self._overlap(ref, '{0}', '{1}', 'cede', 'super', {2:.1f}, {3:.1f}):
if not self._overlap(ref, '{0}', '{1}', 'cede', 'omit', {2:.1f}, {3:.1f}, superset='{1}'):
return -np.inf
'''.format(args.prefix[_PDT_idx],
args.prefix[1 - _PDT_idx],
0.5 if args.is_antiphased[_PDT_idx] else 0.0,
0.5 if args.is_antiphased[1 - _PDT_idx] else 0.0)
)
elif args.hot_region_model[1 - _PDT_idx] == 'PST':
module += (
'''
if self._overlap_outside_mask(ref, '{0}', '{1}', 'super', {2:.1f}, {3:.1f}):
return -np.inf
elif self._overlap_outside_mask(ref, '{0}', '{1}', 'cede', {2:.1f}, {3:.1f}):
return -np.inf
'''.format(args.prefix[_PDT_idx],
args.prefix[1 - _PDT_idx],
0.5 if args.is_antiphased[_PDT_idx] else 0.0,
0.5 if args.is_antiphased[1 - _PDT_idx] else 0.0)
)
# Identify a CST/EST ("omit"-style single-temperature) region, only when no
# ST/DT/PDT region has already been matched.
_OST_idx = None
if _ST_idx is None and _DT_idx is None and _PDT_idx is None:
if args.hot_region_model[0] in ['CST','EST']:
_OST_idx = 0
elif args.hot_region_model[1] in ['CST','EST']:
_OST_idx = 1
if _ST_idx is None and _DT_idx is None and _PDT_idx is None and _OST_idx is not None:
if args.hot_region_model[1 - _OST_idx] in ['CST', 'EST']:
module += (
'''
if self._overlap(ref, '{0}', '{1}', 'cede', 'cede', {2:.1f}, {3:.1f}):
if not self._overlap(ref, '{0}', '{1}', 'cede', 'omit', {2:.1f}, {3:.1f}, superset='{1}'):
if not self._overlap(ref, '{0}', '{1}', 'omit', 'cede', {2:.1f}, {3:.1f}, superset='{0}'):
return -np.inf
'''.format(args.prefix[_OST_idx],
args.prefix[1 - _OST_idx],
0.5 if args.is_antiphased[_OST_idx] else 0.0,
0.5 if args.is_antiphased[1 - _OST_idx] else 0.0)
)
elif args.hot_region_model[1 - _OST_idx] == 'PST':
module += (
'''
# incomplete prior support:
# some configurations should be included but are not with the conditions below
if self._overlap_outside_mask(ref, '{0}', '{1}', 'super', {2:.1f}, {3:.1f}):
if self._overlap_outside_mask(ref, '{1}', '{0}', 'super', {3:.1f}, {2:.1f}):
return -np.inf
'''.format(args.prefix[_OST_idx],
args.prefix[1 - _OST_idx],
0.5 if args.is_antiphased[_OST_idx] else 0.0,
0.5 if args.is_antiphased[1 - _OST_idx] else 0.0)
)
# Fallback when neither region matched any special-cased model pairing.
if _ST_idx is None and _DT_idx is None and _PDT_idx is None and _OST_idx is None:
module += (
'''
# incomplete prior support:
# some configurations should be included but are not with the conditions below
if self._overlap_outside_mask(ref, '{0}', '{1}', 'super', {2:.1f}, {3:.1f}):
if self._overlap_outside_mask(ref, '{1}', '{0}', 'super', {3:.1f}, {2:.1f}):
return -np.inf
'''.format(args.prefix[0],
args.prefix[1],
0.5 if args.is_antiphased[0] else 0.0,
0.5 if args.is_antiphased[1] else 0.0)
)
# No rejection triggered: the generated __call__ returns log-prior 0.0.
module += (
'''
return 0.0
'''
)
if len(args.hot_region_model) > 1:
module += (
'''
@staticmethod
def _colatitude(ref, z, z_member):
""" Helper to bypass exception for concentric regions. """
try:
return ref[z + '__' + z_member + '_colatitude']
except KeyError:
return ref[z + '__super_colatitude']
@staticmethod
def _azimuth(ref, z, z_member):
""" Helper to bypass exception for concentric regions. """
try:
return ref[z + '__' + z_member + '_azimuth']
except KeyError:
return 0.0
def _overlap(self, ref, x, y, x_member, y_member, x_antiphase, y_antiphase, superset=None, use_cached=False):
""" Determine overlap between two spherical circles. """
if not use_cached:
_tmp_phase_x = (ref[x + '__phase_shift'] + x_antiphase) * _2pi
if x_member == 'super':
_tmp_phase_x -= self._azimuth(ref, x, 'omit')
elif x_member == 'cede':
_tmp_phase_x += self._azimuth(ref, x, 'cede')
_tmp_phase_y = (ref[y + '__phase_shift'] + y_antiphase) * _2pi
if y_member == 'super':
_tmp_phase_y -= self._azimuth(ref, y, 'omit')
elif y_member == 'cede':
_tmp_phase_y += self._azimuth(ref, y, 'cede')
_phi = _tmp_phase_x - _tmp_phase_y
self._ang_sep = xpsi.HotRegion.psi(self._colatitude(ref, x, x_member),
_phi,
self._colatitude(ref, y, y_member))
if superset is None:
if self._ang_sep < ref[x + '__' + x_member + '_radius'] + ref[y + '__' + y_member + '_radius']:
return True
elif superset == y:
if self._ang_sep + ref[x + '__' + x_member + '_radius'] < ref[y + '__' + y_member + '_radius']:
return True
elif superset == x:
if ref[x + '__' + x_member + '_radius'] > self._ang_sep + ref[y + '__' + y_member + '_radius']:
return True
return False
'''
)
if 'PST' in args.hot_region_model and len(args.hot_region_model) > 1:
module += (
'''
def _overlap_outside_mask(self, ref, x, y, x_member, x_antiphase, y_antiphase):
""" Determine if two spherical circles overlap outisde of a masking spherical circle. """
# check if x and PST-super overlap
if not self._overlap(ref, x, y, x_member, 'super', x_antiphase, y_antiphase): # then no overlap
return False
# check if PST-super is entirely engulfed by x
if self._overlap(ref, x, y, x_member, 'super', x_antiphase, y_antiphase,
superset=x, use_cached=True): # then overlap
return True
phi = ( ref[x + '__phase_shift'] + x_antiphase - ( ref[y + '__phase_shift'] + y_antiphase ) ) * _2pi
# find the x and PST-super region centre coordinates in rotated coordinate frame
x__ang_sep, x__azi = eval_coords_under_rotation(-1.0*self._colatitude(ref, y, 'omit'),
self._colatitude(ref, x, x_member),
phi)
y__ang_sep, y__super_azi = eval_coords_under_rotation(-1.0*self._colatitude(ref, y, 'omit'),
ref[y + '__super_colatitude'],
-1.0*self._azimuth(ref, y, 'omit'))
# check if x overlaps with mask
if x__ang_sep > ref[x + '__super_radius'] + ref[y + '__omit_radius']: # then overlap
return True
# check if x is entirely engulfed by mask
if x__ang_sep + ref[x + '__super_radius'] < ref[y + '__omit_radius']: # then no overlap
return False
# check if mask is entirely within PST-super
# meaning ring-like topology and thus x must overlap with PST-super
if y__ang_sep + ref[y + '__omit_radius'] < ref[y + '__super_radius']: # then overlap
return True
elif x__ang_sep + ref[y + '__omit_radius'] < ref[x + '__super_radius']: # then overlap
return True
# finding the terminal half angles in rotated coordinate frame
x__phi_term = self._phi_calculator(ref[y + '__omit_radius'],
ref[x + '__super_radius'],
x__ang_sep)
y__phi_term = self._phi_calculator(ref[y + '__omit_radius'],
ref[y + '__super_radius'],
y__ang_sep)
# find the terminal intervals
x__term_int = np.array([self._make_periodic(x__azi - x__phi_term), self._make_periodic(x__azi + x__phi_term)])
y__term_int = np.array([self._make_periodic(y__super_azi - y__phi_term), self._make_periodic(y__super_azi + y__phi_term)])
# find the widest interval first, and use it as the reference interval
# the widest interval cannot by definition be within the narrowest interval
# so if there is an overlap, at least one of the narrowest interval bounds
# are within the widest interval
if x__term_int[0] > x__term_int[1]:
_x__interval_width = x__term_int[1] + (2.0*np.pi - x__term_int[0])
else:
_x__interval_width = x__term_int[1] - x__term_int[0]
if y__term_int[0] > y__term_int[1]:
_y__interval_width = y__term_int[1] + (2.0*np.pi - y__term_int[0])
else:
_y__interval_width = y__term_int[1] - y__term_int[0]
_widest_interval = x__term_int if _x__interval_width > _y__interval_width else y__term_int
_narrowest_interval = x__term_int if _y__interval_width < _y__interval_width else y__term_int
# check if terminal azimuth intervals overlap
if _widest_interval[0] > _widest_interval[1]:
if _narrowest_interval[0] < _widest_interval[1] or _narrowest_interval[0] > _widest_interval[0]\\
or _narrowest_interval[1] < _widest_interval[1] or _narrowest_interval[1] > _widest_interval[0]: # then overlap
return True
else:
if _widest_interval[0] < _narrowest_interval[0] < _widest_interval[1]\\
or _widest_interval[0] < _narrowest_interval[1] < _widest_interval[1]: # then overlap
return True
# check if both regions have a solution to maximum azimuthal extent w.r.t mask centre
# also need to check if the x region or PST-super region contains the antipode of the mask
# centre, because in this case there is no solution either
if x__ang_sep + ref[x + '__super_radius'] > np.pi and y__ang_sep + ref[y + '__super_radius'] > np.pi:
return True
if x__ang_sep < ref[x + '__super_radius']:
x__sep_max_azi = 0.0
elif x__ang_sep + ref[x + '__super_radius'] < np.pi:
# find colatitude of the point subtended by the maximum azimuthal points w.r.t mask centre
x__sep_max_azi = np.arccos(np.cos(x__ang_sep) / np.cos(ref[x + '__super_radius']))
else:
x__sep_max_azi = np.pi
if y__ang_sep < ref[y + '__super_radius']:
y__sep_max_azi = 0.0
elif y__ang_sep + ref[y + '__super_radius'] < np.pi:
y__sep_max_azi = np.arccos(np.cos(y__ang_sep) / np.cos(ref[y + '__super_radius']))
else:
y__sep_max_azi = np.pi
# check if maximum azimuthal points for both x and PST-super regions are greater than mask radius
if (x__sep_max_azi > ref[y + '__omit_radius']) and (y__sep_max_azi > ref[y + '__omit_radius']): # then overlap
return True
# find the angle between the lines joining the terminal points to the region centres
x__term_ang = self._phi_calculator(ref[y + '__omit_radius'],
x__ang_sep,
ref[x + '__super_radius'])
y__term_ang = self._phi_calculator(ref[y + '__omit_radius'],
y__ang_sep,
ref[y + '__super_radius'])
_tmp = np.abs(x__term_ang - math.pi/2) > np.abs(y__term_ang - math.pi/2)
# check if only the x maximum azimuthal points are outside the mask region
if x__sep_max_azi > ref[y + '__omit_radius'] and _tmp: # then overlap
return True
# check if only the PST-super maximum azimuthal points are outside the mask region
if y__sep_max_azi > ref[y + '__omit_radius'] and not _tmp: # then overlap
return True
# else, the overlap between the regions is a subset of the mask region, and permitted
return False
@staticmethod
def _phi_calculator(psi, zeta, v):
cos_phi = (np.cos(zeta) - np.cos(v) * np.cos(psi)) / (np.sin(v) * np.sin(psi))
return np.arccos(cos_phi)
@staticmethod
def _make_periodic(angle):
if angle < 0:
angle += _2pi
elif angle > _2pi:
angle -= _2pi
return angle
'''
)
# --- Generated inverse-sampling helpers -----------------------------------
# Density helpers for the superseding/ceding radius joint prior of
# CST/CDT/EST/EDT regions (zeta parameterisation).
if ( 'CST' in args.hot_region_model
or 'CDT' in args.hot_region_model
or 'EST' in args.hot_region_model
or 'EDT' in args.hot_region_model ):
module += (
'''
def _I_super_smaller(self, x):
return x * np.log(self.b_zeta/self.a_zeta)
def _II_super_smaller(self, x):
return x - self.a_zeta - x*np.log(x/self.b_zeta)
def _scalar_super_smaller_radius_mass(self, x):
if x >= self.a_zeta:
mass = self._II_super_smaller(x)
else:
mass = self._I_super_smaller(x)
return mass
def _vector_super_smaller_radius_mass(self, x):
masses = np.zeros(len(x))
for i, _ in enumerate(x):
masses[i] = self._scalar_super_smaller_radius_mass(_)
masses /= (self.b_f - self.a_f)
masses /= (self.b_zeta - self.a_zeta)
return masses
def _inverse_sample_cede_larger_radius(self, x, psi):
if psi < self.a_zeta:
return self.a_zeta*np.exp(x * np.log(self.b_zeta/self.a_zeta))
else:
return psi*np.exp(x*np.log(self.b_zeta/psi))
'''
)
# Equivalent helpers for PST/PDT regions (xi parameterisation).
if 'PST' in args.hot_region_model or 'PDT' in args.hot_region_model:
module += (
'''
def _I(self, x):
return x * np.log(self.b_xi/self.a_xi)
def _II(self, x):
return 2.0*(x - self.a_xi) - x*np.log(x/self.b_xi)
def _scalar_super_radius_mass(self, x):
if x >= self.a_xi:
mass = self._II(x)
else:
mass = self._I(x)
return mass
def _vector_super_radius_mass(self, x):
masses = np.zeros(len(x))
for i, _ in enumerate(x):
masses[i] = self._scalar_super_radius_mass(_)
masses /= (self.d_f - self.c_f)
masses /= (self.b_xi - self.a_xi)
return masses
def _inverse_sample_cede_radius(self, x, psi):
if psi < self.a_xi:
return self.a_xi*np.exp(x * np.log(self.b_xi/self.a_xi))
elif psi >= self.a_xi and x <= 1.0/(1.0 + np.log(self.b_xi/psi)):
return x*psi*(1.0 + np.log(self.b_xi/psi))
else:
return psi*np.exp(x*(1.0 + np.log(self.b_xi/psi)) - 1.0)
'''
)
# Header of the generated inverse_sample method: base-class sampling first,
# then the custom per-parameter overrides appended below.
module += (
'''
def inverse_sample(self, hypercube=None):
""" Draw sample uniformly from the distribution via inverse sampling. """
global args
to_cache = self.parameters.vector
if hypercube is None:
hypercube = np.random.rand(len(self))
# the base method is useful, so to avoid writing that code again:
_ = super(CustomPrior, self).inverse_sample(hypercube)
ref = parameters = self.parameters # redefine shortcut
'''
)
# NOTE(review): the generated code below exec()/eval()s user-supplied prior
# expression strings from the CLI configuration -- this assumes the
# configuration is trusted input; never feed it untrusted text.
module += (
'''
try:
self._modded_names
except AttributeError:
self._modded_names = [name.replace('__', '_') for name in ref.names]
for modded_name, name in zip(self._modded_names, ref.names):
if getattr(args, modded_name + '_prior', None) is not None:
idx = ref.index(name)
x = hypercube[idx]
for _statement in getattr(args, modded_name + '_prior')[:-1]:
exec(_statement)
ref[name] = eval(getattr(args, modded_name + '_prior')[-1])
'''
)
def construct_hot_region_prior(i):
global args
global module
module += (
'''
# inverse sample parameters of hot-region {0}
idx = ref.index('{0}__super_colatitude')
a, b = ref.get_param('{0}__super_colatitude').bounds
a = math.cos(a); b = math.cos(b)
ref['{0}__super_colatitude'] = math.acos(b + (a - b) * hypercube[idx])
'''.format(args.prefix[i])
)
if args.hot_region_model[i] in ['CST','CDT','EST','EDT']:
module += (
'''
# radius of superseding region (omit or super code object)
idx = ref.index('{0}__{1}_radius')
ref['{0}__{1}_radius'] = float(self._interpolator_super_smaller(hypercube[idx]))
# radius of ceding region (super or cede code object)
idx = ref.index('{0}__{2}_radius')
ref['{0}__{2}_radius'] = self._inverse_sample_cede_larger_radius(hypercube[idx], ref['{0}__{1}_radius'])
'''.format(args.prefix[i],
'omit' if 'ST' in args.hot_region_model[i] else 'super',
'super' if 'ST' in args.hot_region_model[i] else 'cede')
)
elif args.hot_region_model[i] in ['EST','EDT']:
module += (
'''
# coordinates of mask or ceding region (omit or cede code object)
idx = ref.index('{0}__{3}_colatitude')
ref[idx] = hypercube[idx] * (ref['{0}__{2}_radius'] - ref['{0}__{1}_radius'])
ref[idx], ref['{0}__{3}_azimuth'] = eval_coords_under_rotation(ref['{0}__super_colatitude'],
ref['{0}__{3}_colatitude'],
ref['{0}__{3}_azimuth'])
'''.format(args.prefix[i],
'omit' if 'ST' in args.hot_region_model[i] else 'super',
'super' if 'ST' in args.hot_region_model[i] else 'cede',
'omit' if 'ST' in args.hot_region_model[i] else 'cede')
)
elif args.hot_region_model[i] in ['PST','PDT']:
module += (
'''
# radius of superseding region (omit or super code object)
idx = ref.index('{0}__{1}_radius')
ref['{0}__{1}_radius'] = float(self._interpolator(hypercube[idx]))
# radius of ceding region (super or cede code object)
idx = ref.index('{0}__{2}_radius')
ref['{0}__{2}_radius'] = self._inverse_sample_cede_radius(hypercube[idx], ref['{0}__{1}_radius'])
# coordinates of mask or ceding region (omit or cede code object)
idx = ref.index('{0}__{3}_colatitude')
if ref['{0}__{1}_radius'] <= ref['{0}__{2}_radius']:
ref[idx] = hypercube[idx] * (ref['{0}__{2}_radius'] + ref['{0}__{1}_radius'])
else:
ref[idx] = ( ref['{0}__{1}_radius']
- ref['{0}__{2}_radius']
+ 2.0*hypercube[idx]*ref['{0}__{2}_radius'] )
ref[idx], ref['{0}__{3}_azimuth'] = eval_coords_under_rotation(ref['{0}__super_colatitude'],
ref['{0}__{3}_colatitude'],
ref['{0}__{3}_azimuth'])
'''.format(args.prefix[i],
'omit' if 'ST' in args.hot_region_model[i] else 'super',
'super' if 'ST' in args.hot_region_model[i] else 'cede',
'omit' if 'ST' in args.hot_region_model[i] else 'cede')
)
# Emit sampling code for the first region, and for the second unless the
# model enforces antipodal reflection symmetry (second region mirrors first).
construct_hot_region_prior(0)
if len(args.hot_region_model) == 2 and not args.antipodal_reflection_symmetry:
construct_hot_region_prior(1)
# Close the generated inverse_sample: restore parameter caches and return
# the vector MultiNest writes to disk.
module += (
'''
# restore proper cache
for parameter, cache in zip(self.parameters, to_cache):
parameter.cached = cache
# it is important that we return the desired vector because it is
# automatically written to disk by MultiNest and only by MultiNest
return self.parameters.vector
'''
)
# Post-processing transform appended to the generated prior: derives the
# compactness ratio from sampled mass and radius.
module += (
'''
def transform(self, p, **kargs):
""" A transformation for post-processing. """
p = list(p) # copy
# used ordered names and values
ref = dict(zip(self.parameters.names, p))
# compactness ratio M/R_eq
p += [gravradius(ref['mass']) / ref['radius']]
return p
'''
)
# Write the completed custom prior module to disk.
write(r'{}.py'.format(os.path.join(args.module_directory_path, args.custom_prior_module)), module)
# --- Interstellar attenuation module --------------------------------------
# Generated as one template: Akima interpolation of a tabulated attenuation
# curve, scaled by a neutral-hydrogen column-density parameter.
module = (
'''""" Interstellar module for X-PSI {0} modelling of {1} {2} event data. """
from __future__ import print_function, division
import numpy as np
import math
import xpsi
from xpsi import Parameter
from scipy.interpolate import Akima1DInterpolator
class CustomInterstellar(xpsi.Interstellar):
""" Apply interstellar attenuation model {3}. """
def __init__(self, energies, attenuation, bounds, values = None):
if values is None: values = {{}}
assert len(energies) == len(attenuation), 'Array length mismatch.'
self._lkp_energies = energies # for lookup
self._lkp_attenuation = attenuation # for lookup
N_H = Parameter('neutral_hydrogen_column_density',
strict_bounds = (0.0, 50.0),
bounds = bounds.get('neutral_hydrogen_column_density', None),
doc = 'Neutral hydrogen column density in units of the fiducial column density',
symbol = r'$N_{{\\rm H}}$',
value = values.get('neutral_hydrogen_column_density', None),
permit_prepend = False)
self._interpolator = Akima1DInterpolator(self._lkp_energies,
self._lkp_attenuation)
self._interpolator.extrapolate = True
super(CustomInterstellar, self).__init__(N_H)
def attenuation(self, energies):
""" Interpolate the attenuation coefficients.
Useful for post-processing.
"""
return self._interpolate(energies)**(self['neutral_hydrogen_column_density'])
def _interpolate(self, energies):
""" Helper. """
_att = self._interpolator(energies)
_att[_att < 0.0] = 0.0
return _att
@classmethod
def load(cls, path,
energy_column=0,
attenuation_column=1,
**kwargs):
""" Load attenuation file. """
# check the loading assumptions and comment out the exception throw if they are true
#raise NotImplementedError('Implement the class method to load the interstellar attenuation table.')
# template
temp = np.loadtxt(path, dtype=np.double)
energies = temp[:,energy_column]
attenuation = temp[:,attenuation_column]
return cls(energies, attenuation, **kwargs)
'''.format(args.model,
_telescopes,
args.source,
args.attenuation_model)
)
write(r'{}.py'.format(os.path.join(args.module_directory_path, args.custom_interstellar_module)), module)
# --- Instrument module -----------------------------------------------------
# Build a human-readable "A, B, and C" list of instrument names for the
# generated docstring.
# NOTE(review): when args.instrument has exactly one entry, this produces
# "X, and X" (the final `, and` append duplicates the only element) --
# consider guarding the last append with `len(args.instrument) > 1`.
_instruments = args.instrument[0]
for _x in args.instrument[1:-1]:
_instruments += ', {}'.format(_x)
_instruments += ', and {}'.format(args.instrument[-1])
# Common CustomInstrument header: response-matrix scaling by an
# energy-independent effective-area factor.
module = (
'''""" Instrument module for X-PSI {0} modelling of {1} {2} event data. """
from __future__ import print_function, division
import numpy as np
import math
import xpsi
from xpsi import Parameter, make_verbose
class CustomInstrument(xpsi.Instrument):
""" {3}. """
def construct_matrix(self):
""" Implement response matrix parameterisation. """
matrix = self['energy_independent_effective_area_scaling_factor'] * self.matrix
matrix[matrix < 0.0] = 0.0
return matrix
def __call__(self, signal, *args):
""" Overwrite. """
matrix = self.construct_matrix()
self._cached_signal = np.dot(matrix, signal)
return self._cached_signal
'''.format(args.model,
_telescopes,
args.source,
_instruments)
)
# Emit one loader classmethod per configured instrument; each parses an
# ARF/RMF/channel-energies file triple into a response matrix.
for instrument in args.instrument:
module += (
'''
@classmethod
@make_verbose('Loading {0} response matrix',
'Response matrix loaded')
def {0}(cls,
bounds,
values,
ARF,
RMF,
channel_energies,
max_input,
max_channel,
min_input=0,
min_channel=0,
effective_area_scaling_factor=1.0,
ARF_skiprows=0,
ARF_low_column=1,
ARF_high_column=2,
ARF_area_column=3,
RMF_skiprows=0,
RMF_usecol=-1,
channel_energies_skiprows=0,
channel_energies_low_column=0,
**kwargs):
""" Load {0} instrument response matrix. """
alpha = Parameter('{1}',
strict_bounds = (0.1,1.9),
bounds = bounds.get('{1}', None),
doc='{0} energy-independent effective area scaling factor',
symbol = r'$\\alpha_{{\\rm {0}}}$',
value = values.get('{1}',
1.0 if bounds.get('{1}', None) is None else None))
# check the loading assumptions and comment out the exception throw if they are true
#raise NotImplementedError('Implement the class method for loading the {0} instrument.')
# template
#ARF = np.loadtxt(ARF, dtype=np.double, skiprows=ARF_skiprows)
#RMF = np.loadtxt(RMF, dtype=np.double, skiprows=RMF_skiprows, usecols=RMF_usecol)
#channel_energies = np.loadtxt(channel_energies, dtype=np.double, skiprows=channel_energies_skiprows)
#matrix = np.zeros((channel_energies.shape[0], ARF.shape[0]))
#for i in range(ARF.shape[0]):
# matrix[:,i] = RMF[i*channel_energies.shape[0]:(i+1)*channel_energies.shape[0]]
#max_input = int(max_input)
#if min_input != 0:
# min_input = int(min_input)
#edges = np.zeros(max_input - min_input + 1, dtype=np.double)
#edges[0] = ARF[min_input, ARF_low_column]; edges[1:] = ARF[min_input:max_input, ARF_high_column]
#RSP = np.zeros((max_channel - min_channel,
# max_input - min_input), dtype=np.double)
#for i in range(RSP.shape[0]):
# RSP[i,:] = matrix[i+min_channel, min_input:max_input] * ARF[min_input:max_input, ARF_area_column] * effective_area_scaling_factor
#channels = np.arange(min_channel, max_channel)
_RMF_path = RMF
ARF = np.loadtxt(ARF, dtype=np.double, skiprows=ARF_skiprows)
RMF = np.loadtxt(_RMF_path, dtype=np.double, skiprows=RMF_skiprows, usecols=RMF_usecol)
RMF_zerocol = np.loadtxt(_RMF_path, dtype=np.double, skiprows=RMF_skiprows, usecols=0)
channel_energies = np.loadtxt(channel_energies, dtype=np.double, skiprows=channel_energies_skiprows)
matrix = np.zeros((channel_energies.shape[0], ARF.shape[0]))
last = 0
k = 0
counter = 0
for i in range(RMF_zerocol.shape[0]):
if math.floor(RMF_zerocol[i]) == RMF_zerocol[i] and RMF_zerocol[i] != 0.0:
counter += 1
if i == 0: continue
else:
for j in range(i - last):
matrix[channel_energies.shape[0] - i + last + j, k] = RMF[last + j] #* ARF[k, ARF_area_column]
#if i - last != channel_energies.shape[0]:
# print('arf i=%i'%RMF_zerocol[i], 'i=%i'%i, 'last=%i'%last, 'nchans=%i'%(i - last))
last = i
k += 1
max_input = int(max_input)
if min_input != 0:
min_input = int(min_input)
edges = np.zeros(max_input - min_input + 1, dtype=np.double)
edges[0] = ARF[min_input, ARF_low_column]; edges[1:] = ARF[min_input:max_input, ARF_high_column]
RSP = np.zeros((max_channel - min_channel,
max_input - min_input), dtype=np.double)
for i in range(RSP.shape[0]):
RSP[i,:] = matrix[i+min_channel, min_input:max_input] * ARF[min_input:max_input, ARF_area_column] * effective_area_scaling_factor
channels = np.arange(min_channel, max_channel)
return cls(RSP,
edges,
channels,
channel_energies[min_channel:max_channel+1,channel_energies_low_column],
alpha, **kwargs)
'''.format(instrument,
'energy_independent_effective_area_scaling_factor')
)
# Write the completed instrument module to disk.
write(r'{}.py'.format(os.path.join(args.module_directory_path, args.custom_instrument_module)), module)
# --- Background module -----------------------------------------------------
# Nothing left to generate when no background model was requested.
if not args.background_model:
sys.exit(0)
module = (
'''""" Background module for X-PSI {0} modelling of {1} {2} event data. """
from __future__ import print_function, division
import numpy as np
import math
import xpsi
from xpsi import Parameter
'''.format(args.model,
_telescopes,
args.source)
)
def _write_background_class(instrument=None):
# Append one (optionally instrument-prefixed) background class template.
# The template deliberately raises NotImplementedError until the user
# reviews it, but note several defects a user will hit after enabling it:
# NOTE(review): the template signature takes `value=None` but the body
# reads `values.get(...)` (NameError); both Parameter objects are bound
# to the same name `index`, so only the normalization reaches super();
# and `super(CustomBackground, ...)` is wrong whenever the class name is
# prefixed (i.e. whenever `{0}` is non-empty).
global module
module += (
'''
class {0}CustomBackground(xpsi.Background):
""" Model for incident background photon specific flux. """
def __init__(self, bounds=None, value=None):
# check the model implementation and comment out the exception
# throw if appropriate
raise NotImplementedError('Implement the background model.')
# template is a powerlaw model
doc = """
Powerlaw spectral index.
"""
index = xpsi.Parameter('powerlaw_index',
strict_bounds = (-5.0, -1.01),
bounds = bounds.get('powerlaw_index', None),
doc = doc,
symbol = r'$\\Gamma{2}$',
value = values.get('powerlaw_index', None),
permit_prepend = {1})
doc = """
Powerlaw spectral normalization (photons/keV/s/cm^2 @ 1 keV).
"""
index = xpsi.Parameter('powerlaw_normalization',
strict_bounds = (0.0, None),
bounds = bounds.get('powerlaw_normalization', None),
doc = doc,
symbol = r'$N{2}$',
value = values.get('powerlaw_normalization', None),
permit_prepend = {1})
super(CustomBackground, self).__init__(index)
def __call__(self, energy_edges, phases):
""" Evaluate the incident background field. """
# check the model implementation and comment out the exception
# throw if appropriate
raise NotImplementedError('Implement the background model.')
# template is a powerlaw model
G = self['powerlaw_index']
temp = np.zeros((energy_edges.shape[0] - 1, phases.shape[0]))
temp[:,0] = (energy_edges[1:]**(G + 1.0) - energy_edges[:-1]**(G + 1.0)) / (G + 1.0)
for i in range(phases.shape[0]):
temp[:,i] = temp[:,0]
self.background = temp * self['powerlaw_normalization']
'''.format(instrument + '_' if instrument is not None else '',
'False' if args.background_shared_class else 'True',
'_{{\\rm {0}}}'.format(instrument) if instrument is not None else '')
)
# One shared class, or one class per instrument.
if args.background_shared_class:
_write_background_class(None)
else:
for _instrument in args.instrument:
_write_background_class(_instrument)
# Write the completed background module to disk.
write(r'{}.py'.format(os.path.join(args.module_directory_path, args.custom_background_module)), module)
| 39.824677 | 942 | 0.543561 |
be11892e10d42e82e2d1cf1b89b402ccc30b425d | 2,035 | py | Python | integrations/tensorflow/e2e/pytree_test.py | schoppmp/iree | d573c3dbb4eef8044764ae6d80ca79e37e8de522 | [
"Apache-2.0"
] | null | null | null | integrations/tensorflow/e2e/pytree_test.py | schoppmp/iree | d573c3dbb4eef8044764ae6d80ca79e37e8de522 | [
"Apache-2.0"
] | null | null | null | integrations/tensorflow/e2e/pytree_test.py | schoppmp/iree | d573c3dbb4eef8044764ae6d80ca79e37e8de522 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import app
from iree.tf.support import tf_test_utils
import tensorflow as tf
# Empty lists and dicts are currently unsupported. IREE also currently cannot
# represent multiple sequence types, so we turn all sequences into tuples.
class PyTreeModule(tf_test_utils.TestModule):
  """Exercises compilation of functions whose inputs/outputs are pytrees.

  Empty lists and dicts are currently unsupported. IREE also currently
  cannot represent multiple sequence types, so every sequence is a tuple.
  """

  @tf_test_utils.tf_function_unit_test(input_signature=[])
  def output_tuple_len_1(self):
    """Returns a one-element tuple."""
    return (0,)

  @tf_test_utils.tf_function_unit_test(input_signature=[])
  def output_tuple_len_2(self):
    """Returns a two-element tuple."""
    return (0, 1)

  @tf_test_utils.tf_function_unit_test(input_signature=[])
  def output_tuple_len_3(self):
    """Returns a three-element tuple."""
    return (0, 1, 2)

  @tf_test_utils.tf_function_unit_test(input_signature=[])
  def output_nested_pytree(self):
    """Returns a dict-of-tuples pytree with one nested dict level."""
    return {
        "key_a": (0, 1, 2),
        "key_b": (0, 1, {
            "key_c": (0, 1)
        }),
    }

  @tf_test_utils.tf_function_unit_test(input_signature=[{
      "key_a": (tf.TensorSpec([]), tf.TensorSpec([]), tf.TensorSpec([])),
      "key_b": (tf.TensorSpec([]), tf.TensorSpec([]), {
          "key_c": (tf.TensorSpec([]), tf.TensorSpec([]))
      }),
  }])
  def input_nested_pytree(self, input_pytree):
    """Identity function over a nested pytree argument."""
    return input_pytree
class PyTreeTest(tf_test_utils.TracedModuleTestCase):
  """Traced-module test case that compiles PyTreeModule for comparison."""

  def __init__(self, *args, **kwargs):
    super(PyTreeTest, self).__init__(*args, **kwargs)
    # Compile the module under test once per test-case instance.
    self._modules = tf_test_utils.compile_tf_module(PyTreeModule)
def main(argv):
  """CLI entry point: register PyTreeModule's generated tests, then run."""
  del argv  # Unused by this entry point.
  PyTreeTest.generate_unit_tests(PyTreeModule)
  tf.test.main()


if __name__ == '__main__':
  app.run(main)
| 31.307692 | 77 | 0.728747 |
7ecdcaab0392580a78d34ff3dbcd8ceab1601a2e | 737 | py | Python | head_dataframe_by_chunk.py | ermakovpetr/head_dataframe_by_chunk | fcc361ce68be9a516c9cb2ddbf64e6595cbebb18 | [
"MIT"
] | 1 | 2017-11-08T10:26:52.000Z | 2017-11-08T10:26:52.000Z | head_dataframe_by_chunk.py | ermakovpetr/head_dataframe_by_chunk | fcc361ce68be9a516c9cb2ddbf64e6595cbebb18 | [
"MIT"
] | null | null | null | head_dataframe_by_chunk.py | ermakovpetr/head_dataframe_by_chunk | fcc361ce68be9a516c9cb2ddbf64e6595cbebb18 | [
"MIT"
] | null | null | null | def head_dataframe_by_chunk(df, size_chunk_columns=6, n_rows=2):
from IPython.core.display import display, HTML
if type(size_chunk_columns) is list:
current_column = 0
for i in size_chunk_columns:
display(HTML('<style> .df thead tr { background-color: #B0B0B0; } </style>' +
df.iloc[:,current_column:current_column+i].head(n_rows).to_html(classes='df')))
current_column += i
elif type(size_chunk_columns) is int:
for i in range(0, df.shape[1], size_chunk_columns):
display(HTML('<style> .df thead tr { background-color: #B0B0B0; } </style>' +
df.iloc[:,i:i+size_chunk_columns].head(n_rows).to_html(classes='df')))
| 52.642857 | 104 | 0.625509 |
44233e57da9d902a9011b8a8d9589196ebadcfe2 | 1,295 | py | Python | test/test_paged_alert_with_stats.py | mdennehy/python-client | 4d9cfa32075a6a65d88a38fe9e72b282e87b8808 | [
"Apache-2.0"
] | null | null | null | test/test_paged_alert_with_stats.py | mdennehy/python-client | 4d9cfa32075a6a65d88a38fe9e72b282e87b8808 | [
"Apache-2.0"
] | null | null | null | test/test_paged_alert_with_stats.py | mdennehy/python-client | 4d9cfa32075a6a65d88a38fe9e72b282e87b8808 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.paged_alert_with_stats import PagedAlertWithStats # noqa: E501
from wavefront_api_client.rest import ApiException
class TestPagedAlertWithStats(unittest.TestCase):
    """Unit-test stubs for the PagedAlertWithStats model."""

    def setUp(self):
        """No fixtures are required for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testPagedAlertWithStats(self):
        """Smoke-test stub for PagedAlertWithStats.

        FIXME: construct the model with example values for all mandatory
        attributes, e.g.::

            wavefront_api_client.models.paged_alert_with_stats.PagedAlertWithStats()  # noqa: E501
        """
def _run_tests():
    """Execute this module's tests via the unittest CLI runner."""
    unittest.main()


if __name__ == '__main__':
    _run_tests()
| 31.585366 | 409 | 0.741313 |
6a2421f3f87be3b965a92b1b9e18cf13a009f1fa | 18,338 | py | Python | muspinsim/spinsys.py | muon-spectroscopy-computational-project/muspinsim | d9e971edd840ab0c33b143f9b5694bc1b09011d2 | [
"MIT"
] | null | null | null | muspinsim/spinsys.py | muon-spectroscopy-computational-project/muspinsim | d9e971edd840ab0c33b143f9b5694bc1b09011d2 | [
"MIT"
] | null | null | null | muspinsim/spinsys.py | muon-spectroscopy-computational-project/muspinsim | d9e971edd840ab0c33b143f9b5694bc1b09011d2 | [
"MIT"
] | null | null | null | """spinsys.py
A class to hold a given spin system, defined by specific nuclei
"""
import logging
import numpy as np
from numbers import Number
import scipy.constants as cnst
from muspinsim.utils import Clonable
from muspinsim.spinop import SpinOperator
from muspinsim.hamiltonian import Hamiltonian
from muspinsim.lindbladian import Lindbladian
from muspinsim.constants import gyromagnetic_ratio, spin, quadrupole_moment, EFG_2_MHZ
class InteractionTerm(Clonable):
    """Generic spin interaction term.

    Represents a contraction of a Cartesian coupling tensor with the spin
    operators of one or more spins of a SpinSystem, i.e. the sum over
    components of T[a, b, ...] * S_i^a * S_j^b * ...
    """

    def __init__(self, spinsys, indices=(), tensor=0, label=None):
        """Build the term and pre-compute its total operator.

        Arguments:
            spinsys {SpinSystem} -- Spin system this term belongs to
            indices {tuple} -- Indices of the coupled spins, one per tensor
                               dimension (default: empty, i.e. scalar term)
            tensor {ndarray} -- Cartesian coupling tensor; every dimension
                                must have size 3 (a scalar is allowed for
                                zero spins)
            label {str} -- Human-readable name (default: "Term")

        Raises:
            ValueError -- If any tensor dimension does not have size 3
        """
        # NOTE: the default for `indices` is an immutable tuple rather than
        # a mutable list literal (shared mutable defaults are a Python
        # pitfall); np.array() accepts either.
        self._spinsys = spinsys
        self._indices = np.array(indices)
        self._tensor = np.array(tensor)
        self._label = "Term" if label is None else label

        # Every tensor axis must be a Cartesian (x, y, z) axis.
        if np.any(np.array(self._tensor.shape) != 3):
            raise ValueError("Tensor is not fully three-dimensional")

        self._recalc_operator()

    def _recalc_operator(self):
        """Recompute the cached total SpinOperator from tensor and indices."""
        total_op = None

        d = len(self._tensor.shape)
        if d > 0:
            # All Cartesian component combinations, e.g. (0,0), (0,1), ...
            # for a rank-2 tensor.
            index_tuples = np.indices(self._tensor.shape).reshape((d, -1)).T
        else:
            # Scalar tensor: a single empty index combination.
            index_tuples = [[]]

        for ii in index_tuples:
            # Operator product S_i^a S_j^b ... weighted by the matching
            # tensor element.
            op = (
                self._spinsys.operator(
                    {ind: "xyz"[ii[i]] for i, ind in enumerate(self._indices)}
                )
                * self._tensor[tuple(ii)]
            )
            if total_op is None:
                total_op = op
            else:
                total_op += op

        self._operator = total_op

    @property
    def label(self):
        # Human-readable name of this term.
        return self._label

    @property
    def indices(self):
        # Indices of the coupled spins, as an immutable tuple.
        return tuple(self._indices)

    @property
    def tensor(self):
        # Defensive copy of the coupling tensor.
        return np.array(self._tensor)

    @property
    def operator(self):
        # Clone of the pre-computed total operator.
        return self._operator.clone()

    @property
    def matrix(self):
        # Matrix representation of the total operator.
        return self._operator.matrix

    def __repr__(self):
        return self.label
class SingleTerm(InteractionTerm):
    """Interaction term coupling a single spin to a fixed Cartesian vector."""

    def __init__(self, spinsys, i, vector, label="Single"):
        super(SingleTerm, self).__init__(spinsys, [i], vector, label)

    @property
    def i(self):
        """Index of the spin this term acts on."""
        return self._indices[0]

    def rotate(self, rotmat):
        """Return a copy of this term with its vector rotated by rotmat."""
        rotation = np.array(rotmat)
        rotated_vector = np.dot(self._tensor, rotation.T)
        return SingleTerm(self._spinsys, self.i, rotated_vector, self._label)

    def __repr__(self):
        return "{0} {{ S_{1} * {2} }}".format(self._label, self.i, self._tensor)
class DoubleTerm(InteractionTerm):
    """An interaction term bilinear in two spins: S_i * M * S_j."""

    def __init__(self, spinsys, i, j, matrix, label="Double"):
        super(DoubleTerm, self).__init__(spinsys, [i, j], matrix, label)

    @property
    def i(self):
        # Index of the first coupled spin
        return self._indices[0]

    @property
    def j(self):
        # Index of the second coupled spin
        return self._indices[1]

    def rotate(self, rotmat):
        """Return a copy of this term with its matrix rotated by rotmat."""
        rotation = np.array(rotmat)
        rotated_matrix = np.linalg.multi_dot([rotation, self._tensor, rotation.T])
        return DoubleTerm(self._spinsys, self.i, self.j, rotated_matrix, self._label)

    def __repr__(self):
        return "{0} {{ S_{1} * [{2} {3} {4}] * S_{5} }}".format(
            self._label, self.i, *self._tensor, self.j
        )
class DissipationTerm(Clonable):
    """A dissipative coupling: an operator paired with a strength gamma."""

    def __init__(self, operator, gamma=0.0):
        self._operator = operator
        self._gamma = gamma

    @property
    def operator(self):
        """The dissipation operator."""
        return self._operator

    @property
    def gamma(self):
        """Coupling strength (MHz)."""
        return self._gamma

    @property
    def tuple(self):
        """The (operator, gamma) pair, as consumed by the Lindbladian."""
        return (self._operator, self._gamma)
class SpinSystem(Clonable):
    """A system of spin-carrying particles and the interactions among them."""

    def __init__(self, spins=()):
        """Create a SpinSystem object

        Create an object representing a system of particles with spins (muons,
        electrons and atomic nuclei) and holding their operators.

        Keyword Arguments:
            spins {list} -- List of symbols representing the various particles.
                            Each element can be 'e' (electron), 'mu' (muon) a
                            chemical symbol, or a (str, int) tuple with a
                            chemical symbol and an isotope (default: {()})
        """
        # NOTE: default changed from mutable [] to immutable (); it is
        # copied immediately below, so behavior is unchanged.
        gammas = []
        Qs = []
        Is = []
        operators = []
        for s in spins:
            if isinstance(s, tuple):
                el, iso = s
            else:
                el, iso = s, None

            gammas.append(gyromagnetic_ratio(el, iso))
            Qs.append(quadrupole_moment(el, iso))
            Is.append(spin(el, iso))

            # Precompute the basic single-spin operators along x, y, z,
            # raising (+), lowering (-) and identity (0)
            opdict = {a: SpinOperator.from_axes(Is[-1], a) for a in "xyz+-0"}
            operators.append(opdict)

        self._spins = list(spins)
        self._gammas = np.array(gammas)
        self._Qs = np.array(Qs)
        self._Is = np.array(Is)
        # Hilbert space dimension of each spin is 2I + 1
        self._dim = tuple((2 * self._Is + 1).astype(int))
        self._operators = operators

        self._terms = []
        self._dissip_terms = []

        # FIX: use isinstance rather than type(s) == tuple, consistent with
        # the check in the loop above
        snames = [
            "{1}{0}".format(*s) if isinstance(s, tuple) else str(s)
            for s in self._spins
        ]
        logging.info("Created spin system with spins:")
        logging.info("\t\t{0}".format(" ".join(snames)))

    @property
    def spins(self):
        return list(self._spins)

    @property
    def gammas(self):
        return self._gammas.copy()

    @property
    def Qs(self):
        return self._Qs.copy()

    @property
    def Is(self):
        return self._Is.copy()

    @property
    def dimension(self):
        return self._dim

    @property
    def is_dissipative(self):
        """Whether the system has any non-zero dissipative coupling.

        FIX: the original compared DissipationTerm *objects* against 0.0
        through an object-dtype numpy array, which is True for any
        non-empty list of terms even if every coupling strength is zero.
        Test the gamma values instead.
        """
        return any(t.gamma != 0.0 for t in self._dissip_terms)

    def add_term(self, indices, tensor, label="Term"):
        """Add to the spin system a generic interaction term

        Add a term of the form T*S_i*S_j*S_k*..., where S_i is the vector of
        the three spin operators:

        [S_x, S_y, S_z]

        for spin of index i.

        Arguments:
            indices {[int]} -- Indices of spins appearing in the term
            tensor {ndarray} -- Tensor with n dimensions (n = len(indices)),
                                each of length 3, describing the interaction.

        Keyword Arguments:
            label {str} -- A label to name the term (default: {'Term'})

        Returns:
            term {InteractionTerm} -- The term just created

        Raises:
            ValueError -- Invalid index or vector
        """
        for i in indices:
            if i < 0 or i >= len(self._spins):
                raise ValueError("Invalid index i")
        tensor = np.array(tensor)

        term = InteractionTerm(self, indices, tensor, label=label)
        self._terms.append(term)

        return term

    def add_linear_term(self, i, vector, label="Single"):
        """Add to the spin system a term linear in one spin

        Add a term of the form v*S_i, where S_i is the vector of the three
        spin operators:

        [S_x, S_y, S_z]

        for spin of index i.

        Arguments:
            i {int} -- Index of the spin
            vector {ndarray} -- Vector v

        Keyword Arguments:
            label {str} -- A label to name the term (default: {'Single'})

        Returns:
            SingleTerm -- The term just created

        Raises:
            ValueError -- Invalid index or vector
        """
        if i < 0 or i >= len(self._spins):
            raise ValueError("Invalid index i")
        vector = np.array(vector)

        term = SingleTerm(self, i, vector, label=label)
        self._terms.append(term)

        return term

    def add_bilinear_term(self, i, j, matrix, label="Double"):
        """Add to the spin system a term bilinear in two spins

        Add a term of the form S_i*M*S_j, where S_i is the vector of the three
        spin operators:

        [S_x, S_y, S_z]

        for spin of index i, and same for S_j.

        Arguments:
            i {int} -- Index of first spin
            j {int} -- Index of second spin
            matrix {ndarray} -- Matrix M

        Keyword Arguments:
            label {str} -- A label to name the term (default: {'Double'})

        Returns:
            DoubleTerm -- The term just created

        Raises:
            ValueError -- Invalid index or vector
        """
        if i < 0 or i >= len(self._spins):
            raise ValueError("Invalid index i")
        if j < 0 or j >= len(self._spins):
            raise ValueError("Invalid index j")
        matrix = np.array(matrix)

        term = DoubleTerm(self, i, j, matrix, label=label)
        self._terms.append(term)

        return term

    def add_zeeman_term(self, i, B):
        """Add a zeeman term

        Add a single term coupling a given spin to a magnetic field

        Arguments:
            i {int} -- Index of the spin
            B {ndarray | number} -- Magnetic field vector, in Tesla. If just a
                                    scalar is assumed to be along z

        Returns:
            SingleTerm -- The term just created
        """
        if isinstance(B, Number):
            B = [0, 0, B]  # Treat it as along z by default
        B = np.array(B)

        logging.info("Adding Zeeman term to spin {0}".format(i + 1))

        return self.add_linear_term(i, B * self.gamma(i), "Zeeman")

    def add_dipolar_term(self, i, j, r):
        """Add a dipolar term

        Add a spin-spin dipolar coupling between two distinct spins. The
        coupling is calculated geometrically from the vector connecting them,
        in Angstrom.

        Arguments:
            i {int} -- Index of the first spin
            j {int} -- Index of the second spin
            r {ndarray} -- Vector connecting the two spins (in Angstrom)

        Returns:
            DoubleTerm -- The term just created

        Raises:
            ValueError -- Raised if i == j
        """
        if i == j:
            raise ValueError("Can not set up dipolar coupling with itself")

        r = np.array(r)

        g_i = self.gamma(i)
        g_j = self.gamma(j)

        rnorm = np.linalg.norm(r)
        # Geometric part of the dipolar tensor: 3 r r^T / |r|^2 - 1
        D = -(np.eye(3) - 3.0 / rnorm**2.0 * r[:, None] * r[None, :])
        # Coupling prefactor in MHz (gammas in MHz/T, r in Angstrom)
        dij = -(cnst.mu_0 * cnst.hbar * (g_i * g_j * 1e6)) / (
            2 * (rnorm * 1e-10) ** 3
        )  # MHz
        D *= dij

        logging.info("Adding dipolar term to spins {0}-{1}".format(i + 1, j + 1))

        return self.add_bilinear_term(i, j, D, "Dipolar")

    def add_quadrupolar_term(self, i, EFG):
        """Add a quadrupolar term

        Add a quadrupolar term to a nucleus with I >= 1 from its Electric
        Field Gradient tensor.

        Arguments:
            i {int} -- Index of the spin
            EFG {ndarray} -- Electric Field Gradient tensor

        Returns:
            DoubleTerm -- The term just created
        """
        EFG = np.array(EFG)
        Q = self.Q(i)
        I = self.I(i)

        if I == 0.5:
            raise ValueError(
                "Can not set up quadrupolar coupling for " "spin 1/2 particle"
            )

        Qtens = EFG_2_MHZ * Q / (2 * I * (2 * I - 1)) * EFG

        logging.info("Adding quadrupolar term to spin {0}".format(i + 1))

        return self.add_bilinear_term(i, i, Qtens, "Quadrupolar")

    def remove_term(self, term):
        """Remove a term from the spin system

        Remove an interaction term from this spin system.

        Arguments:
            term {InteractionTerm} -- Term to remove

        Raises:
            ValueError -- The term is not contained in this system
        """
        self._terms.remove(term)

    def clear_terms(self):
        """Remove all terms

        Remove all interaction terms from this spin system.
        """
        terms = list(self._terms)
        for t in terms:
            self.remove_term(t)

    def add_dissipative_term(self, op, d=0.0):
        """Set a dissipation operator for the system.

        Set a dissipation operator for this system, representing its coupling
        (in MHz) with an external heat bath to include in the Lindbladian of
        the system.

        Arguments:
            op {SpinOperator} -- Operator for the dissipation term

        Keyword Arguments:
            d {number} -- Dissipation coupling in MHz (default: {0.0})
        """
        term = DissipationTerm(op, d)
        self._dissip_terms.append(term)

        return term

    def remove_dissipative_term(self, term):
        """Remove a dissipation term from the system.

        Remove a dissipation term from this spin system.

        Arguments:
            term {DissipationTerm} -- Term to remove

        Raises:
            ValueError -- The term is not contained in this system
        """
        self._dissip_terms.remove(term)

    def clear_dissipative_terms(self):
        """Remove all terms

        Remove all dissipative terms from this spin system.
        """
        dterms = list(self._dissip_terms)
        for t in dterms:
            self.remove_dissipative_term(t)

    def gamma(self, i):
        """Returns the gyromagnetic ratio of a given particle

        Arguments:
            i {int} -- Index of the particle

        Returns:
            float -- Gyromagnetic ratio in MHz/T
        """
        return self._gammas[i]

    def Q(self, i):
        """Returns the quadrupole moment of a given particle

        Arguments:
            i {int} -- Index of the particle

        Returns:
            float -- Quadrupole moment in Barn
        """
        return self._Qs[i]

    def I(self, i):
        """Returns the spin of a given particle

        Arguments:
            i {int} -- Index of the particle

        Returns:
            float -- Spin in units of hbar
        """
        return self._Is[i]

    def operator(self, terms={}):
        """Return an operator for this spin system

        Return a SpinOperator for this system containing the specified terms.

        Keyword Arguments:
            terms {dict} -- A dictionary of terms to include. The keys should
                            indices of particles and the values should be
                            symbols indicating one spin operator (either x, y,
                            z, +, - or 0). Wherever not specified, the identity
                            operaror is applied (default: {{}})

        Returns:
            SpinOperator -- The requested operator
        """
        # NOTE: the {} default is never mutated here (only read via .get),
        # so it is safe despite being a mutable default.
        ops = [self._operators[i][terms.get(i, "0")] for i in range(len(self))]

        # Kronecker product over all spins, in order
        M = ops[0]
        for i in range(1, len(ops)):
            M = M.kron(ops[i])

        return M

    def rotate(self, rotmat=np.eye(3)):
        """Return a clone of this system with all terms rotated by rotmat."""
        # Trying to avoid pointlessly cloning the terms
        terms = self._terms
        self._terms = []

        # Make a clone
        rssys = self.clone()

        self._terms = terms

        # Edit the terms
        try:
            rssys._terms = [t.rotate(rotmat) for t in terms]
        except AttributeError:
            raise RuntimeError(
                "Can only rotate SpinSystems containing Single" " or Double terms"
            )

        return rssys

    @property
    def hamiltonian(self):
        # Sum of all interaction term matrices; zero matrix if no terms
        if len(self._terms) == 0:
            n = np.prod(self.dimension)
            H = np.zeros((n, n))
        else:
            H = np.sum([t.matrix for t in self._terms], axis=0)
        H = Hamiltonian(H, dim=self.dimension)

        return H

    @property
    def lindbladian(self):
        H = self.hamiltonian
        dops = [t.tuple for t in self._dissip_terms]
        L = Lindbladian.from_hamiltonian(H, dops)

        return L

    def __len__(self):
        return len(self._gammas)
class MuonSpinSystem(SpinSystem):
    """A SpinSystem that contains exactly one muon.

    Adds muon-specific conveniences: the muon's index, the indices of any
    electrons, hyperfine couplings and precomputed muon spin operators.
    """

    def __init__(self, spins=["mu", "e"]):
        """Create a MuonSpinSystem.

        Keyword Arguments:
            spins {list} -- Particle symbols, as for SpinSystem; must contain
                            exactly one 'mu' (default: {['mu', 'e']})

        Raises:
            ValueError -- If the list does not contain exactly one muon
        """
        super(MuonSpinSystem, self).__init__(spins)

        # Identify the muon index
        if self._spins.count("mu") != 1:
            raise ValueError(
                "Spins passed to MuonSpinSystem must contain" " exactly one muon"
            )

        self._mu_i = self._spins.index("mu")
        # Indices of all electrons in the system (may be empty)
        self._e_i = set([i for i, s in enumerate(self.spins) if s == "e"])

        # For convenience, store the operators for the muon
        self._mu_ops = [self.operator({self._mu_i: e}) for e in "xyz"]

    @property
    def muon_index(self):
        # Index of the single muon in this system
        return self._mu_i

    @property
    def elec_indices(self):
        # Set of indices of all electrons in this system
        return self._e_i

    def add_hyperfine_term(self, i, A, j=None):
        """Add a hyperfine term

        Add a hyperfine term for a given spin, provided that an electron is
        present.

        Arguments:
            i {int} -- Index of the spin (must be different from electron)
            A {[type]} -- Hyperfine tensor (in MHz)
            j {int} -- Index of the electron spin. If not specified uses the
                       one that is present, if there is one (default: None)

        Returns:
            DoubleTerm -- The term just created

        Raises:
            ValueError -- Invalid index
        """
        elec_i = self.elec_indices
        if j is None:
            if len(elec_i) > 1:
                raise ValueError(
                    "Must specify an electron index in system "
                    "with multiple electrons"
                )
            else:
                # Single electron: use it implicitly
                j = list(elec_i)[0]
        else:
            if j not in elec_i:
                raise ValueError(
                    "Second index in hyperfine coupling must" " refer to an electron"
                )
        if i in elec_i:
            raise ValueError(
                "First index in hyperfine coupling must" " not refer to an electron"
            )

        logging.info("Adding hyperfine term to spins {0}-{1}".format(i + 1, j + 1))

        return self.add_bilinear_term(i, j, A, "Hyperfine")

    def muon_operator(self, v):
        """Get a muon operator

        Get a single operator for the muon, given a vector representing its
        direction. Uses precalculated operators for speed.

        Arguments:
            v {[float]} -- 3-dimensional vector representing the directions of
                           the desired operator

        Returns:
            mu_op {SpinOperator} -- Requested operator

        Raises:
            ValueError -- Invalid length of v
        """
        if len(v) != 3:
            raise ValueError(
                "Vector passed to muon_operator must be three" " dimensional"
            )

        # Linear combination v_x*S_x + v_y*S_y + v_z*S_z of the cached
        # muon operators
        op = [x * self._mu_ops[i] for i, x in enumerate(v)]
        op = sum(op[1:], op[0])

        return op
| 26.65407 | 86 | 0.552841 |
df8cc1a478e941024e150e596913f2f6a96dc72b | 11,229 | py | Python | arrow/parser.py | czechben/arrow | c67fb5c26dc646536d0a81752568fcdc761a9076 | [
"Apache-2.0"
] | null | null | null | arrow/parser.py | czechben/arrow | c67fb5c26dc646536d0a81752568fcdc761a9076 | [
"Apache-2.0"
] | 3 | 2018-09-28T08:30:05.000Z | 2018-09-28T08:56:55.000Z | arrow/parser.py | pierorex/arrow | 2d1a8a4f0b699c54a52d62622281a061b86aadac | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import datetime
from dateutil import tz
import re
try:
from functools import lru_cache
except ImportError: # pragma: no cover
from backports.functools_lru_cache import lru_cache # pragma: no cover
from arrow import locales
class ParserError(RuntimeError):
    """Raised when a string cannot be parsed as a date/time or timezone."""
    pass
class DateTimeParser(object):
    """Parse date/time strings against arrow-style format tokens.

    Format strings use tokens such as ``YYYY``, ``MM``, ``DD``, ``HH``,
    ``mm``, ``ss``, ``S`` (sub-second), ``Z``/``ZZ``/``ZZZ`` (timezone),
    ``a``/``A`` (meridian) and ``X`` (unix timestamp).  Literal text may be
    escaped inside square brackets, e.g. ``[on] DD``.
    """

    # Matches any recognised format token inside a format string
    _FORMAT_RE = re.compile('(YYY?Y?|MM?M?M?|Do|DD?D?D?|d?d?d?d|HH?|hh?|mm?|ss?|S+|ZZ?Z?|a|A|X)')
    # Matches bracketed literal text, which must not be tokenised
    _ESCAPE_RE = re.compile('\[[^\[\]]*\]')

    _ONE_OR_MORE_DIGIT_RE = re.compile('\d+')
    _ONE_OR_TWO_DIGIT_RE = re.compile('\d{1,2}')
    _FOUR_DIGIT_RE = re.compile('\d{4}')
    _TWO_DIGIT_RE = re.compile('\d{2}')
    # Numeric UTC offsets such as +01:00, -0500 or +01
    _TZ_RE = re.compile('[+\-]?\d{2}:?(\d{2})?')
    # Timezone names such as Europe/Paris
    _TZ_NAME_RE = re.compile('\w[\w+\-/]+')

    # Locale-independent token -> input pattern mapping; locale-dependent
    # tokens (month/day names, meridians, ordinals) are added in __init__
    _BASE_INPUT_RE_MAP = {
        'YYYY': _FOUR_DIGIT_RE,
        'YY': _TWO_DIGIT_RE,
        'MM': _TWO_DIGIT_RE,
        'M': _ONE_OR_TWO_DIGIT_RE,
        'DD': _TWO_DIGIT_RE,
        'D': _ONE_OR_TWO_DIGIT_RE,
        'HH': _TWO_DIGIT_RE,
        'H': _ONE_OR_TWO_DIGIT_RE,
        'hh': _TWO_DIGIT_RE,
        'h': _ONE_OR_TWO_DIGIT_RE,
        'mm': _TWO_DIGIT_RE,
        'm': _ONE_OR_TWO_DIGIT_RE,
        'ss': _TWO_DIGIT_RE,
        's': _ONE_OR_TWO_DIGIT_RE,
        'X': re.compile('\d+'),
        'ZZZ': _TZ_NAME_RE,
        'ZZ': _TZ_RE,
        'Z': _TZ_RE,
        'S': _ONE_OR_MORE_DIGIT_RE,
    }

    # Building blocks for the ISO-8601 date formats tried by parse_iso
    MARKERS = ['YYYY', 'MM', 'DD']
    SEPARATORS = ['-', '/', '.']

    def __init__(self, locale='en_us', cache_size=0):
        """Create a parser for the given locale.

        Keyword Arguments:
            locale -- locale name used for month/day names and meridians
            cache_size -- if > 0, memoize compiled format patterns with an
                          LRU cache of that size
        """
        self.locale = locales.get_locale(locale)
        self._input_re_map = self._BASE_INPUT_RE_MAP.copy()
        # Locale-dependent tokens
        self._input_re_map.update({
            'MMMM': self._choice_re(self.locale.month_names[1:], re.IGNORECASE),
            'MMM': self._choice_re(self.locale.month_abbreviations[1:],
                                   re.IGNORECASE),
            'Do': re.compile(self.locale.ordinal_day_re),
            'dddd': self._choice_re(self.locale.day_names[1:], re.IGNORECASE),
            'ddd': self._choice_re(self.locale.day_abbreviations[1:],
                                   re.IGNORECASE),
            'd': re.compile(r"[1-7]"),
            'a': self._choice_re(
                (self.locale.meridians['am'], self.locale.meridians['pm'])
            ),
            # note: 'A' token accepts both 'am/pm' and 'AM/PM' formats to
            # ensure backwards compatibility of this token
            'A': self._choice_re(self.locale.meridians.values())
        })
        if cache_size > 0:
            # Wrap the pattern generator in an LRU cache bound to this
            # instance (replaces the bound method with a cached callable)
            self._generate_pattern_re =\
                lru_cache(maxsize=cache_size)(self._generate_pattern_re)

    def parse_iso(self, string):
        """Parse an ISO-8601-style string by inferring a format for it."""
        # Time part present if there is a 'T' or an internal space divider
        has_time = 'T' in string or ' ' in string.strip()
        space_divider = ' ' in string.strip()

        if has_time:
            if space_divider:
                date_string, time_string = string.split(' ', 1)
            else:
                date_string, time_string = string.split('T', 1)
            # A '+' or '-' after the time marks a numeric timezone offset
            time_parts = re.split('[+-]', time_string, 1)
            has_tz = len(time_parts) > 1
            has_seconds = time_parts[0].count(':') > 1
            has_subseconds = re.search('[.,]', time_parts[0])

            if has_subseconds:
                # Preserve whichever sub-second separator ('.' or ',') was used
                formats = ['YYYY-MM-DDTHH:mm:ss%sS' % has_subseconds.group()]
            elif has_seconds:
                formats = ['YYYY-MM-DDTHH:mm:ss']
            else:
                formats = ['YYYY-MM-DDTHH:mm']
        else:
            has_tz = False
            # generate required formats: YYYY-MM-DD, YYYY-MM, YYYY
            # using various separators: -, /, .
            l = len(self.MARKERS)
            formats = [separator.join(self.MARKERS[:l-i])
                       for i in range(l)
                       for separator in self.SEPARATORS]

        if has_time and has_tz:
            formats = [f + 'Z' for f in formats]

        if space_divider:
            formats = [item.replace('T', ' ', 1) for item in formats]

        return self._parse_multiformat(string, formats)

    def _generate_pattern_re(self, fmt):
        """Compile a format string into (token list, input regex)."""
        # fmt is a string of tokens like 'YYYY-MM-DD'
        # we construct a new string by replacing each
        # token by its pattern:
        # 'YYYY-MM-DD' -> '(?P<YYYY>\d{4})-(?P<MM>\d{2})-(?P<DD>\d{2})'
        tokens = []
        offset = 0

        # Escape all special RegEx chars
        escaped_fmt = re.escape(fmt)

        # Extract the bracketed expressions to be reinserted later.
        escaped_fmt = re.sub(self._ESCAPE_RE, "#", escaped_fmt)
        # Any number of S is the same as one.
        escaped_fmt = re.sub('S+', 'S', escaped_fmt)
        escaped_data = re.findall(self._ESCAPE_RE, fmt)

        fmt_pattern = escaped_fmt

        for m in self._FORMAT_RE.finditer(escaped_fmt):
            token = m.group(0)
            try:
                input_re = self._input_re_map[token]
            except KeyError:
                raise ParserError('Unrecognized token \'{0}\''.format(token))
            input_pattern = '(?P<{0}>{1})'.format(token, input_re.pattern)
            tokens.append(token)
            # a pattern doesn't have the same length as the token
            # it replaces! We keep the difference in the offset variable.
            # This works because the string is scanned left-to-right and matches
            # are returned in the order found by finditer.
            fmt_pattern = fmt_pattern[:m.start() + offset] + input_pattern + fmt_pattern[m.end() + offset:]
            offset += len(input_pattern) - (m.end() - m.start())

        final_fmt_pattern = ""
        # '#' was escaped to '\#' by re.escape, so split on that
        a = fmt_pattern.split("\#")
        b = escaped_data

        # Due to the way Python splits, 'a' will always be longer
        for i in range(len(a)):
            final_fmt_pattern += a[i]
            if i < len(b):
                # Strip the surrounding brackets from the literal text
                final_fmt_pattern += b[i][1:-1]

        return tokens, re.compile(final_fmt_pattern, flags=re.IGNORECASE)

    def parse(self, string, fmt):
        """Parse *string* with a format string (or list of candidates)."""
        if isinstance(fmt, list):
            return self._parse_multiformat(string, fmt)

        fmt_tokens, fmt_pattern_re = self._generate_pattern_re(fmt)

        match = fmt_pattern_re.search(string)
        if match is None:
            raise ParserError('Failed to match \'{0}\' when parsing \'{1}\''
                              .format(fmt_pattern_re.pattern, string))
        parts = {}
        for token in fmt_tokens:
            if token == 'Do':
                # The locale's ordinal_day_re defines a named group 'value'
                value = match.group('value')
            else:
                value = match.group(token)
            self._parse_token(token, value, parts)
        return self._build_datetime(parts)

    def _parse_token(self, token, value, parts):
        """Convert a single matched token value into the *parts* dict."""
        if token == 'YYYY':
            parts['year'] = int(value)
        elif token == 'YY':
            # Two-digit years: 69-99 -> 1969-1999, 00-68 -> 2000-2068
            value = int(value)
            parts['year'] = 1900 + value if value > 68 else 2000 + value
        elif token in ['MMMM', 'MMM']:
            parts['month'] = self.locale.month_number(value.lower())
        elif token in ['MM', 'M']:
            parts['month'] = int(value)
        elif token in ['DD', 'D']:
            parts['day'] = int(value)
        elif token in ['Do']:
            parts['day'] = int(value)
        elif token.upper() in ['HH', 'H']:
            parts['hour'] = int(value)
        elif token in ['mm', 'm']:
            parts['minute'] = int(value)
        elif token in ['ss', 's']:
            parts['second'] = int(value)
        elif token == 'S':
            # We have the *most significant* digits of an arbitrary-precision integer.
            # We want the six most significant digits as an integer, rounded.
            # FIXME: add nanosecond support somehow?
            value = value.ljust(7, str('0'))

            # floating-point (IEEE-754) defaults to half-to-even rounding
            seventh_digit = int(value[6])
            if seventh_digit == 5:
                rounding = int(value[5]) % 2
            elif seventh_digit > 5:
                rounding = 1
            else:
                rounding = 0

            parts['microsecond'] = int(value[:6]) + rounding
        elif token == 'X':
            parts['timestamp'] = int(value)
        elif token in ['ZZZ', 'ZZ', 'Z']:
            parts['tzinfo'] = TzinfoParser.parse(value)
        elif token in ['a', 'A']:
            if value in (
                self.locale.meridians['am'],
                self.locale.meridians['AM']
            ):
                parts['am_pm'] = 'am'
            elif value in (
                self.locale.meridians['pm'],
                self.locale.meridians['PM']
            ):
                parts['am_pm'] = 'pm'

    @staticmethod
    def _build_datetime(parts):
        """Assemble a datetime from the collected *parts* dict."""
        # A unix timestamp overrides all the other fields
        timestamp = parts.get('timestamp')

        if timestamp:
            tz_utc = tz.tzutc()
            return datetime.fromtimestamp(timestamp, tz=tz_utc)

        # Normalise a 12-hour clock to 24 hours
        am_pm = parts.get('am_pm')
        hour = parts.get('hour', 0)

        if am_pm == 'pm' and hour < 12:
            hour += 12
        elif am_pm == 'am' and hour == 12:
            hour = 0

        return datetime(year=parts.get('year', 1), month=parts.get('month', 1),
                        day=parts.get('day', 1), hour=hour, minute=parts.get('minute', 0),
                        second=parts.get('second', 0), microsecond=parts.get('microsecond', 0),
                        tzinfo=parts.get('tzinfo'))

    def _parse_multiformat(self, string, formats):
        """Try each format in turn and return the first successful parse."""
        _datetime = None

        for fmt in formats:
            try:
                _datetime = self.parse(string, fmt)
                break
            except ParserError:
                pass

        if _datetime is None:
            raise ParserError('Could not match input to any of {0} on \'{1}\''.format(formats, string))

        return _datetime

    @staticmethod
    def _map_lookup(input_map, key):
        """Dict lookup that raises ParserError instead of KeyError."""
        try:
            return input_map[key]
        except KeyError:
            raise ParserError('Could not match "{0}" to {1}'.format(key, input_map))

    @staticmethod
    def _try_timestamp(string):
        """Return *string* as a float, or None if it is not numeric."""
        # NOTE(review): bare except also swallows e.g. KeyboardInterrupt;
        # 'except (TypeError, ValueError)' would be safer here
        try:
            return float(string)
        except:
            return None

    @staticmethod
    def _choice_re(choices, flags=0):
        """Build a regex alternation group from an iterable of literals."""
        return re.compile('({0})'.format('|'.join(choices)), flags=flags)
class TzinfoParser(object):
    """Turn a timezone expression into a dateutil tzinfo object.

    Accepts 'local', 'utc'/'UTC', numeric offsets such as '+01:00' or
    '-0500', and named zones such as 'Europe/Paris'.
    """

    # Optional sign, two-digit hours, optional ':' and two-digit minutes
    _TZINFO_RE = re.compile('([+\-])?(\d\d):?(\d\d)?')

    @classmethod
    def parse(cls, string):
        """Parse *string* into a tzinfo, raising ParserError on failure."""
        if string == 'local':
            result = tz.tzlocal()
        elif string in ('utc', 'UTC'):
            result = tz.tzutc()
        else:
            offset_match = cls._TZINFO_RE.match(string)
            if offset_match:
                # Numeric UTC offset: convert to signed seconds
                sign, hours, minutes = offset_match.groups()
                seconds = int(hours) * 3600 + int(minutes or 0) * 60
                if sign == '-':
                    seconds = -seconds
                result = tz.tzoffset(None, seconds)
            else:
                # Fall back to a named zone lookup (may yield None)
                result = tz.gettz(string)

        if result is None:
            raise ParserError('Could not parse timezone expression "{0}"'.format(string))

        return result
| 32.267241 | 107 | 0.535934 |
af0393c5af2b67751012663bc41e7fdb6bdf64ee | 5,055 | py | Python | data_mining/etl.py | PalmerTurley34/DS | a7c8178c3af9804749d1b9a6acd2f6ba5abe8c5b | [
"MIT"
] | null | null | null | data_mining/etl.py | PalmerTurley34/DS | a7c8178c3af9804749d1b9a6acd2f6ba5abe8c5b | [
"MIT"
] | 2 | 2020-08-21T03:20:38.000Z | 2020-08-27T01:04:56.000Z | data_mining/etl.py | PalmerTurley34/DS | a7c8178c3af9804749d1b9a6acd2f6ba5abe8c5b | [
"MIT"
] | 3 | 2020-08-20T02:06:48.000Z | 2020-08-27T03:37:12.000Z | import os
import sys
import sqlite3
import glob
import zipfile
from urllib.request import urlretrieve
import pandas as pd
class KickstartDatabase():
    """Wrapper around a SQLite table of Kickstarter projects.

    Supports (re)initialisation from a bundled seed CSV and incremental
    updates from the zipped monthly CSV dumps published at
    https://webrobots.io/kickstarter-datasets/.
    """

    def __init__(self, db_connection, columns):
        # Open SQLite connection and the subset of columns kept in the table
        self.connection = db_connection
        self.common_columns = columns

    def reset_db(self):
        '''
        Reset database with the initial contents from raw_data.csv
        '''
        # Retrieve CSV into dataframe and filter to just selected columns
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, '../data_model/data/raw_data.csv')
        df = pd.read_csv(filename)
        df = df[self.common_columns]
        # Create table in database, dropping if previously exists
        df.to_sql('kickstart', self.connection, if_exists='replace',
                  index=False)

    def update_db(self, zip_url):
        '''
        Retireve new datasets from https://webrobots.io/kickstarter-datasets/.
        In each dataset there is a zip file with individual CSV files. Open
        each and concatenate to the current data from the database, without
        introducing duplicates.
        We totally rewrite the DB table to also insure there are no duplicates
        from previous updates.
        Arguments:
            zip_url: the url to the zip file from
                https://webrobots.io/kickstarter-datasets/
        '''
        # Retrieve current data from database
        df = pd.read_sql('SELECT * FROM kickstart', self.connection)
        # Obtain the zip file with new data
        print('Fetching zip file...')
        # urlretrieve returns (local_filename, headers)
        # NOTE(review): the downloaded temp file is never removed afterwards;
        # consider deleting csv_files[0] once extraction is done
        csv_files = urlretrieve(zip_url)
        dirname = os.path.dirname(__file__)
        filepath = os.path.join(dirname, 'csv_files')
        # unzip into a local temp directory
        print('Extracting CSV files...')
        with zipfile.ZipFile(csv_files[0], 'r') as zip_ref:
            zip_ref.extractall(filepath)
        # process each CSV file
        filepath = os.path.join(dirname, 'csv_files/*.csv')
        count = 1
        for filename in glob.glob(filepath):
            print(f'Processing CSV file {count}\r', end='')
            # load and filter
            new_df = pd.read_csv(filename)
            # We'll drop any that are live or suspended, and get them
            # later when they have a final status
            cond = (new_df['state'] != 'live') & \
                   (new_df['state'] != 'suspended')
            new_df = new_df[cond]
            # Convert to binary status
            new_df['state'] = new_df['state'].replace({'failed': 0,
                                                       'canceled': 0,
                                                       'successful': 1})
            # Convert columns and filter
            new_df = new_df.rename(columns={'blurb': 'desc',
                                            'state': 'final_status'})
            new_df = new_df[self.common_columns]
            # Remove used CSV file
            os.remove(filename)
            count += 1
            # concatenate to dataframe, keeping only one row per project name
            df = pd.concat([df, new_df], sort=False). \
                drop_duplicates(subset=['name'])
        # Update database, dropping an replacing table with larger dataframe
        print('\nWriting database...')
        df.to_sql('kickstart', self.connection, if_exists='replace',
                  index=False)
if __name__ == '__main__':
    # Columns shared between the seed CSV and the downloaded dumps
    common_columns = ['deadline',
                      'created_at',
                      'backers_count',
                      'launched_at',
                      'disable_communication',
                      'currency',
                      'name',
                      'country',
                      'goal',
                      'final_status',
                      'state_changed_at',
                      'desc']
    # Open (or create) the SQLite database next to the data model
    dirname = os.path.dirname(__file__)
    filename = os.path.join(dirname, '../data_model/data/kickstart.sqlite')
    connection = sqlite3.Connection(filename)
    ks_db = KickstartDatabase(connection, common_columns)
    if len(sys.argv) == 1:
        # No arguments: print usage and exit
        print('\'python etl.py reset\' to reset database')
        print('\'python etl.py update <file list>\' to add data')
    elif sys.argv[1] == 'reset':
        print('Resetting database to original dataset...')
        ks_db.reset_db()
    elif sys.argv[1]:
        # Any other (non-empty) first argument: treat the remaining
        # arguments as years; each year has a urls/<year>.txt file listing
        # one zip URL per line
        # NOTE(review): the final 'else' below is only reachable when
        # sys.argv[1] is an empty string
        years = sys.argv[2:]
        for year in years:
            print(f'Collecting {year}')
            dirname = os.path.dirname(__file__)
            filename = os.path.join(dirname, f'urls/{year}.txt')
            with open(filename) as fp:
                Lines = fp.readlines()
            count = 1
            for line in Lines:
                print(f'Processing file {count}')
                ks_db.update_db(line)
                count += 1
    else:
        print('Unrecognized parameter')
        print('\'python etl.py reset\' to reset database')
        print('\'python etl.py update <file list>\' to add data')
    print('Done!\n')
    connection.close()
a07d5750a5fdbe21acb1678a9def7d8d315fbc36 | 9,836 | py | Python | env/lib/python3.6/site-packages/openpyxl/writer/excel.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 34 | 2018-07-13T11:30:46.000Z | 2022-01-05T13:48:10.000Z | env/lib/python3.6/site-packages/openpyxl/writer/excel.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 4 | 2021-03-11T04:02:00.000Z | 2022-03-27T08:31:56.000Z | env/lib/python3.6/site-packages/openpyxl/writer/excel.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 22 | 2018-07-13T11:30:48.000Z | 2021-09-25T13:30:08.000Z | from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
"""Write a .xlsx file."""
# Python stdlib imports
from io import BytesIO
import re
from zipfile import ZipFile, ZIP_DEFLATED
# package imports
from openpyxl.utils.exceptions import InvalidFileException
from openpyxl.xml.constants import (
ARC_SHARED_STRINGS,
ARC_CONTENT_TYPES,
ARC_ROOT_RELS,
ARC_WORKBOOK_RELS,
ARC_APP, ARC_CORE,
ARC_THEME,
ARC_STYLE,
ARC_WORKBOOK,
PACKAGE_WORKSHEETS,
PACKAGE_CHARTSHEETS,
PACKAGE_DRAWINGS,
PACKAGE_CHARTS,
PACKAGE_IMAGES,
PACKAGE_XL
)
from openpyxl.drawing.spreadsheet_drawing import SpreadsheetDrawing
from openpyxl.xml.functions import tostring, fromstring, Element
from openpyxl.packaging.manifest import Manifest
from openpyxl.packaging.relationship import (
get_rels_path,
RelationshipList,
Relationship,
)
from openpyxl.packaging.extended import ExtendedProperties
from openpyxl.writer.strings import write_string_table
from openpyxl.writer.workbook import (
write_root_rels,
write_workbook_rels,
write_workbook,
)
from openpyxl.writer.theme import write_theme
from openpyxl.writer.worksheet import write_worksheet
from openpyxl.styles.stylesheet import write_stylesheet
from openpyxl.comments.comment_sheet import CommentSheet
class ExcelWriter(object):
"""Write a workbook object to an Excel file."""
def __init__(self, workbook, archive):
self._archive = archive
self.workbook = workbook
self.manifest = Manifest()
self.vba_modified = set()
self._tables = []
self._charts = []
self._images = []
self._drawings = []
self._comments = []
self._pivots = []
def write_data(self):
"""Write the various xml files into the zip archive."""
# cleanup all worksheets
archive = self._archive
archive.writestr(ARC_ROOT_RELS, write_root_rels(self.workbook))
props = ExtendedProperties()
archive.writestr(ARC_APP, tostring(props.to_tree()))
archive.writestr(ARC_CORE, tostring(self.workbook.properties.to_tree()))
if self.workbook.loaded_theme:
archive.writestr(ARC_THEME, self.workbook.loaded_theme)
else:
archive.writestr(ARC_THEME, write_theme())
self._write_worksheets()
self._write_chartsheets()
self._write_images()
self._write_charts()
self._archive.writestr(ARC_SHARED_STRINGS,
write_string_table(self.workbook.shared_strings))
self._write_external_links()
stylesheet = write_stylesheet(self.workbook)
archive.writestr(ARC_STYLE, tostring(stylesheet))
archive.writestr(ARC_WORKBOOK, write_workbook(self.workbook))
archive.writestr(ARC_WORKBOOK_RELS, write_workbook_rels(self.workbook))
self._merge_vba()
self.manifest._write(archive, self.workbook)
def _merge_vba(self):
"""
If workbook contains macros then extract associated files from cache
of old file and add to archive
"""
ARC_VBA = re.compile("|".join(
('xl/vba', r'xl/drawings/.*vmlDrawing\d\.vml',
'xl/ctrlProps', 'customUI', 'xl/activeX', r'xl/media/.*\.emf')
)
)
if self.workbook.vba_archive:
for name in set(self.workbook.vba_archive.namelist()) - self.vba_modified:
if ARC_VBA.match(name):
self._archive.writestr(name, self.workbook.vba_archive.read(name))
def _write_images(self):
# delegate to object
for img in self._images:
self._archive.writestr(img.path[1:], img._data())
def _write_charts(self):
# delegate to object
if len(self._charts) != len(set(self._charts)):
raise InvalidFileException("The same chart cannot be used in more than one worksheet")
for chart in self._charts:
self._archive.writestr(chart.path[1:], tostring(chart._write()))
self.manifest.append(chart)
def _write_drawing(self, drawing):
"""
Write a drawing
"""
self._drawings.append(drawing)
drawing._id = len(self._drawings)
for chart in drawing.charts:
self._charts.append(chart)
chart._id = len(self._charts)
for img in drawing.images:
self._images.append(img)
img._id = len(self._images)
rels_path = get_rels_path(drawing.path)[1:]
self._archive.writestr(drawing.path[1:], tostring(drawing._write()))
self._archive.writestr(rels_path, tostring(drawing._write_rels()))
self.manifest.append(drawing)
def _write_chartsheets(self):
for idx, sheet in enumerate(self.workbook.chartsheets, 1):
sheet._id = idx
xml = tostring(sheet.to_tree())
self._archive.writestr(sheet.path[1:], xml)
self.manifest.append(sheet)
if sheet._drawing:
self._write_drawing(sheet._drawing)
rel = Relationship(type="drawing", Target=sheet._drawing.path)
rels = RelationshipList()
rels.append(rel)
tree = rels.to_tree()
rels_path = get_rels_path(sheet.path[1:])
self._archive.writestr(rels_path, tostring(tree))
def _write_comment(self, ws):
cs = CommentSheet.from_comments(ws._comments)
self._comments.append(cs)
cs._id = len(self._comments)
self._archive.writestr(cs.path[1:], tostring(cs.to_tree()))
self.manifest.append(cs)
if ws.legacy_drawing is None:
ws.legacy_drawing = 'xl/drawings/commentsDrawing{0}.vml'.format(cs._id)
vml = None
else:
vml = fromstring(self.workbook.vba_archive.read(ws.legacy_drawing))
vml = cs.write_shapes(vml)
self._archive.writestr(ws.legacy_drawing, vml)
self.vba_modified.add(ws.legacy_drawing)
comment_rel = Relationship(Id="comments", type=cs._rel_type, Target=cs.path)
ws._rels.append(comment_rel)
def _write_worksheets(self):
pivot_caches = set()
for idx, ws in enumerate(self.workbook.worksheets, 1):
ws._id = idx
xml = ws._write()
rels_path = get_rels_path(ws.path)[1:]
self._archive.writestr(ws.path[1:], xml)
self.manifest.append(ws)
if ws._drawing:
self._write_drawing(ws._drawing)
for r in ws._rels.Relationship:
if "drawing" in r.Type:
r.Target = ws._drawing.path
if ws._comments:
self._write_comment(ws)
if ws.legacy_drawing is not None:
shape_rel = Relationship(type="vmlDrawing", Id="anysvml",
Target="/" + ws.legacy_drawing)
ws._rels.append(shape_rel)
for t in ws._tables:
self._tables.append(t)
t.id = len(self._tables)
t._write(self._archive)
self.manifest.append(t)
ws._rels[t._rel_id].Target = t.path
for p in ws._pivots:
if p.cache not in pivot_caches:
pivot_caches.add(p.cache)
p.cache._id = len(pivot_caches)
self._pivots.append(p)
p._id = len(self._pivots)
p._write(self._archive, self.manifest)
self.workbook._pivots.append(p)
r = Relationship(Type=p.rel_type, Target=p.path)
ws._rels.append(r)
if ws._rels:
tree = ws._rels.to_tree()
self._archive.writestr(rels_path, tostring(tree))
def _write_external_links(self):
# delegate to object
"""Write links to external workbooks"""
wb = self.workbook
for idx, link in enumerate(wb._external_links, 1):
link._id = idx
rels_path = get_rels_path(link.path[1:])
xml = link.to_tree()
self._archive.writestr(link.path[1:], tostring(xml))
rels = RelationshipList()
rels.append(link.file_link)
self._archive.writestr(rels_path, tostring(rels.to_tree()))
self.manifest.append(link)
    def save(self, filename):
        """Write data into the archive.

        NOTE(review): *filename* is unused here; the ZipFile was opened by
        the caller and is simply finalised after the parts are written.
        """
        self.write_data()
        self._archive.close()
def save_workbook(workbook, filename):
    """Save the given workbook on the filesystem under the name filename.

    :param workbook: the workbook to save
    :type workbook: :class:`openpyxl.workbook.Workbook`
    :param filename: the path to which save the workbook
    :type filename: string
    :rtype: bool
    """
    archive = ZipFile(filename, 'w', ZIP_DEFLATED, allowZip64=True)
    writer = ExcelWriter(workbook, archive)
    # ExcelWriter.save() writes all parts and closes the archive.
    writer.save(filename)
    return True
def save_virtual_workbook(workbook):
    """Return an in-memory workbook, suitable for a Django response."""
    stream = BytesIO()
    archive = ZipFile(stream, 'w', ZIP_DEFLATED, allowZip64=True)
    writer = ExcelWriter(workbook, archive)
    try:
        writer.write_data()
    finally:
        # Close the archive even if serialisation fails part-way.
        archive.close()
    contents = stream.getvalue()
    stream.close()
    return contents
def save_dump(workbook, filename):
    """
    Save a write-only workbook to *filename*.

    A write-only workbook must contain at least one sheet, so an empty
    one is created on demand before serialisation.
    """
    if workbook.worksheets == []:
        workbook.create_sheet()
    archive = ZipFile(filename, 'w', ZIP_DEFLATED, allowZip64=True)
    writer = ExcelWriter(workbook, archive)
    writer.save(filename)
    return True
| 31.225397 | 98 | 0.622509 |
9c66c5b40e1470d881ed05ac0c0b9c068579cd24 | 407 | py | Python | src/entities/vegetations/three.py | Izoniks-prog/Dreams-Island | 062dd1c5cca102b304642e278c155ce21ce971b2 | [
"MIT"
] | 1 | 2021-02-17T20:47:29.000Z | 2021-02-17T20:47:29.000Z | src/entities/vegetations/three.py | Izoniks-prog/Dreams-Island | 062dd1c5cca102b304642e278c155ce21ce971b2 | [
"MIT"
] | null | null | null | src/entities/vegetations/three.py | Izoniks-prog/Dreams-Island | 062dd1c5cca102b304642e278c155ce21ce971b2 | [
"MIT"
] | null | null | null | import pygame
from src.entities.vegetations.vegetation import Vegetation
# Tile dimensions in pixels used to map grid coordinates to screen space.
SIZE_X = 17
SIZE_Y = 16
class Three(Vegetation):
    # NOTE(review): the name is presumably a misspelling of "Tree";
    # renaming would break importers, so it is kept as-is.
    def __init__(self, x: int, y: int, path: str):
        """Create a tree sprite at grid cell (x, y) from the image at *path*.

        Grid coordinates are converted to pixel coordinates with the
        module-level SIZE_X / SIZE_Y tile dimensions.
        """
        super().__init__(x, y, path)
        self.image = pygame.image.load(path)
        self.rect = self.image.get_rect()
        # Keep the grid coordinates for game logic.
        self.x = x
        self.y = y
        # Position the sprite rectangle in pixel space.
        self.rect.x = x * SIZE_X
        self.rect.y = y * SIZE_Y
625b77ef08faa2dd5966f2c98a08cfeef17afa71 | 189 | py | Python | acmicpc/9933/9933.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 3 | 2019-03-09T05:19:23.000Z | 2019-04-06T09:26:36.000Z | acmicpc/9933/9933.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2020-02-23T10:38:04.000Z | 2020-02-23T10:38:04.000Z | acmicpc/9933/9933.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2019-05-22T13:47:53.000Z | 2019-05-22T13:47:53.000Z | N = int(input())
words = [input() for _ in range(N)]
# Use a set for O(1) membership tests instead of scanning the whole list
# for every word (the original was O(n^2) overall).
word_set = set(words)
for word in words:
    # Report the first word whose mirror image also appears in the input:
    # its length and its middle character.
    if word[::-1] in word_set:
        print(len(word), end=' ')
        print(word[len(word) // 2])
        break
3feb92bb605d010e2f487acca009c5150ef58061 | 18,616 | py | Python | yardstick/network_services/traffic_profile/traffic_profile.py | kkltcjk/yardstick-moon | de48b16d1385cc26e83f8886d148d642c59e3d64 | [
"Apache-2.0"
] | null | null | null | yardstick/network_services/traffic_profile/traffic_profile.py | kkltcjk/yardstick-moon | de48b16d1385cc26e83f8886d148d642c59e3d64 | [
"Apache-2.0"
] | null | null | null | yardstick/network_services/traffic_profile/traffic_profile.py | kkltcjk/yardstick-moon | de48b16d1385cc26e83f8886d148d642c59e3d64 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Trex Traffic Profile definitions """
from __future__ import absolute_import
import struct
import socket
import logging
from random import SystemRandom
import six
from yardstick.network_services.traffic_profile.base import TrafficProfile
from trex_stl_lib.trex_stl_client import STLStream
from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
from trex_stl_lib.trex_stl_streams import STLTXCont
from trex_stl_lib.trex_stl_streams import STLProfile
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmWrFlowVar
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVarRepeatableRandom
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVar
from trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
from trex_stl_lib.trex_stl_packet_builder_scapy import STLScVmRaw
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFixIpv4
from trex_stl_lib import api as Pkt
# Field-direction and protocol-layer name constants used by TrexProfile
# when selecting scapy layers and building TRex field-engine programs.
SRC = 'src'
DST = 'dst'
ETHERNET = 'Ethernet'
IP = 'IP'
IPv6 = 'IPv6'
UDP = 'UDP'
DSCP = 'DSCP'
SRC_PORT = 'sport'
DST_PORT = 'dport'
TYPE_OF_SERVICE = 'tos'
class TrexProfile(TrafficProfile):
    """This class handles Trex traffic profile generation and execution.

    The YAML profile definition is translated into scapy packet templates
    plus TRex field-engine (VM) instructions, which are assembled into STL
    streams and finally an :class:`STLProfile`.

    Bug fix: ``_dscp_range_action_partial`` previously defined its inner
    ``partial`` setter but never returned it, so any DSCP *range* in a
    profile crashed with ``TypeError: 'NoneType' object is not callable``
    inside :meth:`_call_on_range`.  It now returns the setter like every
    sibling ``_*_range_action_partial`` method.
    """

    # protocol name -> (attribute that stores the scapy layer, layer class)
    PROTO_MAP = {
        ETHERNET: ('ether_packet', Pkt.Ether),
        IP: ('ip_packet', Pkt.IP),
        IPv6: ('ip6_packet', Pkt.IPv6),
        UDP: ('udp_packet', Pkt.UDP),
    }

    def _general_single_action_partial(self, protocol):
        """Return a factory of single-value field setters for *protocol*."""
        def f(field):
            def partial(value):
                kwargs = {
                    field: value
                }
                self._set_proto_fields(protocol, **kwargs)
            return partial
        return f

    def _ethernet_range_action_partial(self, direction, _):
        """Return a setter programming a MAC-address flow var for *direction*.

        NOTE(review): the flow var uses the fixed range 1..30 and ignores
        the ``min_value``/``max_value`` arguments -- preserved as-is;
        confirm whether this is intentional before relying on MAC ranges.
        """
        def partial(min_value, max_value):
            stl_vm_flow_var = STLVmFlowVar(name="mac_{}".format(direction),
                                           min_value=1,
                                           max_value=30,
                                           size=4,
                                           op='inc',
                                           step=1)
            self.vm_flow_vars.append(stl_vm_flow_var)
            stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='mac_{}'.format(direction),
                                                pkt_offset='Ether.{}'.format(direction))
            self.vm_flow_vars.append(stl_vm_wr_flow_var)
        return partial

    def _ip_range_action_partial(self, direction, count=1):
        """Return a setter programming an IPv4 address range for *direction*."""
        def partial(min_value, max_value):
            stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="ip4_{}".format(direction),
                                                           min_value=min_value,
                                                           max_value=max_value,
                                                           size=4,
                                                           limit=int(count),
                                                           seed=0x1235)
            self.vm_flow_vars.append(stl_vm_flow_var)
            stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip4_{}'.format(direction),
                                                pkt_offset='IP.{}'.format(direction))
            self.vm_flow_vars.append(stl_vm_wr_flow_var)
            # The IPv4 checksum must be recomputed after rewriting addresses.
            stl_vm_fix_ipv4 = STLVmFixIpv4(offset="IP")
            self.vm_flow_vars.append(stl_vm_fix_ipv4)
        return partial

    def _ip6_range_action_partial(self, direction, _):
        """Return a setter programming an IPv6 address range for *direction*."""
        def partial(min_value, max_value):
            # Convert the textual addresses into a 64-bit numeric range.
            min_value, max_value = self._get_start_end_ipv6(min_value, max_value)
            stl_vm_flow_var = STLVmFlowVar(name="ip6_{}".format(direction),
                                           min_value=min_value,
                                           max_value=max_value,
                                           size=8,
                                           op='random',
                                           step=1)
            self.vm_flow_vars.append(stl_vm_flow_var)
            # offset_fixup=8: only the low 64 bits of the address are rewritten.
            stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip6_{}'.format(direction),
                                                pkt_offset='IPv6.{}'.format(direction),
                                                offset_fixup=8)
            self.vm_flow_vars.append(stl_vm_wr_flow_var)
        return partial

    def _dscp_range_action_partial(self, *_):
        """Return a setter programming a DSCP (IP ToS) range."""
        def partial(min_value, max_value):
            stl_vm_flow_var = STLVmFlowVar(name="dscp",
                                           min_value=min_value,
                                           max_value=max_value,
                                           size=2,
                                           op='inc',
                                           step=8)
            self.vm_flow_vars.append(stl_vm_flow_var)
            stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='dscp',
                                                pkt_offset='IP.tos')
            self.vm_flow_vars.append(stl_vm_wr_flow_var)
        # Fix: the setter must be returned so _call_on_range can invoke it.
        return partial

    def _udp_range_action_partial(self, field, count=1):
        """Return a setter programming a UDP/TCP port range for *field*."""
        def partial(min_value, max_value):
            stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="port_{}".format(field),
                                                           min_value=min_value,
                                                           max_value=max_value,
                                                           size=2,
                                                           limit=int(count),
                                                           seed=0x1235)
            self.vm_flow_vars.append(stl_vm_flow_var)
            # self.udp maps the field name to the proper TCP/UDP pkt offset.
            stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='port_{}'.format(field),
                                                pkt_offset=self.udp[field])
            self.vm_flow_vars.append(stl_vm_wr_flow_var)
        return partial

    def __init__(self, yaml_data):
        super(TrexProfile, self).__init__(yaml_data)
        self.flows = 100
        self.pps = 100
        self.pg_id = 0
        self.first_run = True
        self.streams = 1
        self.profile_data = []
        self.profile = None
        self.base_pkt = None
        self.fsize = None
        self.trex_vm = None
        self.vms = []
        self.rate = None
        # scapy layer templates; (re)built by generate_vm().
        self.ether_packet = None
        self.ip_packet = None
        self.ip6_packet = None
        self.udp_packet = None
        # Maps port-field names to packet offsets ("UDP.sport"/"TCP.sport"...).
        self.udp = {
            SRC_PORT: '',
            DST_PORT: '',
        }
        self.qinq_packet = None
        self.qinq = False
        self.vm_flow_vars = []
        self.packets = []
        self._map_proto_actions = {
            # the tuple is (single value function, range value function, if the values should be
            # converted to integer).
            ETHERNET: (self._general_single_action_partial(ETHERNET),
                       self._ethernet_range_action_partial,
                       False,
                       ),
            IP: (self._general_single_action_partial(IP),
                 self._ip_range_action_partial,
                 False,
                 ),
            IPv6: (self._general_single_action_partial(IPv6),
                   self._ip6_range_action_partial,
                   False,
                   ),
            DSCP: (self._general_single_action_partial(IP),
                   self._dscp_range_action_partial,
                   True,
                   ),
            UDP: (self._general_single_action_partial(UDP),
                  self._udp_range_action_partial,
                  True,
                  ),
        }

    def execute_traffic(self, traffic_generator):
        """ Generate the stream and run traffic on the given ports """
        raise NotImplementedError()

    def _call_on_range(self, value_range, single_action, range_action, count=1, to_int=False):
        """Dispatch *value_range* ("a" or "a-b") to the matching action.

        A single value invokes *single_action(value)*; a dash-separated
        pair invokes *range_action(min_value=..., max_value=...)*.
        (``count`` is kept for interface compatibility; it is unused here.)
        """
        def convert_to_int(val):
            return int(val) if to_int else val

        range_iter = iter(str(value_range).split('-'))
        min_value = convert_to_int(next(range_iter))
        try:
            max_value = convert_to_int(next(range_iter))
        except StopIteration:
            single_action(min_value)
        else:
            range_action(min_value=min_value, max_value=max_value)

    def _set_proto_addr(self, protocol, field, address, count=1):
        """Set a protocol *field* from *address*, which may be a range."""
        single_action, range_action, to_int = self._map_proto_actions[protocol]
        self._call_on_range(address,
                            single_action(field),
                            range_action(field, count),
                            to_int=to_int,
                            )

    def _set_proto_fields(self, protocol, **kwargs):
        """Set attributes on the scapy layer for *protocol*, creating it lazily."""
        _attr_name, _class = self.PROTO_MAP[protocol]

        if not getattr(self, _attr_name):
            setattr(self, _attr_name, _class())

        _attr = getattr(self, _attr_name)
        for key, value in six.iteritems(kwargs):
            setattr(_attr, key, value)

    def set_svlan_cvlan(self, svlan, cvlan):
        """Set the S-VLAN and C-VLAN (802.1ad QinQ) tags on the template.

        Each id may be a single value or an "a-b" range; a range picks a
        random tag within it.
        """
        self.qinq = True
        # 0x8100: 802.1Q tagged frame ethertype.
        ether_params = {'type': 0x8100}
        self._set_proto_fields(ETHERNET, **ether_params)
        svlans = str(svlan['id']).split('-')
        svlan_min = int(svlans[0])
        svlan_max = int(svlans[1]) if len(svlans) == 2 else int(svlans[0])
        if len(svlans) == 2:
            svlan = self._get_random_value(svlan_min, svlan_max)
        else:
            svlan = svlan_min
        cvlans = str(cvlan['id']).split('-')
        cvlan_min = int(cvlans[0])
        cvlan_max = int(cvlans[1]) if len(cvlans) == 2 else int(cvlans[0])
        if len(cvlans) == 2:
            cvlan = self._get_random_value(cvlan_min, cvlan_max)
        else:
            cvlan = cvlan_min
        self.qinq_packet = Pkt.Dot1Q(vlan=svlan) / Pkt.Dot1Q(vlan=cvlan)

    def set_qinq(self, qinq):
        """ set qinq in packet """
        self.set_svlan_cvlan(qinq['S-VLAN'], qinq['C-VLAN'])

    def _set_outer_l2_fields(self, outer_l2):
        """ setup outer l2 fields from traffic profile """
        # 0x800: IPv4 ethertype (overridden by QinQ / IPv6 setters below).
        ether_params = {'type': 0x800}
        self._set_proto_fields(ETHERNET, **ether_params)
        if 'srcmac' in outer_l2:
            self._set_proto_addr(ETHERNET, SRC, outer_l2['srcmac'])
        if 'dstmac' in outer_l2:
            self._set_proto_addr(ETHERNET, DST, outer_l2['dstmac'])
        if 'QinQ' in outer_l2:
            self.set_qinq(outer_l2['QinQ'])

    def _set_outer_l3v4_fields(self, outer_l3v4):
        """ setup outer l3v4 fields from traffic profile """
        ip_params = {}
        if 'proto' in outer_l3v4:
            ip_params['proto'] = socket.getprotobyname(outer_l3v4['proto'])
            if outer_l3v4['proto'] == 'tcp':
                # Switch the L4 template (and port offsets) from UDP to TCP.
                self.udp_packet = Pkt.TCP()
                self.udp[DST_PORT] = 'TCP.dport'
                self.udp[SRC_PORT] = 'TCP.sport'
                tcp_params = {'flags': '', 'window': 0}
                self._set_proto_fields(UDP, **tcp_params)
        if 'ttl' in outer_l3v4:
            ip_params['ttl'] = outer_l3v4['ttl']
        self._set_proto_fields(IP, **ip_params)
        if 'dscp' in outer_l3v4:
            self._set_proto_addr(DSCP, TYPE_OF_SERVICE, outer_l3v4['dscp'])
        if 'srcip4' in outer_l3v4:
            self._set_proto_addr(IP, SRC, outer_l3v4['srcip4'], outer_l3v4['count'])
        if 'dstip4' in outer_l3v4:
            self._set_proto_addr(IP, DST, outer_l3v4['dstip4'], outer_l3v4['count'])

    def _set_outer_l3v6_fields(self, outer_l3v6):
        """ setup outer l3v6 fields from traffic profile """
        # 0x86dd: IPv6 ethertype.
        ether_params = {'type': 0x86dd}
        self._set_proto_fields(ETHERNET, **ether_params)
        ip6_params = {}
        if 'proto' in outer_l3v6:
            ip6_params['proto'] = outer_l3v6['proto']
            if outer_l3v6['proto'] == 'tcp':
                self.udp_packet = Pkt.TCP()
                self.udp[DST_PORT] = 'TCP.dport'
                self.udp[SRC_PORT] = 'TCP.sport'
                tcp_params = {'flags': '', 'window': 0}
                self._set_proto_fields(UDP, **tcp_params)
        if 'ttl' in outer_l3v6:
            ip6_params['ttl'] = outer_l3v6['ttl']
        if 'tc' in outer_l3v6:
            ip6_params['tc'] = outer_l3v6['tc']
        if 'hlim' in outer_l3v6:
            ip6_params['hlim'] = outer_l3v6['hlim']
        self._set_proto_fields(IPv6, **ip6_params)
        if 'srcip6' in outer_l3v6:
            self._set_proto_addr(IPv6, SRC, outer_l3v6['srcip6'])
        if 'dstip6' in outer_l3v6:
            self._set_proto_addr(IPv6, DST, outer_l3v6['dstip6'])

    def _set_outer_l4_fields(self, outer_l4):
        """ setup outer l4 fields from traffic profile """
        if 'srcport' in outer_l4:
            self._set_proto_addr(UDP, SRC_PORT, outer_l4['srcport'], outer_l4['count'])
        if 'dstport' in outer_l4:
            self._set_proto_addr(UDP, DST_PORT, outer_l4['dstport'], outer_l4['count'])

    def generate_imix_data(self, packet_definition):
        """Compute per-frame-size pps from an IMIX definition.

        :param packet_definition: outer_l2 section, possibly containing a
            ``framesize`` mapping such as ``{"64B": 30, "128B": 70}``
        :return: mapping of frame size (int) -> pps share, empty if no IMIX
        :raises SystemExit: if the percentages sum to more than 100
        """
        imix_count = {}
        imix_data = {}
        if not packet_definition:
            return imix_count
        imix = packet_definition.get('framesize')
        if imix:
            for size in imix:
                data = imix[size]
                # Strip the trailing unit letter, e.g. "64B" -> 64.
                imix_data[int(size[:-1])] = int(data)
            imix_sum = sum(imix_data.values())
            if imix_sum > 100:
                raise SystemExit("Error in IMIX data")
            elif imix_sum < 100:
                # Assign any remaining share to 64-byte frames.
                imix_data[64] = imix_data.get(64, 0) + (100 - imix_sum)

            avg_size = 0.0
            for size in imix_data:
                count = int(imix_data[size])
                if count:
                    avg_size += round(size * count / 100, 2)
                    pps = round(self.pps * count / 100, 0)
                    imix_count[size] = pps
            # 1342177280 = 10 Gbit/s expressed in bytes (10 * 2**30 / 8);
            # rate is frames/s for two ports at line rate.
            self.rate = round(1342177280 / avg_size, 0) * 2
            logging.debug("Imax: %s rate: %s", imix_count, self.rate)
        return imix_count

    def get_streams(self, profile_data):
        """Generate the TRex streams for every packet in *profile_data*.

        :param profile_data: mapping of packet name -> packet definition
        :return: the assembled :class:`STLProfile`
        """
        self.streams = []
        self.pps = self.params['traffic_profile'].get('frame_rate', 100)
        for packet_name in profile_data:
            outer_l2 = profile_data[packet_name].get('outer_l2')
            imix_data = self.generate_imix_data(outer_l2)
            if not imix_data:
                imix_data = {64: self.pps}
            self.generate_vm(profile_data[packet_name])
            for size in imix_data:
                self._generate_streams(size, imix_data[size])
        self._generate_profile()
        return self.profile

    def generate_vm(self, packet_definition):
        """Build the scapy templates and field-engine VM for one packet."""
        # Reset templates to defaults before applying the definition.
        self.ether_packet = Pkt.Ether()
        self.ip_packet = Pkt.IP()
        self.ip6_packet = None
        self.udp_packet = Pkt.UDP()
        self.udp[DST_PORT] = 'UDP.dport'
        self.udp[SRC_PORT] = 'UDP.sport'
        self.qinq = False
        self.vm_flow_vars = []
        outer_l2 = packet_definition.get('outer_l2', None)
        outer_l3v4 = packet_definition.get('outer_l3v4', None)
        outer_l3v6 = packet_definition.get('outer_l3v6', None)
        outer_l4 = packet_definition.get('outer_l4', None)
        if outer_l2:
            self._set_outer_l2_fields(outer_l2)
        if outer_l3v4:
            self._set_outer_l3v4_fields(outer_l3v4)
        if outer_l3v6:
            self._set_outer_l3v6_fields(outer_l3v6)
        if outer_l4:
            self._set_outer_l4_fields(outer_l4)
        self.trex_vm = STLScVmRaw(self.vm_flow_vars)

    def generate_packets(self):
        """ generate packets from trex TG """
        base_pkt = self.base_pkt
        # Reserve 4 bytes for the Ethernet FCS appended by the NIC.
        size = self.fsize - 4
        pad = max(0, size - len(base_pkt)) * 'x'
        self.packets = [STLPktBuilder(pkt=base_pkt / pad,
                                      vm=vm) for vm in self.vms]

    def _create_single_packet(self, size=64):
        """Build one padded STL packet of *size* bytes (incl. 4-byte FCS)."""
        size = size - 4
        ether_packet = self.ether_packet
        ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
        udp_packet = self.udp_packet
        if self.qinq:
            qinq_packet = self.qinq_packet
            base_pkt = ether_packet / qinq_packet / ip_packet / udp_packet
        else:
            base_pkt = ether_packet / ip_packet / udp_packet
        pad = max(0, size - len(base_pkt)) * 'x'
        packet = STLPktBuilder(pkt=base_pkt / pad, vm=self.trex_vm)
        return packet

    def _create_single_stream(self, packet_size, pps, isg=0):
        """Create one continuous stream; attach latency stats if pg_id is set."""
        packet = self._create_single_packet(packet_size)
        if self.pg_id:
            self.pg_id += 1
            stl_flow = STLFlowLatencyStats(pg_id=self.pg_id)
            stream = STLStream(isg=isg, packet=packet, mode=STLTXCont(pps=pps),
                               flow_stats=stl_flow)
        else:
            stream = STLStream(isg=isg, packet=packet, mode=STLTXCont(pps=pps))
        return stream

    def _generate_streams(self, packet_size, pps):
        """Append one stream of *packet_size* at *pps* to the stream list."""
        self.streams.append(self._create_single_stream(packet_size, pps))

    def _generate_profile(self):
        """Wrap the accumulated streams into an STLProfile."""
        self.profile = STLProfile(self.streams)

    @classmethod
    def _get_start_end_ipv6(cls, start_ip, end_ip):
        """Convert two textual IPv6 addresses into a numeric (start, end) pair.

        Only the low 64 bits vary; the high 64 bits of *start_ip* are the base.

        :raises SystemExit: on invalid addresses or if start > end
        """
        try:
            ip1 = socket.inet_pton(socket.AF_INET6, start_ip)
            ip2 = socket.inet_pton(socket.AF_INET6, end_ip)
            hi1, lo1 = struct.unpack('!QQ', ip1)
            hi2, lo2 = struct.unpack('!QQ', ip2)
            if ((hi1 << 64) | lo1) > ((hi2 << 64) | lo2):
                raise SystemExit("IPv6: start_ip is greater then end_ip")
            max_p1 = abs(int(lo1) - int(lo2))
            base_p1 = lo1
        except Exception as ex_error:
            raise SystemExit(ex_error)
        else:
            return base_p1, max_p1 + base_p1

    @classmethod
    def _get_random_value(cls, min_port, max_port):
        """Return a cryptographically random int in [min_port, max_port)."""
        cryptogen = SystemRandom()
        return cryptogen.randrange(min_port, max_port)
8b8c5001f1f33552628800c14ee4580c819ffb45 | 585 | py | Python | solutions/d2p1.py | robbiesri/advent-of-code-2018 | bc40ea1b8259a2b832092067bd4ab02e314412a9 | [
"MIT"
] | null | null | null | solutions/d2p1.py | robbiesri/advent-of-code-2018 | bc40ea1b8259a2b832092067bd4ab02e314412a9 | [
"MIT"
] | null | null | null | solutions/d2p1.py | robbiesri/advent-of-code-2018 | bc40ea1b8259a2b832092067bd4ab02e314412a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import commonAOC
from collections import Counter
import string
inputDataFile = commonAOC.loadInputData("d2.txt")

total2count = 0
total3count = 0
for line in inputDataFile:
    # Occurrence counts for the 26 lowercase letters only (ignore any
    # other characters such as the trailing newline).
    letter_counts = Counter(line)
    occurrences = {letter_counts[letter] for letter in string.ascii_lowercase}
    # A line contributes once per bucket, no matter how many letters match.
    if 2 in occurrences:
        total2count += 1
    if 3 in occurrences:
        total3count += 1

# Advent of Code day 2 checksum: (#ids with a doubled letter) x (#ids with
# a tripled letter).
checksum = total2count * total3count
print(checksum)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.