max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
python/one-liner/cluster_of_non_0.py | Hamng/python-sources | 0 | 13400 | <reponame>Hamng/python-sources<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 8 07:38:05 2020
@author: Ham
Self Challenge: Count Cluster of Non-0s
Given a 1-dimension array of integers,
determine how many 'clusters' of non-0 in the array.
A 'cluster' is a group of consecutive non-0 values.
Scoring: a solution needs to be a 1-liner;
i.e. NO point if implementing with a traditional 'for' loop!
Sample Input (see STDIN_SIO)
A : [
9, 0, 0, 22, 0, 0, 39, 11, 3, 0, \
0, 24, 1, 0, 50, 23, 3, 44, 0, 23, \
25, 6, 36, 19, 10, 23, 0, 37, 4, 1, \
7, 12, 0, 0, 49
]
Expected Output:
8
"""
import itertools  # groupby() is used by count_non_0_clusters_2

# Sample input, kept as a string so it mimics data arriving on STDIN.
# The backslashes are line continuations *inside* the triple-quoted literal,
# purely for readability; the parser only splits on commas.
STDIN_SIO = """
9, 0, 0, 22, 0, 0, 39, 11, 3, 0, \
0, 24, 1, 0, 50, 23, 3, 44, 0, 23, \
2, 8, 20, 35, 0, 40, 34, 26, 36, 0, \
35, 19, 20, 18, 11, 43, 19, 21, 40, 0, \
14, 0, 14, 0, 0, 25, 35, 24, 49, 15, \
13, 3, 0, 10, 31, 25, 27, 37, 27, 43, \
44, 27, 8, 43, 0, 0, 33, 25, 19, 47, \
0, 29, 5, 2, 12, 8, 7, 0, 16, 36, \
0, 6, 17, 35, 36, 21, 0, 9, 1, 0, \
43, 29, 39, 15, 18, 0, 34, 26, 48, 0, \
34, 35, 7, 10, 0, 0, 15, 5, 12, 26, \
0, 37, 30, 33, 27, 34, 9, 37, 22, 0, \
0, 24, 30, 0, 0, 38, 23, 25, 0, 30, \
39, 24, 31, 0, 6, 19, 25, 0, 28, 15, \
8, 0, 48, 0, 35, 41, 0, 24, 1, 41, \
31, 0, 35, 21, 15, 26, 15, 27, 4, 0, \
8, 4, 0, 0, 2, 42, 18, 0, 28, 18, \
49, 34, 5, 10, 41, 48, 26, 14, 45, 44, \
9, 0, 49, 50, 24, 0, 0, 0, 23, 0, \
17, 0, 47, 31, 0, 42, 0, 0, 0, 40, \
46, 22, 50, 32, 20, 3, 44, 22, 0, 37, \
25, 0, 19, 26, 14, 23, 27, 41, 0, 1, \
13, 0, 48, 20, 37, 8, 0, 18, 0, 26, \
12, 19, 32, 19, 22, 0, 0, 0, 0, 0, \
16, 0, 0, 43, 0, 10, 5, 0, 6, 26, \
0, 24, 40, 29, 0, 43, 18, 27, 0, 0, \
37, 0, 46, 35, 17, 0, 20, 44, 29, 29, \
40, 33, 22, 27, 0, 0, 38, 21, 4, 0, \
0, 15, 31, 48, 36, 10, 0, 41, 0, 45, \
39, 0, 11, 9, 3, 38, 16, 0, 11, 22, \
37, 0, 3, 44, 10, 12, 47, 22, 32, 7, \
24, 1, 0, 22, 25, 0, 14, 0, 0, 0, \
23, 0, 36, 1, 42, 46, 0, 48, 0, 33, \
5, 27, 45, 0, 15, 29, 0, 50, 2, 31, \
25, 6, 36, 19, 10, 23, 0, 37, 4, 1, \
7, 12, 0, 0, 49
""".strip()
def count_non_0_clusters_1(arr):
    """Count clusters of consecutive non-zero values via string tokenising.

    Each non-zero entry is encoded as the character 'A' and each zero as a
    space.  Joining the characters produces a string whose whitespace
    separated tokens correspond exactly to the clusters, so str.split()
    does the counting for us.
    """
    encoded = "".join(" " if not value else "A" for value in arr)
    return len(encoded.split())
def count_non_0_clusters_2(arr):
    """Count clusters of consecutive non-zero values using itertools.groupby.

    groupby(arr, key=bool) partitions the array into alternating runs of
    truthy (non-zero) and falsy (zero) values, yielding one (key, group)
    pair per run.  Every pair whose key is True is exactly one cluster, so
    counting those pairs gives the answer.
    """
    runs = itertools.groupby(arr, key=bool)
    return sum(1 for is_nonzero, _group in runs if is_nonzero)
if __name__ == '__main__':
    # Parse the comma-separated sample data into a list of ints; int()
    # tolerates the surrounding whitespace left by the literal.
    a = list(map(int, STDIN_SIO.split(",")))
    # Nicely print it, 10 entries per line, with continuation
    # so can copy-n-paste back into STDIN_SIO
    #print(len(a))
    #for i in range(0, (len(a) // 10) * 10, 10):
    # print("%3u," * 10 % tuple(a[i:i+10]), end=" \\\n")
    #j = a[(len(a) // 10) * 10:]
    #print("%3u," * (len(j) - 1) % tuple(j[:-1]), end="")
    #print("%3u" % j[-1])
    # Run both one-liner solutions; they should report the same count.
    print("count_*_1() returns", count_non_0_clusters_1(a), "clusters of non-0")
    print("count_*_2() returns", count_non_0_clusters_2(a), "clusters of non-0")
| 3.484375 | 3 |
cardano-node-tests/cardano_node_tests/tests/test_configuration.py | MitchellTesla/Cardano-SCK | 6 | 13401 | <filename>cardano-node-tests/cardano_node_tests/tests/test_configuration.py
"""Tests for node configuration."""
import json
import logging
import time
from pathlib import Path
import allure
import pytest
from _pytest.tmpdir import TempdirFactory
from cardano_clusterlib import clusterlib
from cardano_node_tests.utils import cluster_management
from cardano_node_tests.utils import cluster_nodes
from cardano_node_tests.utils import configuration
from cardano_node_tests.utils import helpers
# Module-level logger named after this test module.
LOGGER = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def create_temp_dir(tmp_path_factory: TempdirFactory):
    """Create a temporary dir."""
    base_dir = Path(tmp_path_factory.getbasetemp())
    temp_dir_path = (base_dir / helpers.get_id_for_mktemp(__file__)).resolve()
    temp_dir_path.mkdir(exist_ok=True, parents=True)
    return temp_dir_path
@pytest.fixture
def temp_dir(create_temp_dir: Path):
    """Change to a temporary dir.

    The original working directory is restored after the test finishes
    (handled by the `helpers.change_cwd` context manager).
    """
    with helpers.change_cwd(create_temp_dir):
        yield create_temp_dir
# use the "temp_dir" fixture for all tests automatically,
# i.e. every test in this module runs with CWD set to the temporary dir
pytestmark = pytest.mark.usefixtures("temp_dir")
@pytest.fixture(scope="module")
def epoch_length_start_cluster(tmp_path_factory: TempdirFactory) -> Path:
    """Update *epochLength* to 1500 and return the cluster start script.

    NOTE(review): the docstring previously said 1200, but the code sets 1500
    and `TestBasic.test_epoch_length` asserts 1500; the lock/dir names below
    still say "1200" — confirm and consider renaming.
    """
    pytest_globaltemp = helpers.get_pytest_globaltemp(tmp_path_factory)
    # need to lock because this same fixture can run on several workers in parallel
    with helpers.FileLockIfXdist(f"{pytest_globaltemp}/startup_files_epoch_1200.lock"):
        destdir = pytest_globaltemp / "startup_files_epoch_1200"
        destdir.mkdir(exist_ok=True)
        # return existing script if it is already generated by other worker
        destdir_ls = list(destdir.glob("start-cluster*"))
        if destdir_ls:
            return destdir_ls[0]
        startup_files = cluster_nodes.get_cluster_type().cluster_scripts.copy_scripts_files(
            destdir=destdir
        )
        # patch the genesis spec in place before the cluster is started
        with open(startup_files.genesis_spec) as fp_in:
            genesis_spec = json.load(fp_in)
        genesis_spec["epochLength"] = 1500
        with open(startup_files.genesis_spec, "w") as fp_out:
            json.dump(genesis_spec, fp_out)
        return startup_files.start_script
@pytest.fixture(scope="module")
def slot_length_start_cluster(tmp_path_factory: TempdirFactory) -> Path:
    """Update *slotLength* to 0.3 and return the cluster start script."""
    pytest_globaltemp = helpers.get_pytest_globaltemp(tmp_path_factory)
    # need to lock because this same fixture can run on several workers in parallel
    with helpers.FileLockIfXdist(f"{pytest_globaltemp}/startup_files_slot_03.lock"):
        destdir = pytest_globaltemp / "startup_files_slot_03"
        destdir.mkdir(exist_ok=True)
        # return existing script if it is already generated by other worker
        destdir_ls = list(destdir.glob("start-cluster*"))
        if destdir_ls:
            return destdir_ls[0]
        startup_files = cluster_nodes.get_cluster_type().cluster_scripts.copy_scripts_files(
            destdir=destdir
        )
        # patch the genesis spec in place before the cluster is started
        with open(startup_files.genesis_spec) as fp_in:
            genesis_spec = json.load(fp_in)
        genesis_spec["slotLength"] = 0.3
        with open(startup_files.genesis_spec, "w") as fp_out:
            json.dump(genesis_spec, fp_out)
        return startup_files.start_script
@pytest.fixture
def cluster_epoch_length(
    cluster_manager: cluster_management.ClusterManager, epoch_length_start_cluster: Path
) -> clusterlib.ClusterLib:
    """Obtain a singleton cluster started with the epochLength-modified script."""
    return cluster_manager.get(
        singleton=True, cleanup=True, start_cmd=str(epoch_length_start_cluster)
    )
@pytest.fixture
def cluster_slot_length(
    cluster_manager: cluster_management.ClusterManager, slot_length_start_cluster: Path
) -> clusterlib.ClusterLib:
    """Obtain a singleton cluster started with the slotLength-modified script."""
    return cluster_manager.get(
        singleton=True, cleanup=True, start_cmd=str(slot_length_start_cluster)
    )
def check_epoch_length(cluster_obj: clusterlib.ClusterLib) -> None:
    """Check that epochs roll over at the expected wall-clock time.

    Waits for a fresh epoch, sleeps until shortly before it should end and
    verifies the epoch number has not changed, then sleeps past the boundary
    (with extra padding so the tip gets updated) and verifies the epoch
    number advanced by exactly one.
    """
    end_sec = 15
    end_sec_padded = end_sec + 15  # padded to make sure tip got updated
    cluster_obj.wait_for_new_epoch()
    epoch = cluster_obj.get_epoch()
    # sleep to ~`end_sec` seconds before the expected end of the current epoch
    sleep_time = cluster_obj.epoch_length_sec - end_sec
    time.sleep(sleep_time)
    assert epoch == cluster_obj.get_epoch()
    time.sleep(end_sec_padded)
    assert epoch + 1 == cluster_obj.get_epoch()
@pytest.mark.run(order=3)
@pytest.mark.skipif(
    bool(configuration.TX_ERA),
    reason="different TX eras doesn't affect this test, pointless to run",
)
class TestBasic:
    """Basic tests for node configuration."""

    @allure.link(helpers.get_vcs_link())
    def test_epoch_length(self, cluster_epoch_length: clusterlib.ClusterLib):
        """Test the *epochLength* configuration."""
        cluster = cluster_epoch_length
        # genesis was patched to epochLength=1500; slotLength keeps its default
        assert cluster.slot_length == 0.2
        assert cluster.epoch_length == 1500
        check_epoch_length(cluster)

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.run(order=2)
    def test_slot_length(self, cluster_slot_length: clusterlib.ClusterLib):
        """Test the *slotLength* configuration."""
        cluster = cluster_slot_length
        # genesis was patched to slotLength=0.3; epochLength keeps its default
        assert cluster.slot_length == 0.3
        assert cluster.epoch_length == 1000
        check_epoch_length(cluster)
| 1.867188 | 2 |
output/myresults.py | jacobseiler/rsage | 1 | 13402 | <gh_stars>1-10
#!/usr/bin/env python
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import os
import heapq
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.colors as colors
import matplotlib.cm as cm
from numpy import *
from random import sample, seed, randint
from os.path import getsize as getFileSize
import math
import random
import csv
from cycler import cycler
from io import StringIO
#np.set_printoptions(threshold=np.nan)
from collections import Counter
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import AxesGrid
from astropy import units as u
from astropy import cosmology
import matplotlib.ticker as mtick
import PlotScripts
import ReadScripts
import AllVars
import GalaxyPhotoion as photo
import ObservationalData as Obs
import gnedin_analytic as ga
from mpi4py import MPI
import sys
comm = MPI.COMM_WORLD
rank = comm.Get_rank()  # this process' MPI rank; rank 0 collects results and plots
size = comm.Get_size()  # total number of MPI processes

# Initialise global simulation parameters/constants and plotting defaults.
AllVars.Set_Params_Kali()
AllVars.Set_Constants()
PlotScripts.Set_Params_Plot()

output_format = ".png"  # file extension used for every saved figure

# For the Tiamat extended results there is a weird hump when calculating the escape fraction.
# This hump occurs at a halo mass of approximately 10.3.
# The calculation of fesc skips this hump range (defined from kink_low to kink_high)
kink_low = 10.3
kink_high = 10.30000001

m_low = 7.0  # We only sum the photons coming from halos within the mass range m_low < Halo Mass < m_high
m_high = 15.0

# Galaxy stellar-mass bounds, log10(Msun).
m_gal_low = 3.0
m_gal_high = 12.0

# Same halo-mass bounds converted to SAGE internal units (1.0e10 Msun/h).
m_low_SAGE = pow(10, m_low)/1.0e10 * AllVars.Hubble_h
m_high_SAGE = pow(10, m_high)/1.0e10 * AllVars.Hubble_h

bin_width = 0.2  # dex; shared by halo- and galaxy-mass histograms
NB = int((m_high - m_low) / bin_width)  # number of halo-mass bins
NB_gal = int((m_gal_high - m_gal_low) / bin_width)  # number of galaxy-mass bins

# Binning for the ejected fraction (fej), which ranges over [0, 1].
fej_low = 0.0
fej_high = 1.0
fej_bin_width = 0.05
NB_fej = int((fej_high - fej_low) / fej_bin_width)
def raise_list_power(my_list, n):
    """Return a new list with every element of `my_list` raised to the power `n`."""
    return [element ** n for element in my_list]
def raise_power_list(my_list, n):
    """Return a new list of `n` raised to the power of each element of `my_list`."""
    return [n ** element for element in my_list]
def calculate_beta(MUV, z):
    '''
    Calculation of the dust attenuation parameter Beta. Fit values are from Bouwens (2015) ApJ 793, 115.
    For z = 5 and 6, Bouwens uses a piece-wise linear relationship and a linear relationship for higher redshift.

    Parameters
    ----------
    MUV : `float'
        A value of the absolute magnitude in the UV (generally M1600) in the AB magnitude system.

    z : `float'
        Redshift the attenuation is calculated at.  Must satisfy 4.5 <= z < 10.5.

    Returns
    ------
    beta : `float'
        Value of the UV continuum parameter beta.

    Raises
    ------
    ValueError
        If `z` is outside the fitted range [4.5, 10.5).  (The original code
        silently fell through and crashed with UnboundLocalError instead.)
    '''
    if 4.5 <= z < 5.5:    # z = 5 fits (piece-wise in MUV).
        dB = -0.08 if MUV > -18.8 else -0.17
        B = -2.05
        offset = 18.8
    elif 5.5 <= z < 6.5:  # z = 6 fits (piece-wise in MUV).
        dB = -0.08 if MUV > -18.8 else -0.24
        B = -2.22
        offset = 18.8
    elif 6.5 <= z < 7.5:  # z = 7 fits.
        dB = -0.20
        B = -2.05
        offset = 19.5
    elif 7.5 <= z < 8.5:  # z = 8 fits.
        dB = -0.15
        B = -2.13
        offset = 19.5
    elif 8.5 <= z < 9.5:  # z = 9 fits.
        dB = -0.16
        B = -2.19
        offset = 19.5
    elif 9.5 <= z < 10.5:  # z = 10 fits.
        dB = -0.16
        B = -2.16
        offset = 19.5
    else:
        raise ValueError("calculate_beta() only has Bouwens (2015) fits for "
                         "4.5 <= z < 10.5, got z = {0}".format(z))

    beta = dB * (MUV + offset) + B
    return beta
def multiply(array):
    '''
    Performs element wise multiplication.

    Parameters
    ----------
    array : `~numpy.darray'
        The array to be multiplied.

    Returns
    -------
    total : `float'
        Total of the elements multiplied together.
        An empty input returns 1 (the empty product), matching the
        behaviour of the original manual loop.
    '''
    # math.prod performs the same left-to-right product as the original
    # index loop, but in C (`math` is already imported at module level).
    return math.prod(array)
##
def Sum_Log(array):
    '''
    Performs an element wise sum of an array who's elements are in log-space.

    Parameters
    ----------
    array : array
        Array with elements in log-space (base 10).

    Returns
    ------
    sum_total : float
        Value of the elements taken to the power of 10 and summed.
        An empty input returns 0.0.

    Units
    -----
    All units are kept the same as the inputs.
    '''
    # Convert each element out of log10-space and accumulate; the explicit
    # 0.0 start value keeps the return type a float for empty input, exactly
    # like the original accumulator loop.
    return sum((10.0 ** value for value in array), 0.0)
##
def Std_Log(array, mean):
    '''
    Calculates the standard deviation of an array with elements in log-space.

    Parameters
    ----------
    array : array
        Array with elements in log-space (base 10).
    mean : float
        Mean of the array (not in log).

    Returns
    ------
    std : float
        Population standard deviation of the input array taken to the
        power of 10.

    Raises
    ------
    ZeroDivisionError
        If `array` is empty (same as the original implementation).

    Units
    -----
    All units are kept the same as the inputs.
    '''
    # Population variance of the linear-space (10**x) values around `mean`.
    linear = [10.0 ** value for value in array]
    variance = sum((value - mean) ** 2 for value in linear) / len(array)
    return np.sqrt(variance)
###
def collect_across_tasks(mean_per_task, std_per_task, N_per_task, SnapList,
                         BinSnapList=[], binned=False, m_bin_low=0.0,
                         m_bin_high=0.0, my_bin_width=bin_width):
    """
    Reduces arrays that are unique to each task onto the master task.

    The dimensions of the input arrays will change slightly if we are collecting a statistics
    that is binned across e.g., halo mass or galaxy stellar mass.

    NOTE(review): `BinSnapList=[]` is a mutable default argument; it is never
    mutated here so it is harmless, but `None` + fallback would be safer.

    Parameters
    ----------
    mean_per_task, std_per_task, N_per_task: Nested 2D (or 3D if binned == True) arrays of floats.
        Outer length is equal to the number of models.
        Inner length is equal to the number of snapshots the data has been calculated for.
        Most inner length is equal to the number of bins.
        Contains the mean/standard deviation/number of objects unique for each task.

    SnapList: Nested 2D arrays of integers. Outer length is equal to the number of models.
        Contains the snapshot numbers the data has been calculated for each model.

    BinSnapList: Nested 2D arrays of integers. Outer length is equal to the number of models.
        Often statistics are calculated for ALL snapshots but we only wish to plot for a subset of snapshots.
        This variable allows the binned data to be collected for only a subset of the snapshots.

    binned: Boolean.
        Dictates whether the collected data is a 2D or 3D array with the inner-most array being binned across e.g., halo mass.

    Returns
    ----------
    master_mean, master_std, master_N: Nested 2D (or 3D if binned == True) arrays of floats.
        Shape is identical to the input mean_per_task etc.
        If rank == 0 these contain the collected statistics.
        Otherwise these will be none.

    master_bin_middle: Array of floats.
        Contains the location of the middle of the bins for the data.
    """
    master_mean = []
    master_std = []
    master_N = []
    master_bin_middle = []

    for model_number in range(0, len(SnapList)):
        master_mean.append([])
        master_std.append([])
        master_N.append([])
        master_bin_middle.append([])

        # If we're collecting a binned statistic (e.g., binned across halo mass), then we need to perform the collecting per snapshot.
        if binned:
            count = 0
            for snapshot_idx in range(len(SnapList[model_number])):
                # Only pool the snapshots we actually want to plot (those in BinSnapList).
                if SnapList[model_number][snapshot_idx] == BinSnapList[model_number][count]:
                    master_mean[model_number], master_std[model_number], master_N[model_number] = calculate_pooled_stats(master_mean[model_number], master_std[model_number], master_N[model_number], mean_per_task[model_number][snapshot_idx], std_per_task[model_number][snapshot_idx], N_per_task[model_number][snapshot_idx])
                    master_bin_middle[model_number].append(np.arange(m_bin_low,
                                                                     m_bin_high+my_bin_width,
                                                                     my_bin_width)[:-1]
                                                           + my_bin_width* 0.5)
                    count += 1
                    if count == len(BinSnapList[model_number]):
                        break
        else:
            master_mean[model_number], master_std[model_number], master_N[model_number] = calculate_pooled_stats(master_mean[model_number], master_std[model_number], master_N[model_number],
                                                                                                                 mean_per_task[model_number], std_per_task[model_number],
                                                                                                                 N_per_task[model_number])
            # calculate_pooled_stats appends one entry; unwrap it on the master task.
            if rank == 0:
                master_mean[model_number] = master_mean[model_number][0]
                master_std[model_number] = master_std[model_number][0]
                master_N[model_number] = master_N[model_number][0]

    return master_mean, master_std, master_N, master_bin_middle
###
def calculate_pooled_stats(mean_pool, std_pool, N_pool, mean_local, std_local, N_local):
    '''
    Calculates the pooled mean and standard deviation from multiple processors and appends it to an input array.
    Formulae taken from https://en.wikipedia.org/wiki/Pooled_variance
    As we only care about these stats on the rank 0 process, we make use of junk inputs/outputs for other ranks.

    NOTE: Since the input data may be an array (e.g. pooling the mean/std for a stellar mass function).

    Parameters
    ----------
    mean_pool, std_pool, N_pool : array of floats.
        Arrays that contain the current pooled means/standard deviation/number of data points (for rank 0) or just a junk input (for other ranks).
    mean_local, mean_std : float or array of floats.
        The non-pooled mean and standard deviation unique for each process.
    N_local : floating point number or array of floating point numbers.
        Number of data points used to calculate the mean/standard deviation that is going to be added to the pool.
        NOTE: Use floating point here so we can use MPI.DOUBLE for all MPI functions.

    Returns
    -------
    mean_pool, std_pool : array of floats.
        Original array with the new pooled mean/standard deviation appended (for rank 0) or the new pooled mean/standard deviation only (for other ranks).

    Units
    -----
    All units are the same as the input.
    All inputs MUST BE real-space (not log-space).
    '''
    if isinstance(mean_local, list) == True:
        if len(mean_local) != len(std_local):
            print("len(mean_local) = {0} \t len(std_local) = {1}".format(len(mean_local), len(std_local)))
            raise ValueError("Lengths of mean_local and std_local should be equal")

    if ((type(mean_local).__module__ == np.__name__) == True or (isinstance(mean_local, list) == True)): # Checks to see if we are dealing with arrays.
        # Pooled variance needs N*mean and N*var per bin.
        N_times_mean_local = np.multiply(N_local, mean_local)
        N_times_var_local = np.multiply(N_local, np.multiply(std_local, std_local))

        N_local = np.array(N_local).astype(float)
        # NOTE(review): cast to float32 here, recast to float64 further down —
        # the float32 round-trip loses precision; kept as-is to preserve behaviour.
        N_times_mean_local = np.array(N_times_mean_local).astype(np.float32)

        if rank == 0: # Only rank 0 holds the final arrays so only it requires proper definitions.
            N_times_mean_pool = np.zeros_like(N_times_mean_local)
            N_pool_function = np.zeros_like(N_local)
            N_times_var_pool = np.zeros_like(N_times_var_local)

            N_times_mean_pool = N_times_mean_pool.astype(np.float64) # Recast everything to double precision then use MPI.DOUBLE.
            N_pool_function = N_pool_function.astype(np.float64)
            N_times_var_pool = N_times_var_pool.astype(np.float64)
        else:
            # Non-root ranks only contribute; they don't receive the result.
            N_times_mean_pool = None
            N_pool_function = None
            N_times_var_pool = None

        comm.Barrier()

        N_times_mean_local = N_times_mean_local.astype(np.float64)
        N_local = N_local.astype(np.float64)
        N_times_var_local = N_times_var_local.astype(np.float64)

        comm.Reduce([N_times_mean_local, MPI.DOUBLE], [N_times_mean_pool, MPI.DOUBLE], op = MPI.SUM, root = 0) # Sum the arrays across processors.
        comm.Reduce([N_local, MPI.DOUBLE],[N_pool_function, MPI.DOUBLE], op = MPI.SUM, root = 0)
        comm.Reduce([N_times_var_local, MPI.DOUBLE], [N_times_var_pool, MPI.DOUBLE], op = MPI.SUM, root = 0)
    else:
        # Scalar path: use the lower-case (pickle-based) reduce.
        N_times_mean_local = N_local * mean_local
        N_times_var_local = N_local * std_local * std_local

        N_times_mean_pool = comm.reduce(N_times_mean_local, op = MPI.SUM, root = 0)
        N_pool_function = comm.reduce(N_local, op = MPI.SUM, root = 0)
        N_times_var_pool = comm.reduce(N_times_var_local, op = MPI.SUM, root = 0)

    if rank == 0:
        mean_pool_function = np.zeros((len(N_pool_function)))
        std_pool_function = np.zeros((len(N_pool_function)))

        for i in range(0, len(N_pool_function)):
            # Empty bins get mean 0; bins with < 3 points get std 0.
            if N_pool_function[i] == 0:
                mean_pool_function[i] = 0.0
            else:
                mean_pool_function[i] = np.divide(N_times_mean_pool[i], N_pool_function[i])
            if N_pool_function[i] < 3:
                std_pool_function[i] = 0.0
            else:
                std_pool_function[i] = np.sqrt(np.divide(N_times_var_pool[i], N_pool_function[i]))

        mean_pool.append(mean_pool_function)
        std_pool.append(std_pool_function)
        N_pool.append(N_pool_function)

        return mean_pool, std_pool, N_pool
    else:
        return mean_pool, std_pool, N_pool_function # Junk return because non-rank 0 doesn't care.
##
def StellarMassFunction(SnapList, SMF, simulation_norm, FirstFile, LastFile, NumFile, ResolutionLimit_mean, model_tags, observations, paper_plot, output_tag):
    '''
    Calculates the stellar mass function for given galaxies with the option to overplot observations by Song et al. (2013) at z = 6, 7, 8 and/or Baldry et al. (2008) at z = 0.1.
    Parallel compatible.
    NOTE: The plotting assumes the redshifts we are plotting at are (roughly) the same for each model.

    Parameters
    ---------
    SnapList : Nested 'array-like`, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
        Snapshots that we plot the stellar mass function at for each model.
    SMF : Nested 2-dimensional array, SMF[model_number0][snapshot0] = [bin0galaxies, ..., binNgalaxies], with length equal to the number of bins (NB_gal).
        The count of galaxies within each stellar mass bin. Bounds are given by 'm_gal_low' and 'm_gal_high' in bins given by 'bin_width'.
    simulation_norm : array with length equal to the number of models.
        Denotes which simulation each model uses.
        0 : MySim
        1 : Mini-Millennium
        2 : Tiamat (down to z = 5)
        3 : Extended Tiamat (down to z = 1.6ish).
        4 : Britton's Simulation
        5 : Kali
    FirstFile, LastFile, NumFile : array of integers with length equal to the number of models.
        The file numbers for each model that were read in (defined by the range between [FirstFile, LastFile] inclusive) and the TOTAL number of files for this model (we may only be plotting a subset of the volume).
    ResolutionLimit_mean : array of floats with the same shape as SMF.
        This is the mean stellar mass for a halo with len (number of N-body simulation particles) between 'stellar_mass_halolen_lower' and 'stellar_mass_halolen_upper'.
    model_tags : array of strings with length equal to the number of models.
        Strings that contain the tag for each model. Will be placed on the plot.
    observations : int
        Denotes whether we want to overplot observational results.
        0 : Don't plot anything.
        1 : Plot Song et al. (2016) at z = 6, 7, 8.
        2 : Plot Baldry et al. (2008) at z = 0.1.
        3 : Plot both of these.
    paper_plot : int
        Denotes whether we want to split the plotting over three panels (z = 6, 7, 8) for the paper or keep it all to one figure.
    output_tag : string
        Name of the file that will be generated. File will be saved in the current directory with the output format defined by the 'output_format' variable at the beginning of the file.

    Returns
    -------
    No returns.
    Generates and saves the plot (named via output_tag).

    Units
    -----
    Stellar Mass is in units of log10(Msun).
    '''
    ## Empty array initialization ##
    title = []
    normalization_array = []
    redshift_labels = []
    counts_array = []
    bin_middle_array = []

    for model_number in range(0, len(SnapList)):
        counts_array.append([])
        bin_middle_array.append([])
        redshift_labels.append([])
    ####

    for model_number in range(0, len(SnapList)): # Does this for each of the models.
        ## Normalization for each model. ##
        if (simulation_norm[model_number] == 0):
            AllVars.Set_Params_Mysim()
        elif (simulation_norm[model_number] == 1):
            AllVars.Set_Params_MiniMill()
        elif (simulation_norm[model_number] == 2):
            AllVars.Set_Params_Tiamat()
        elif (simulation_norm[model_number] == 3):
            AllVars.Set_Params_Tiamat_extended()
        elif (simulation_norm[model_number] == 4):
            AllVars.Set_Params_Britton()
        elif(simulation_norm[model_number] == 5):
            AllVars.Set_Params_Kali()

        box_factor = (LastFile[model_number] - FirstFile[model_number] + 1.0)/(NumFile[model_number]) # This factor allows us to take a sub-volume of the box and scale the results to represent the entire box.
        print("We are creating the stellar mass function using {0:.4f} of the box's volume.".format(box_factor))
        # Comoving volume (Mpc^3) * bin width (dex) * fraction of box read.
        norm = pow(AllVars.BoxSize,3) / pow(AllVars.Hubble_h, 3) * bin_width * box_factor
        normalization_array.append(norm)
        ####

        for snapshot_idx in range(0, len(SnapList[model_number])): # Loops for each snapshot in each model.
            tmp = 'z = %.2f' %(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]) # Assigns a redshift label.
            redshift_labels[model_number].append(tmp)

            ## We perform the plotting on Rank 0 so only this rank requires the final counts array. ##
            if rank == 0:
                counts_total = np.zeros_like(SMF[model_number][snapshot_idx])
            else:
                counts_total = None

            comm.Reduce([SMF[model_number][snapshot_idx], MPI.FLOAT], [counts_total, MPI.FLOAT], op = MPI.SUM, root = 0) # Sum all the stellar mass and pass to Rank 0.

            if rank == 0:
                counts_array[model_number].append(counts_total)
                bin_middle_array[model_number].append(np.arange(m_gal_low, m_gal_high+bin_width, bin_width)[:-1] + bin_width * 0.5)
            ####

    ## Plotting ##
    if rank == 0: # Plot only on rank 0.
        if paper_plot == 0:
            # Single-panel figure: all models and snapshots on one set of axes.
            f = plt.figure()
            ax = plt.subplot(111)

            for model_number in range(0, len(SnapList)):
                for snapshot_idx in range(0, len(SnapList[model_number])):
                    if model_number == 0: # We assume the redshifts for each model are the same, we only want to put a legend label for each redshift once.
                        title = redshift_labels[model_number][snapshot_idx]
                    else:
                        title = ''
                    plt.plot(bin_middle_array[model_number][snapshot_idx], counts_array[model_number][snapshot_idx] / normalization_array[model_number], color = PlotScripts.colors[snapshot_idx], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = title, linewidth = PlotScripts.global_linewidth)

            #print(np.min(np.log10(ResolutionLimit_mean)))
            #ax.axvline(np.max(np.log10(ResolutionLimit_mean)), color = 'k', linewidth = PlotScripts.global_linewidth, linestyle = '--')
            #ax.text(np.max(np.log10(ResolutionLimit_mean)) + 0.1, 1e-3, "Resolution Limit", color = 'k')

            # Off-plot dummy lines (1e100) generate one legend entry per model.
            for model_number in range(0, len(SnapList)): # Place legend labels for each of the models. NOTE: Placed after previous loop for proper formatting of labels.
                plt.plot(1e100, 1e100, color = 'k', linestyle = PlotScripts.linestyles[model_number], label = model_tags[model_number], rasterized=True, linewidth = PlotScripts.global_linewidth)

            ## Adjusting axis labels/limits. ##
            plt.yscale('log', nonposy='clip')
            plt.axis([6, 11.5, 1e-6, 1e-0])
            ax.set_xlabel(r'$\log_{10}\ m_{\mathrm{*}} \:[M_{\odot}]$', fontsize = PlotScripts.global_fontsize)
            ax.set_ylabel(r'$\Phi\ [\mathrm{Mpc}^{-3}\: \mathrm{dex}^{-1}]$', fontsize = PlotScripts.global_fontsize)
            ax.xaxis.set_minor_locator(plt.MultipleLocator(0.25))
            ax.set_xticks(np.arange(6.0, 12.0))

            if (observations == 1 or observations == 3): # If we wanted to plot Song.
                Obs.Get_Data_SMF()

                delta = 0.05
                caps = 5
                ## Song (2016) Plotting ##
                plt.errorbar(Obs.Song_SMF_z6[:,0], 10**Obs.Song_SMF_z6[:,1], yerr= (10**Obs.Song_SMF_z6[:,1] - 10**Obs.Song_SMF_z6[:,3], 10**Obs.Song_SMF_z6[:,2] - 10**Obs.Song_SMF_z6[:,1]), xerr = 0.25, capsize = caps, elinewidth = PlotScripts.global_errorwidth, alpha = 1.0, lw=2.0, marker='o', ls='none', label = 'Song 2015, z = 6', color = PlotScripts.colors[0], rasterized=True)
                plt.errorbar(Obs.Song_SMF_z7[:,0], 10**Obs.Song_SMF_z7[:,1], yerr= (10**Obs.Song_SMF_z7[:,1] - 10**Obs.Song_SMF_z7[:,3], 10**Obs.Song_SMF_z7[:,2] - 10**Obs.Song_SMF_z7[:,1]), xerr = 0.25, capsize = caps, alpha=0.75, elinewidth = PlotScripts.global_errorwidth, lw=1.0, marker='o', ls='none', label = 'Song 2015, z = 7', color = PlotScripts.colors[1], rasterized=True)
                plt.errorbar(Obs.Song_SMF_z8[:,0], 10**Obs.Song_SMF_z8[:,1], yerr= (10**Obs.Song_SMF_z8[:,1] - 10**Obs.Song_SMF_z8[:,3], 10**Obs.Song_SMF_z8[:,2] - 10**Obs.Song_SMF_z8[:,1]), xerr = 0.25, capsize = caps, alpha=0.75, elinewidth = PlotScripts.global_errorwidth, lw=1.0, marker='o', ls='none', label = 'Song 2015, z = 8', color = PlotScripts.colors[2], rasterized=True)
                ####

            if ((observations == 2 or observations == 3) and rank == 0): # If we wanted to plot Baldry.
                Baldry_xval = np.log10(10 ** Obs.Baldry_SMF_z0[:, 0] /AllVars.Hubble_h/AllVars.Hubble_h)
                Baldry_xval = Baldry_xval - 0.26 # convert back to Chabrier IMF
                Baldry_yvalU = (Obs.Baldry_SMF_z0[:, 1]+Obs.Baldry_SMF_z0[:, 2]) * AllVars.Hubble_h*AllVars.Hubble_h*AllVars.Hubble_h
                Baldry_yvalL = (Obs.Baldry_SMF_z0[:, 1]-Obs.Baldry_SMF_z0[:, 2]) * AllVars.Hubble_h*AllVars.Hubble_h*AllVars.Hubble_h

                plt.fill_between(Baldry_xval, Baldry_yvalU, Baldry_yvalL,
                                 facecolor='purple', alpha=0.25, label='Baldry et al. 2008 (z=0.1)')
                ####

            leg = plt.legend(loc='lower left', numpoints=1, labelspacing=0.1)
            leg.draw_frame(False) # Don't want a box frame
            for t in leg.get_texts(): # Reduce the size of the text
                t.set_fontsize(PlotScripts.global_legendsize)

            outputFile = './%s%s' %(output_tag, output_format)
            plt.savefig(outputFile, bbox_inches='tight') # Save the figure
            print('Saved file to {0}'.format(outputFile))
            plt.close()

        if (paper_plot == 1):
            # Three-panel paper figure: one panel per redshift (z = 6, 7, 8),
            # sharing the y-axis.
            fig, ax = plt.subplots(nrows=1, ncols=3, sharex=False, sharey=True, figsize=(16, 6))

            delta_fontsize = 0
            caps = 5
            ewidth = 1.5
            for model_number in range(0, len(SnapList)):
                for count in range(len(SnapList[model_number])):
                    # Only plot bins that actually contain galaxies.
                    w = np.where((counts_array[model_number][count] > 0))[0]
                    ax[count].plot(bin_middle_array[model_number][count][w], counts_array[model_number][count][w]
                                   / normalization_array[model_number], color = PlotScripts.colors[model_number],
                                   linestyle = PlotScripts.linestyles[model_number], rasterized = True,
                                   label = r"$\mathbf{SAGE}$", linewidth = PlotScripts.global_linewidth)

                    tick_locs = np.arange(6.0, 12.0)
                    ax[count].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs], fontsize = PlotScripts.global_fontsize)
                    ax[count].set_xlim([6.8, 10.3])
                    ax[count].tick_params(which = 'both', direction='in',
                                          width = PlotScripts.global_tickwidth)
                    ax[count].tick_params(which = 'major', length = PlotScripts.global_ticklength)
                    ax[count].tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
                    ax[count].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                                         fontsize = PlotScripts.global_labelsize - delta_fontsize)
                    ax[count].xaxis.set_minor_locator(plt.MultipleLocator(0.25))
                    #ax[count].set_xticks(np.arange(6.0, 12.0))
                    for axis in ['top','bottom','left','right']: # Adjust axis thickness.
                        ax[count].spines[axis].set_linewidth(PlotScripts.global_axiswidth)

            # Since y-axis is shared, only need to do this once.
            ax[0].set_yscale('log', nonposy='clip')
            ax[0].set_yticklabels([r"$\mathbf{10^{-5}}$",r"$\mathbf{10^{-5}}$",r"$\mathbf{10^{-4}}$", r"$\mathbf{10^{-3}}$",
                                   r"$\mathbf{10^{-2}}$",r"$\mathbf{10^{-1}}$"])
            ax[0].set_ylim([1e-5, 1e-1])
            #ax[0].set_ylabel(r'\mathbf{$\log_{10} \Phi\ [\mathrm{Mpc}^{-3}\: \mathrm{dex}^{-1}]}$',
            ax[0].set_ylabel(r'$\mathbf{log_{10} \: \Phi\ [Mpc^{-3}\: dex^{-1}]}$',
                             fontsize = PlotScripts.global_labelsize - delta_fontsize)

            # Overplot the observed SMFs (one redshift per panel).
            Obs.Get_Data_SMF()
            PlotScripts.Plot_SMF_z6(ax[0], errorwidth=ewidth, capsize=caps)
            PlotScripts.Plot_SMF_z7(ax[1], errorwidth=ewidth, capsize=caps)
            PlotScripts.Plot_SMF_z8(ax[2], errorwidth=ewidth, capsize=caps)
            ####

            ax[0].text(0.7, 0.9, r"$\mathbf{z = 6}$", transform = ax[0].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
            ax[1].text(0.7, 0.9, r"$\mathbf{z = 7}$", transform = ax[1].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
            ax[2].text(0.7, 0.9, r"$\mathbf{z = 8}$", transform = ax[2].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)

            #leg = ax[0,0].legend(loc=2, bbox_to_anchor = (0.2, -0.5), numpoints=1, labelspacing=0.1)
            leg = ax[0].legend(loc='lower left', numpoints=1, labelspacing=0.1)
            leg.draw_frame(False) # Don't want a box frame
            for t in leg.get_texts(): # Reduce the size of the text
                t.set_fontsize(PlotScripts.global_legendsize - 2)

            plt.tight_layout()

            outputFile = "{0}_paper{1}".format(output_tag, output_format)
            plt.savefig(outputFile, bbox_inches='tight') # Save the figure
            print('Saved file to {0}'.format(outputFile))
            plt.close()
##
def plot_fesc_galaxy(SnapList, PlotSnapList, simulation_norm,
                     mean_galaxy_fesc, std_galaxy_fesc, N_galaxy_fesc,
                     mean_halo_fesc, std_halo_fesc, N_halo_fesc,
                     ResolutionLimit_mean, model_tags, paper_plots,
                     mass_global, fesc_global, Ngamma_global, output_tag):
    """
    Plots the escape fraction as a function of stellar/halo mass.

    Parallel compatible.

    Accepts 3D arrays of the escape fraction binned into Stellar Mass bins to
    plot the escape fraction for multiple models.

    Mass units are log(Msun).

    Parameters
    ----------
    SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
        Snapshots for each model.
    simulation_norm : array with length equal to the number of models.
        Denotes which simulation each model uses.
        0 : MySim
        1 : Mini-Millennium
        2 : Tiamat (down to z = 5)
        3 : Extended Tiamat (down to z = 1.6ish).
        4 : Britton's Simulation
        5 : Kali
    mean_galaxy_fesc, std_galaxy_fesc, N_galaxy_fesc : Nested 3-dimensional array, mean_galaxy_fesc[model_number0][snapshot0] = [bin0_meanfesc, ..., binN_meanfesc], with length equal to the number of models.
        Mean/Standard deviation for fesc in each stellar mass bin, for each
        [model_number] and [snapshot_number]. N_galaxy_fesc is the number of
        galaxies placed into each mass bin.
    mean_halo_fesc, std_halo_fesc, N_halo_fesc : Nested 3-dimensional array, mean_halo_fesc[model_number0][snapshot0] = [bin0_meanfesc, ..., binN_meanfesc], with length equal to the number of models.
        Identical to previous except using the halo virial mass for the
        binning rather than stellar mass.  NOTE(review): within the visible
        body only N_halo_fesc is used (for the galaxy-count printout); the
        mean/std halo arrays are accepted but not plotted here.
    ResolutionLimit_mean : array of floats with the same shape as mean_galaxy_fesc.
        This is the mean stellar mass for a halo with len (number of N-body
        simulation particles) between 'stellar_mass_halolen_lower' and
        'stellar_mass_halolen_upper'.
    model_tags : array of strings with length equal to the number of models.
        Strings that contain the tag for each model. Will be placed on the plot.
    paper_plots : Integer.
        Flag to denote whether we should plot a full, 4 panel plot for the
        RSAGE paper.
    output_tag : string
        Name of the file that will be generated.

    Returns
    -------
    No returns.
    Generates and saves the plot (named via output_tag).

    Units
    -----
    Mass units are log(Msun).
    """

    def adjust_stellarmass_plot(ax):
        # Cosmetic pass for the single-panel (paper_plots == 0) figure:
        # labels, limits, tick locators/styling, spine widths and legend.
        #ax.axhline(0.20, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
        #ax.text(7.8, 0.22, r"$f_\mathrm{esc, base}$", color = 'k',
        #        size = PlotScripts.global_fontsize)

        ax.set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                      size = PlotScripts.global_fontsize)
        ax.set_ylabel(r'$\mathbf{\langle f_{esc}\rangle_{M_*}}$',
                      size = PlotScripts.global_labelsize)

        ax.set_xlim([6.8, 10])
        ax.set_ylim([0.05, 0.45])

        #ax.axhline(0.35, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
        #ax.text(9.1, 0.37, r"$f_\mathrm{esc} = 0.35$", color = 'k',
        #        size = PlotScripts.global_fontsize)

        ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
        ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
        ax.tick_params(which = 'both', direction='in', width =
                       PlotScripts.global_tickwidth)
        ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
        ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)

        for axis in ['top','bottom','left','right']: # Adjust axis thickness.
            ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)

        # Tick labels are hand-set in bold math text; tick_locs must agree
        # with matplotlib's auto-chosen tick positions for labels to be right.
        tick_locs = np.arange(6.0, 11.0)
        ax.set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                           fontsize = PlotScripts.global_fontsize)

        tick_locs = np.arange(0.0, 0.80, 0.10)
        ax.set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
                           fontsize = PlotScripts.global_fontsize)

        '''
        labels = ax.yaxis.get_ticklabels()
        locs = ax.yaxis.get_ticklocs()
        for label, loc in zip(labels, locs):
            print("{0} {1}".format(label, loc))
        '''

        leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize('medium')

    def adjust_paper_plots(ax, model_tags):
        # Cosmetic pass for the 2x2 paper figure: per-panel limits, ticks and
        # model-tag annotations, plus shared axis labels on the outer panels.
        ax[1,0].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                           size = PlotScripts.global_fontsize)
        ax[1,1].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                           size = PlotScripts.global_fontsize)

        ax[0,0].set_ylabel(r'$\mathbf{\langle f_{esc}\rangle_{M_*}}$',
                           size = PlotScripts.global_labelsize)
        ax[1,0].set_ylabel(r'$\mathbf{\langle f_{esc}\rangle_{M_*}}$',
                           size = PlotScripts.global_labelsize)

        # Walk the four panels in row-major order; count indexes model_tags.
        ax_x = [0, 0, 1, 1]
        ax_y = [0, 1, 0, 1]
        for count, (x, y) in enumerate(zip(ax_x, ax_y)):
            ax[x,y].set_xlim([4.8, 10.4])
            ax[x,y].set_ylim([0.00, 0.68])

            ax[x,y].yaxis.set_major_locator(mtick.MultipleLocator(0.1))
            ax[x,y].xaxis.set_major_locator(mtick.MultipleLocator(1.0))
            ax[x,y].yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
            ax[x,y].xaxis.set_minor_locator(mtick.MultipleLocator(0.25))

            ax[x,y].tick_params(which = 'both', direction='in', width =
                                PlotScripts.global_tickwidth)
            ax[x,y].tick_params(which = 'major', length = PlotScripts.global_ticklength)
            ax[x,y].tick_params(which = 'minor',
                                length = PlotScripts.global_ticklength - 2)

            for axis in ['top','bottom','left','right']: # Adjust axis thickness.
                ax[x,y].spines[axis].set_linewidth(PlotScripts.global_axiswidth)

            print(model_tags[count])
            label = model_tags[count]
            ax[x,y].text(0.05, 0.65, label, transform = ax[x,y].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)

        tick_locs = np.arange(4.0, 11.0)
        ax[1,0].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                                fontsize = PlotScripts.global_fontsize)
        ax[1,1].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                                fontsize = PlotScripts.global_fontsize)

        tick_locs = np.arange(-0.1, 0.80, 0.10)
        ax[0,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
                                fontsize = PlotScripts.global_fontsize)
        ax[1,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
                                fontsize = PlotScripts.global_fontsize)

        # Debug dump: compare matplotlib's chosen tick locations against the
        # hand-written labels above.
        print("x")
        labels = ax[1,0].xaxis.get_ticklabels()
        locs = ax[1,0].xaxis.get_ticklocs()
        for label, loc in zip(labels, locs):
            print("{0} {1}".format(label, loc))

        print("y")
        labels = ax[1,0].yaxis.get_ticklabels()
        locs = ax[1,0].yaxis.get_ticklocs()
        for label, loc in zip(labels, locs):
            print("{0} {1}".format(label, loc))

    print("Plotting fesc as a function of stellar mass.")

    ## Array initialization ##
    # Gather the per-task binned statistics onto the root task (MPI reduce).
    master_mean_fesc_stellar, master_std_fesc_stellar, master_N_fesc_stellar, master_bin_middle_stellar = \
        collect_across_tasks(mean_galaxy_fesc, std_galaxy_fesc, N_galaxy_fesc,
                             SnapList, PlotSnapList, True, m_gal_low, m_gal_high)

    if rank == 0:  # Only the root task plots.
        if paper_plots == 0:
            fig = plt.figure()
            ax1 = fig.add_subplot(111)
        else:
            fig, ax = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(16, 6))
            fig2, ax2 = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(16, 6))

        delta_fontsize = 0
        caps = 5
        ewidth = 1.5

        count_x = 0  # Row index into the 2x2 paper-plot grid.
        for count, model_number in enumerate(range(0, len(SnapList))):
            if count == 2:  # Third model onwards goes on the second row.
                count_x += 1

            print("There were a total of {0} galaxies over the entire redshift range.".format(sum(N_halo_fesc[model_number])))

            ## Normalization for each model. ##
            # Sets global cosmology/snapshot tables (AllVars.SnapZ etc.).
            if (simulation_norm[model_number] == 0):
                AllVars.Set_Params_Mysim()
            elif (simulation_norm[model_number] == 1):
                AllVars.Set_Params_MiniMill()
            elif (simulation_norm[model_number] == 2):
                AllVars.Set_Params_Tiamat()
            elif (simulation_norm[model_number] == 3):
                AllVars.Set_Params_Tiamat_extended()
            elif (simulation_norm[model_number] == 4):
                AllVars.Set_Params_Britton()
            elif(simulation_norm[model_number] == 5):
                AllVars.Set_Params_Kali()

            plot_count = 0  # How many of this model's PlotSnapList snaps are drawn.
            for snapshot_idx in range(0, len(SnapList[model_number])):
                if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
                    # Redshift labels only on the first model (shared legend).
                    if (model_number == 0):
                        label = r"$\mathbf{z = " + \
                                str(int(round(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]))) +\
                                "}$"
                    else:
                        label = ""

                    ## Plots as a function of stellar mass ##
                    # Mask sparsely-populated bins (< 4 galaxies) with NaN so
                    # matplotlib leaves gaps in the curve.  Mutates the
                    # gathered array in place.
                    w = np.where((master_N_fesc_stellar[model_number][snapshot_idx] < 4))[0] # If there are no galaxies in the bin we don't want to plot.
                    master_mean_fesc_stellar[model_number][snapshot_idx][w] = np.nan

                    if paper_plots == 0:
                        print(master_mean_fesc_stellar[model_number][snapshot_idx])
                        ax1.plot(master_bin_middle_stellar[model_number][snapshot_idx],
                                 master_mean_fesc_stellar[model_number][snapshot_idx],
                                 color = PlotScripts.colors[plot_count],
                                 ls = PlotScripts.linestyles[model_number],
                                 rasterized = True, label = label,
                                 lw = PlotScripts.global_linewidth)
                    else:
                        ax[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
                                                  master_mean_fesc_stellar[model_number][snapshot_idx],
                                                  color = PlotScripts.colors[plot_count],
                                                  ls = PlotScripts.linestyles[0],
                                                  rasterized = True, label = label,
                                                  lw = PlotScripts.global_linewidth)

                        #w = np.random.randint(0,
                        #                      len(mass_global[model_number][snapshot_idx][0]),
                        #                      size=500)
                        #sc = ax2[count_x, count%2].scatter(mass_global[model_number][snapshot_idx][0][w],
                        #                                   fesc_global[model_number][snapshot_idx][0][w],
                        #                                   c=np.log10(Ngamma_global[model_number][snapshot_idx][0][w]*1.0e50),
                        #                                   alpha = 0.5,cmap='plasma')
                        #plt.colorbar(sc)
                        #ax2[count_x, count%2].hexbin(mass_global[model_number][snapshot_idx],
                        #                             fesc_global[model_number][snapshot_idx],
                        #                             C=Ngamma_global[model_number][snapshot_idx])

                    plot_count += 1
                    if (plot_count == len(PlotSnapList[model_number])):
                        break

        ## Stellar Mass plots ##
        if paper_plots == 0:
            adjust_stellarmass_plot(ax1)
        else:
            adjust_paper_plots(ax, model_tags)

            leg = ax[0,0].legend(loc="upper right", numpoints=1, labelspacing=0.1)
            leg.draw_frame(False)  # Don't want a box frame
            for t in leg.get_texts():  # Reduce the size of the text
                t.set_fontsize('medium')

            plt.tight_layout()
            plt.subplots_adjust(wspace = 0.0, hspace = 0.0)

            #leg = ax2[0,0].legend(loc="upper right", numpoints=1, labelspacing=0.1)
            #leg.draw_frame(False)  # Don't want a box frame
            #for t in leg.get_texts():  # Reduce the size of the text
            #    t.set_fontsize('medium')

            plt.tight_layout()
            plt.subplots_adjust(wspace = 0.0, hspace = 0.0)

        ## Output ##
        outputFile = './%s%s' %(output_tag, output_format)
        fig.savefig(outputFile, bbox_inches='tight')  # Save the figure
        print('Saved file to {0}'.format(outputFile))
        plt.close(fig)

        if paper_plots == 1:
            # fig2 (the scatter companion) only exists in paper-plot mode.
            outputFile = './%s_scatter%s' %(output_tag, output_format)
            fig2.savefig(outputFile, bbox_inches='tight')  # Save the figure
            print('Saved file to {0}'.format(outputFile))
            plt.close(fig2)
##
def plot_reionmod_galaxy(SnapList, PlotSnapList, simulation_norm,
                         mean_galaxy_reionmod, std_galaxy_reionmod, N_galaxy_reionmod,
                         mean_galaxy_reionmod_gnedin, std_galaxy_reionmod_gnedin,
                         model_tags, paper_plots, output_tag):
    """
    Plots the reionization modifier as a function of galaxy stellar mass,
    comparing the model's modifier against the Gnedin prescription.

    Parallel compatible (statistics are gathered with collect_across_tasks
    and only rank 0 plots).  Produces two figures: one panelled by model
    (``output_tag``) and one panelled by redshift
    (``output_tag`` + "_redshiftpanels").

    Parameters follow the same conventions as plot_fesc_galaxy: nested
    [model][snapshot][bin] arrays of means/stds/counts, binned by stellar
    mass in log(Msun).

    NOTE(review): ``simulation_norm`` is accepted but — unlike the sibling
    functions — no AllVars.Set_Params_* normalisation is performed here, so
    AllVars.SnapZ retains whatever the previous call set.  Verify this is
    intentional.
    """

    def adjust_paper_plots(ax, model_tags):
        # Cosmetic pass for the 2x2 model-panelled figure (one model per
        # panel, annotated with its model tag).
        ax[1,0].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                           size = PlotScripts.global_fontsize)
        ax[1,1].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                           size = PlotScripts.global_fontsize)

        ax[0,0].set_ylabel(r'$\mathbf{\langle ReionMod\rangle_{M_*}}$',
                           size = PlotScripts.global_labelsize)
        ax[1,0].set_ylabel(r'$\mathbf{\langle ReionMod\rangle_{M_*}}$',
                           size = PlotScripts.global_labelsize)

        ax_x = [0, 0, 1, 1]
        ax_y = [0, 1, 0, 1]
        for count, (x, y) in enumerate(zip(ax_x, ax_y)):
            ax[x,y].set_xlim([4.8, 10.4])
            ax[x,y].set_ylim([0.00, 1.05])

            #ax[x,y].yaxis.set_major_locator(mtick.MultipleLocator(0.1))
            ax[x,y].xaxis.set_major_locator(mtick.MultipleLocator(1.0))
            #ax[x,y].yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
            ax[x,y].xaxis.set_minor_locator(mtick.MultipleLocator(0.25))

            ax[x,y].tick_params(which = 'both', direction='in', width =
                                PlotScripts.global_tickwidth)
            ax[x,y].tick_params(which = 'major', length = PlotScripts.global_ticklength)
            ax[x,y].tick_params(which = 'minor',
                                length = PlotScripts.global_ticklength - 2)

            for axis in ['top','bottom','left','right']: # Adjust axis thickness.
                ax[x,y].spines[axis].set_linewidth(PlotScripts.global_axiswidth)

            print(model_tags[count])
            label = model_tags[count]
            ax[x,y].text(0.05, 0.65, label, transform = ax[x,y].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)

        tick_locs = np.arange(4.0, 11.0)
        ax[1,0].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                                fontsize = PlotScripts.global_fontsize)
        ax[1,1].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                                fontsize = PlotScripts.global_fontsize)

        #tick_locs = np.arange(-0.1, 0.80, 0.10)
        #ax[0,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
        #fontsize = PlotScripts.global_fontsize)
        #ax[1,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
        #                        fontsize = PlotScripts.global_fontsize)

    def adjust_redshift_panels(ax, redshift_tags):
        # Same cosmetic pass as adjust_paper_plots but panels are annotated
        # with redshift labels instead of model tags (for fig2).
        ax[1,0].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                           size = PlotScripts.global_fontsize)
        ax[1,1].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                           size = PlotScripts.global_fontsize)

        ax[0,0].set_ylabel(r'$\mathbf{\langle ReionMod\rangle_{M_*}}$',
                           size = PlotScripts.global_labelsize)
        ax[1,0].set_ylabel(r'$\mathbf{\langle ReionMod\rangle_{M_*}}$',
                           size = PlotScripts.global_labelsize)

        ax_x = [0, 0, 1, 1]
        ax_y = [0, 1, 0, 1]
        for count, (x, y) in enumerate(zip(ax_x, ax_y)):
            ax[x,y].set_xlim([4.8, 10.4])
            ax[x,y].set_ylim([0.00, 1.05])

            #ax[x,y].yaxis.set_major_locator(mtick.MultipleLocator(0.1))
            ax[x,y].xaxis.set_major_locator(mtick.MultipleLocator(1.0))
            #ax[x,y].yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
            ax[x,y].xaxis.set_minor_locator(mtick.MultipleLocator(0.25))

            ax[x,y].tick_params(which = 'both', direction='in', width =
                                PlotScripts.global_tickwidth)
            ax[x,y].tick_params(which = 'major', length = PlotScripts.global_ticklength)
            ax[x,y].tick_params(which = 'minor',
                                length = PlotScripts.global_ticklength - 2)

            for axis in ['top','bottom','left','right']: # Adjust axis thickness.
                ax[x,y].spines[axis].set_linewidth(PlotScripts.global_axiswidth)

            label = redshift_tags[count]
            ax[x,y].text(0.05, 0.65, label, transform = ax[x,y].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)

        tick_locs = np.arange(4.0, 11.0)
        ax[1,0].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                                fontsize = PlotScripts.global_fontsize)
        ax[1,1].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                                fontsize = PlotScripts.global_fontsize)

    print("Reionization Modifier as a function of stellar mass.")

    ## Array initialization ##
    # Gather per-task binned statistics for both the model's modifier and
    # the Gnedin-prescription modifier onto rank 0.
    master_mean_reionmod_stellar, master_std_reionmod_stellar, master_N_reionmod_stellar, master_bin_middle_stellar = \
        collect_across_tasks(mean_galaxy_reionmod, std_galaxy_reionmod, N_galaxy_reionmod,
                             SnapList, PlotSnapList, True, m_gal_low, m_gal_high)

    # NOTE(review): the Gnedin gather reuses N_galaxy_reionmod for its counts
    # rather than a Gnedin-specific N array — presumably the sample per bin is
    # identical for both modifiers; confirm.
    master_mean_reionmod_gnedin_stellar, master_std_reionmod_gnedin_stellar, master_N_reionmod_gnedin_stellar, master_bin_middle_stellar = \
        collect_across_tasks(mean_galaxy_reionmod_gnedin, std_galaxy_reionmod_gnedin, N_galaxy_reionmod,
                             SnapList, PlotSnapList, True, m_gal_low, m_gal_high)

    if rank == 0:  # Only the root task plots.
        if paper_plots == 0:
            fig = plt.figure()
            ax1 = fig.add_subplot(111)
        else:
            fig, ax = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(16, 6))
            fig2, ax2 = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(16, 6))

        delta_fontsize = 0
        caps = 5
        ewidth = 1.5

        # First pass: model-panelled figure (fig/ax), curves coloured by
        # redshift within each model's panel.
        count_x = 0
        for count, model_number in enumerate(range(0, len(SnapList))):
            if count == 2:  # Third model onwards goes on the second row.
                count_x += 1

            plot_count = 0
            for snapshot_idx in range(0, len(SnapList[model_number])):
                if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
                    if (model_number == 0):
                        label = r"$\mathbf{z = " + \
                                str(int(round(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]))) +\
                                "}$"
                    else:
                        label = ""

                    ## Plots as a function of stellar mass ##
                    # Mask sparse bins (< 4 galaxies) with NaN, in both arrays.
                    w = np.where((master_N_reionmod_stellar[model_number][snapshot_idx] < 4))[0] # If there are no galaxies in the bin we don't want to plot.
                    master_mean_reionmod_stellar[model_number][snapshot_idx][w] = np.nan
                    master_mean_reionmod_gnedin_stellar[model_number][snapshot_idx][w] = np.nan

                    if paper_plots == 0:
                        ax1.plot(master_bin_middle_stellar[model_number][snapshot_idx],
                                 master_mean_reionmod_stellar[model_number][snapshot_idx],
                                 color = PlotScripts.colors[plot_count],
                                 ls = PlotScripts.linestyles[model_number],
                                 rasterized = True, label = label,
                                 lw = PlotScripts.global_linewidth)
                    else:
                        ax[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
                                                  master_mean_reionmod_stellar[model_number][snapshot_idx],
                                                  color = PlotScripts.colors[plot_count],
                                                  ls = PlotScripts.linestyles[0],
                                                  rasterized = True, label = label,
                                                  lw = PlotScripts.global_linewidth)

                        ax[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
                                                  master_mean_reionmod_gnedin_stellar[model_number][snapshot_idx],
                                                  color = PlotScripts.colors[plot_count],
                                                  ls = PlotScripts.linestyles[1],
                                                  rasterized = True, label = label,
                                                  lw = PlotScripts.global_linewidth)

                    plot_count += 1
                    if (plot_count == len(PlotSnapList[model_number])):
                        break

        # Second pass: redshift-panelled figure (fig2/ax2), one panel per
        # plotted snapshot, curves coloured by model.  Gnedin reference is
        # drawn once (for model 0) as a dashed black curve.
        z_labels = []
        for model_number in range(0, len(SnapList)):
            count_x = 0
            plot_count = 0
            for count, snapshot_idx in enumerate(range(len(SnapList[model_number]))):
                if count == 2:
                    count_x += 1
                if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
                    label = model_tags[model_number]
                    if (model_number == 0):
                        z_label = r"$\mathbf{z = " + \
                                  str(int(round(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]))) +\
                                  "}$"
                        z_labels.append(z_label)

                    ## Plots as a function of stellar mass ##
                    # Re-applies the same sparse-bin NaN mask (idempotent).
                    w = np.where((master_N_reionmod_stellar[model_number][snapshot_idx] < 4))[0] # If there are no galaxies in the bin we don't want to plot.
                    master_mean_reionmod_stellar[model_number][snapshot_idx][w] = np.nan
                    master_mean_reionmod_gnedin_stellar[model_number][snapshot_idx][w] = np.nan

                    if (model_number == 0):
                        print(master_mean_reionmod_stellar[model_number][snapshot_idx])

                    ax2[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
                                               master_mean_reionmod_stellar[model_number][snapshot_idx],
                                               color = PlotScripts.colors[model_number],
                                               ls = PlotScripts.linestyles[model_number],
                                               rasterized = True, label = label,
                                               lw = PlotScripts.global_linewidth)

                    if (model_number == 0):
                        ax2[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
                                                   master_mean_reionmod_gnedin_stellar[model_number][snapshot_idx],
                                                   color = 'k',
                                                   ls = '--',
                                                   rasterized = True, label = "Gnedin",
                                                   lw = PlotScripts.global_linewidth)

                    plot_count += 1
                    if (plot_count == len(PlotSnapList[model_number])):
                        break

        ## Stellar Mass plots ##
        if paper_plots == 0:
            # NOTE(review): adjust_stellarmass_plot is not defined in this
            # function's scope (it is a nested helper of plot_fesc_galaxy /
            # plot_nion_galaxy), so this branch raises NameError when
            # paper_plots == 0 — confirm and define a local helper if the
            # single-panel mode is actually used.
            adjust_stellarmass_plot(ax1)
        else:
            adjust_paper_plots(ax, model_tags)
            print(z_labels)
            adjust_redshift_panels(ax2, z_labels)

            leg = ax[0,0].legend(loc="upper right", numpoints=1, labelspacing=0.1)
            leg.draw_frame(False)  # Don't want a box frame
            for t in leg.get_texts():  # Reduce the size of the text
                t.set_fontsize('medium')

            leg = ax2[0,0].legend(loc="upper right", numpoints=1, labelspacing=0.1)
            leg.draw_frame(False)  # Don't want a box frame
            for t in leg.get_texts():  # Reduce the size of the text
                t.set_fontsize('medium')

            plt.tight_layout()
            plt.subplots_adjust(wspace = 0.0, hspace = 0.0)

            #leg = ax2[0,0].legend(loc="upper right", numpoints=1, labelspacing=0.1)
            #leg.draw_frame(False)  # Don't want a box frame
            #for t in leg.get_texts():  # Reduce the size of the text
            #    t.set_fontsize('medium')

            plt.tight_layout()
            plt.subplots_adjust(wspace = 0.0, hspace = 0.0)

        ## Output ##
        outputFile = "{0}{1}".format(output_tag, output_format)
        fig.savefig(outputFile, bbox_inches='tight')  # Save the figure
        print('Saved file to {0}'.format(outputFile))
        plt.close(fig)

        outputFile2 = "{0}_redshiftpanels{1}".format(output_tag, output_format)
        fig2.savefig(outputFile2, bbox_inches='tight')  # Save the figure
        print('Saved file to {0}'.format(outputFile2))
        plt.close(fig2)
##
def plot_nion_galaxy(SnapList, PlotSnapList, simulation_norm,
                     mean_Ngamma_galaxy, std_Ngamma_galaxy, N_Ngamma_galaxy,
                     model_tags, paper_plots, output_tag):
    """
    Plots the number of ionizing photons emitted (not necessarily escaped) as a
    function of galaxy stellar mass.

    Parallel compatible.

    Accepts 3D arrays of the escape fraction binned into Stellar Mass bins to
    plot the escape fraction for multiple models.

    Mass units are log(Msun).

    Parameters
    ----------
    SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
        Snapshots for each model.
    simulation_norm : array with length equal to the number of models.
        Denotes which simulation each model uses.
        0 : MySim
        1 : Mini-Millennium
        2 : Tiamat (down to z = 5)
        3 : Extended Tiamat (down to z = 1.6ish).
        4 : Britton's Simulation
        5 : Kali
    mean_Ngamma_galaxy, std_Ngamma_galaxy, N_Ngamma_galaxy : Nested
    3-dimensional array, mean_Ngamma_galaxy[model_number0][snapshot0] = [bin0_meanNgamma, ..., binN_meanNgamma], with length equal to the number of models.
        Mean/Standard deviation for Ngamma in each stellar mass bin, for each
        [model_number] and [snapshot_number].  N_Ngamma_galaxy is the number
        of galaxies placed into each mass bin.
    model_tags : array of strings with length equal to the number of models.
        Strings that contain the tag for each model.  Will be placed on the plot.
    paper_plots : Integer.
        Flag to denote whether we should plot a full, 4 panel plot for the
        RSAGE paper.
    output_tag : string
        Name of the file that will be generated.

    Returns
    -------
    No returns.
    Generates and saves the plot (named via output_tag).

    Units
    -----
    Mass units are log(Msun).
    Ngamma units are 1.0e50 photons/s.
    """

    def adjust_stellarmass_plot(ax):
        # Cosmetic pass for the single-panel (paper_plots == 0) figure.
        #ax.axhline(0.20, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
        #ax.text(7.8, 0.22, r"$f_\mathrm{esc, base}$", color = 'k',
        #        size = PlotScripts.global_fontsize)

        ax.set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                      size = PlotScripts.global_fontsize)
        ax.set_ylabel(r'$\mathbf{\log_{10}\langle f_{esc} N_\gamma\rangle_{M_*}}$',
                      size = PlotScripts.global_labelsize)

        ax.set_xlim([6.8, 10])
        #ax.set_ylim([0.05, 0.45])

        #ax.axhline(0.35, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
        #ax.text(9.1, 0.37, r"$f_\mathrm{esc} = 0.35$", color = 'k',
        #        size = PlotScripts.global_fontsize)

        ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
        #ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
        ax.tick_params(which = 'both', direction='in', width =
                       PlotScripts.global_tickwidth)
        ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
        ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)

        for axis in ['top','bottom','left','right']: # Adjust axis thickness.
            ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)

        tick_locs = np.arange(6.0, 11.0)
        ax.set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                           fontsize = PlotScripts.global_fontsize)

        #tick_locs = np.arange(0.0, 0.80, 0.10)
        #ax.set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
        #                   fontsize = PlotScripts.global_fontsize)

        '''
        labels = ax.yaxis.get_ticklabels()
        locs = ax.yaxis.get_ticklocs()
        for label, loc in zip(labels, locs):
            print("{0} {1}".format(label, loc))
        '''

        leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize('medium')

    def adjust_paper_plots(ax, z_tags):
        # Cosmetic pass for the 2x2 paper figure; panels are annotated with
        # redshift (derived from z_tags) rather than model tags.
        ax[1,0].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                           size = PlotScripts.global_fontsize)
        ax[1,1].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                           size = PlotScripts.global_fontsize)

        ax[0,0].set_ylabel(r'$\mathbf{\Sigma log_{10}\langle f_{esc} N_\gamma\rangle_{M_*}}$',
                           size = PlotScripts.global_labelsize - 10)
        ax[1,0].set_ylabel(r'$\mathbf{\Sigma log_{10}\langle f_{esc} N_\gamma\rangle_{M_*}}$',
                           size = PlotScripts.global_labelsize - 10)

        ax_x = [0, 0, 1, 1]
        ax_y = [0, 1, 0, 1]
        for count, (x, y) in enumerate(zip(ax_x, ax_y)):
            ax[x,y].set_xlim([4.8, 10.4])
            ax[x,y].set_ylim([47, 55])

            #ax[x,y].yaxis.set_major_locator(mtick.MultipleLocator(0.1))
            ax[x,y].xaxis.set_major_locator(mtick.MultipleLocator(1.0))
            #ax[x,y].yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
            ax[x,y].xaxis.set_minor_locator(mtick.MultipleLocator(0.25))

            ax[x,y].tick_params(which = 'both', direction='in', width =
                                PlotScripts.global_tickwidth)
            ax[x,y].tick_params(which = 'major', length = PlotScripts.global_ticklength)

            for axis in ['top','bottom','left','right']: # Adjust axis thickness.
                ax[x,y].spines[axis].set_linewidth(PlotScripts.global_axiswidth)

            print(z_tags[count])
            label = r"$\mathbf{z = " + \
                    str(int(round(float(z_tags[count])))) +\
                    "}$"
            ax[x,y].text(0.7, 0.8, label, transform = ax[x,y].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)

        tick_locs = np.arange(4.0, 11.0)
        ax[1,0].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                                fontsize = PlotScripts.global_fontsize)
        ax[1,1].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                                fontsize = PlotScripts.global_fontsize)

        #tick_locs = np.arange(0.0, 0.80, 0.10)
        #ax[0,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
        #                        fontsize = PlotScripts.global_fontsize)
        #ax[1,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
        #                        fontsize = PlotScripts.global_fontsize)

        # Debug dump of tick label/location pairs for the lower-left panel.
        print("x")
        labels = ax[1,0].xaxis.get_ticklabels()
        locs = ax[1,0].xaxis.get_ticklocs()
        for label, loc in zip(labels, locs):
            print("{0} {1}".format(label, loc))

        print("y")
        labels = ax[1,0].yaxis.get_ticklabels()
        locs = ax[1,0].yaxis.get_ticklocs()
        for label, loc in zip(labels, locs):
            print("{0} {1}".format(label, loc))

    print("Plotting Ngamma*fesc as a function of stellar mass.")

    ## Array initialization ##
    # Gather per-task binned Ngamma statistics onto rank 0.
    master_mean_Ngamma_stellar, master_std_Ngamma_stellar, master_N_Ngamma_stellar, master_bin_middle_stellar = \
        collect_across_tasks(mean_Ngamma_galaxy, std_Ngamma_galaxy, N_Ngamma_galaxy,
                             SnapList, PlotSnapList, True, m_gal_low, m_gal_high)

    if rank == 0:  # Only the root task plots.
        if paper_plots == 0:
            fig = plt.figure()
            ax1 = fig.add_subplot(111)
        else:
            fig, ax = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(16, 6))

        delta_fontsize = 0
        caps = 5
        ewidth = 1.5

        # NOTE(review): z_tags is sized like model_tags but written at the
        # snapshot-loop index `count` below — this assumes the number of
        # plotted snapshots does not exceed the number of models; confirm.
        z_tags = np.zeros_like(model_tags, dtype=np.float32)
        for model_number in range(0, len(SnapList)):
            count_x = 0

            ## Normalization for each model. ##
            # Sets global cosmology/snapshot tables (AllVars.SnapZ etc.).
            if (simulation_norm[model_number] == 0):
                AllVars.Set_Params_Mysim()
            elif (simulation_norm[model_number] == 1):
                AllVars.Set_Params_MiniMill()
            elif (simulation_norm[model_number] == 2):
                AllVars.Set_Params_Tiamat()
            elif (simulation_norm[model_number] == 3):
                AllVars.Set_Params_Tiamat_extended()
            elif (simulation_norm[model_number] == 4):
                AllVars.Set_Params_Britton()
            elif(simulation_norm[model_number] == 5):
                AllVars.Set_Params_Kali()

            plot_count = 0
            for count, snapshot_idx in enumerate(range(0, len(SnapList[model_number]))):
                if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
                    # NOTE(review): unlike the sibling functions, count_x is
                    # advanced inside the snapshot-match guard here — verify
                    # the intended panel layout.
                    if count == 2:
                        count_x += 1
                    label = model_tags[model_number]
                    z_tags[count] = float(AllVars.SnapZ[SnapList[model_number][snapshot_idx]])

                    ## Plots as a function of stellar mass ##
                    # Mask sparse bins (< 4 galaxies) with NaN (in place).
                    w = np.where((master_N_Ngamma_stellar[model_number][snapshot_idx] < 4))[0] # If there are no galaxies in the bin we don't want to plot.
                    master_mean_Ngamma_stellar[model_number][snapshot_idx][w] = np.nan

                    # Ngamma is stored in units of 1.0e50 photons/s; rescale
                    # before taking log10 for plotting.
                    if paper_plots == 0:
                        ax1.plot(master_bin_middle_stellar[model_number][snapshot_idx],
                                 np.log10(master_mean_Ngamma_stellar[model_number][snapshot_idx]*1.0e50),
                                 color = PlotScripts.colors[plot_count],
                                 ls = PlotScripts.linestyles[model_number],
                                 rasterized = True, label = label,
                                 lw = PlotScripts.global_linewidth)
                    else:
                        ax[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
                                                  np.log10(master_mean_Ngamma_stellar[model_number][snapshot_idx]*1.0e50),
                                                  color = PlotScripts.colors[model_number],
                                                  ls = PlotScripts.linestyles[model_number],
                                                  rasterized = True, label = label,
                                                  lw = PlotScripts.global_linewidth)

                    plot_count += 1
                    if (plot_count == len(PlotSnapList[model_number])):
                        break

        ## Stellar Mass plots ##
        if paper_plots == 0:
            adjust_stellarmass_plot(ax1)
        else:
            adjust_paper_plots(ax, z_tags)

            leg = ax[0,0].legend(loc="upper left", numpoints=1, labelspacing=0.1)
            leg.draw_frame(False)  # Don't want a box frame
            for t in leg.get_texts():  # Reduce the size of the text
                t.set_fontsize('medium')

            plt.tight_layout()
            plt.subplots_adjust(wspace = 0.0, hspace = 0.0)

        ## Output ##
        outputFile = './%s%s' %(output_tag, output_format)
        fig.savefig(outputFile, bbox_inches='tight')  # Save the figure
        print('Saved file to {0}'.format(outputFile))
        plt.close(fig)
##
def plot_photo_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_photo_galaxy, std_photo_galaxy, N_photo_galaxy,
model_tags, paper_plots, output_tag):
"""
Plots the photoionization rate as a function of galaxy stellar mass.
Parallel compatible.
Accepts 3D arrays of the escape fraction binned into Stellar Mass bins to plot the escape fraction for multiple models.
Mass units are log(Msun)
Parameters
---------
SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots for each model.
simulation_norm : array with length equal to the number of models.
Denotes which simulation each model uses.
0 : MySim
1 : Mini-Millennium
2 : Tiamat (down to z = 5)
3 : Extended Tiamat (down to z = 1.6ish).
4 : Britton's Simulation
5 : Kali
mean_photo_galaxy, std_photo_galaxy, N_photo_galaxy : Nested
3-dimensional array, mean_photo_galaxy[model_number0][snapshot0] =
[bin0_meanphoto, ..., binN_meanphoto], with length equal to the number of models.
Mean/Standard deviation for Photionization Rate in each stellar mass
bin, for each [model_number] and [snapshot_number]. N_photo_galaxy is
the number of galaxies placed into each mass bin.
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
paper_plots: Integer.
Flag to denote whether we should plot a full, 4 panel plot for the
RSAGE paper.
output_tag : string
Name of the file that will be generated.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
Mass units are log(Msun).
Ngamma units are 1.0e50 photons/s.
"""
def adjust_stellarmass_plot(ax):
    """
    Applies axis labels, limits, tick styling and legend formatting to the
    photoionization-rate-vs-stellar-mass panel.

    Parameters
    ----------
    ax : matplotlib axis
        Axis to adjust in place.
    """
    ax.set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                  size = PlotScripts.global_fontsize)
    # Bug fix: the closing ']' of the units bracket was missing
    # (was '[s^{-1}}$'), which rendered a broken label.
    ax.set_ylabel(r'$\mathbf{log_{10} \: \Gamma \: [s^{-1}]}$',
                  size = PlotScripts.global_labelsize)
    ax.set_xlim([4.8, 10])

    ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
    ax.tick_params(which = 'both', direction='in', width =
                   PlotScripts.global_tickwidth)
    ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
    ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)

    for axis in ['top','bottom','left','right']:  # Adjust axis thickness.
        ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)

    leg = ax.legend(loc="lower right", numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)  # Don't want a box frame.
    for t in leg.get_texts():  # Reduce the size of the legend text.
        t.set_fontsize('medium')
print("Plotting photoionization rate as a function of stellar mass.")
## Array initialization ##
master_mean_photo_stellar, master_std_photo_stellar, master_N_photo_stellar, master_bin_middle_stellar = \
collect_across_tasks(mean_photo_galaxy, std_photo_galaxy, N_photo_galaxy,
SnapList, PlotSnapList, True, m_gal_low, m_gal_high)
if rank == 0:
if paper_plots == 0:
fig = plt.figure()
ax1 = fig.add_subplot(111)
else:
pass
for model_number in range(0, len(SnapList)):
count_x = 0
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
plot_count = 0
for count, snapshot_idx in enumerate(range(0, len(SnapList[model_number]))):
if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
if (model_number == 0):
label = r"$\mathbf{z = " + \
str(int(round(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]))) +\
"}$"
else:
label = ""
## Plots as a function of stellar mass ##
w = np.where((master_N_photo_stellar[model_number][snapshot_idx] < 4))[0] # If there are no galaxies in the bin we don't want to plot.
master_mean_photo_stellar[model_number][snapshot_idx][w] = np.nan
if paper_plots == 0:
ax1.plot(master_bin_middle_stellar[model_number][snapshot_idx],
np.log10(master_mean_photo_stellar[model_number][snapshot_idx]),
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
else:
pass
plot_count += 1
if (plot_count == len(PlotSnapList[model_number])):
break
for model_number in range(0, len(SnapList)):
ax1.plot(np.nan, np.nan, color = 'k',
label = model_tags[model_number],
lw = PlotScripts.global_linewidth,
ls = PlotScripts.linestyles[model_number])
## Stellar Mass plots ##
if paper_plots == 0:
adjust_stellarmass_plot(ax1)
else:
pass
## Output ##
outputFile = './%s%s' %(output_tag, output_format)
fig.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close(fig)
##
##
def plot_sfr_galaxy(SnapList, PlotSnapList, simulation_norm,
                    mean_galaxy_sfr, std_galaxy_sfr,
                    mean_galaxy_ssfr, std_galaxy_ssfr,
                    N_galaxy, model_tags, output_tag):
    """
    Plots the star formation rate (SFR) and specific star formation rate
    (sSFR) as a function of stellar mass (two separate figures).

    Parallel compatible: the per-task binned statistics are pooled via
    ``collect_across_tasks`` and only rank 0 draws and saves the figures.
    Accepts 3D arrays of the SFR/sSFR binned into stellar mass bins.
    Mass units log(Msun).

    Parameters
    ---------
    SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
        Snapshots for each model.
    PlotSnapList : Nested array with the same layout as ``SnapList``.
        Subset of snapshots that are actually plotted.
    simulation_norm : array with length equal to the number of models.
        Denotes which simulation each model uses.
        0 : MySim
        1 : Mini-Millennium
        2 : Tiamat (down to z = 5)
        3 : Extended Tiamat (down to z = 1.6ish).
        4 : Britton's Simulation
        5 : Kali
    mean_galaxy_sfr, std_galaxy_sfr, mean_galaxy_ssfr, std_galaxy_ssfr : Nested 3-dimensional arrays,
        mean_galaxy_sfr[model_number0][snapshot0] = [bin0_mean, ..., binN_mean],
        with length equal to the number of models.
        Mean/Standard deviation for (s)SFR in each stellar mass bin, for each
        [model_number] and [snapshot_number].
    N_galaxy : Nested 3-dimensional array.
        Number of galaxies placed into each stellar mass bin.
    model_tags : array of strings with length equal to the number of models.
        Strings that contain the tag for each model. Will be placed on the plot.
    output_tag : string
        Base name of the files that will be generated ("<tag>SFR" and
        "<tag>sSFR").

    Returns
    -------
    No returns.
    Generates and saves the plots (named via output_tag).

    Units
    -----
    Mass units are 1e10 Msun (no h).
    """

    def _adjust_mass_axis(ax, ylabel, ylim, y_minor_tick):
        # Shared styling for both panels: the original file duplicated this
        # block in adjust_sfr_plot/adjust_ssfr_plot almost verbatim.
        ax.set_xlabel(r'$\log_{10}\ M_*\ [M_{\odot}]$',
                      size = PlotScripts.global_fontsize)
        ax.set_ylabel(ylabel, size = PlotScripts.global_labelsize)

        ax.set_xlim([4.8, 10])
        ax.set_ylim(ylim)

        ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
        ax.yaxis.set_minor_locator(mtick.MultipleLocator(y_minor_tick))
        ax.tick_params(which = 'both', direction='in',
                       width = PlotScripts.global_tickwidth)
        ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
        ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)

        for axis in ['top','bottom','left','right']:  # Adjust axis thickness.
            ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)

        tick_locs = np.arange(6.0, 11.0)
        ax.set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                           fontsize = PlotScripts.global_fontsize)

        leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame.
        for t in leg.get_texts():  # Reduce the size of the legend text.
            t.set_fontsize('medium')

    def adjust_sfr_plot(ax):
        _adjust_mass_axis(
            ax,
            r'$\mathbf{\langle \mathrm{SFR}\rangle_{M_*}\:[M_\odot\mathrm{yr}^{-1}]}$',
            [-3, 2], 0.25)

    def adjust_ssfr_plot(ax):
        # Bug fix: the closing ']' of the units bracket was missing
        # (was '[\mathrm{yr^{-1}}}$'), rendering a broken label.
        _adjust_mass_axis(
            ax,
            r'$\mathbf{\langle\mathrm{sSFR}\rangle_{M_*}\:[\mathrm{yr^{-1}}]}$',
            [-9, -4], 0.1)

    print("Plotting sSFR as a function of stellar mass.")

    ## Pool the binned statistics from all MPI tasks. ##
    master_mean_sfr_stellar, master_std_sfr_stellar, master_N_sfr_stellar, master_bin_middle_stellar = \
        collect_across_tasks(mean_galaxy_sfr, std_galaxy_sfr, N_galaxy,
                             SnapList, PlotSnapList, True, m_gal_low, m_gal_high)

    master_mean_ssfr_stellar, master_std_ssfr_stellar, master_N_ssfr_stellar, master_bin_middle_stellar = \
        collect_across_tasks(mean_galaxy_ssfr, std_galaxy_ssfr, N_galaxy,
                             SnapList, PlotSnapList, True, m_gal_low, m_gal_high)

    if rank == 0:
        fig = plt.figure()
        ax1 = fig.add_subplot(111)

        fig2 = plt.figure()
        ax2 = fig2.add_subplot(111)

        for model_number in range(0, len(SnapList)):
            ## Normalization for each model. ##
            if (simulation_norm[model_number] == 0):
                AllVars.Set_Params_Mysim()
            elif (simulation_norm[model_number] == 1):
                AllVars.Set_Params_MiniMill()
            elif (simulation_norm[model_number] == 2):
                AllVars.Set_Params_Tiamat()
            elif (simulation_norm[model_number] == 3):
                AllVars.Set_Params_Tiamat_extended()
            elif (simulation_norm[model_number] == 4):
                AllVars.Set_Params_Britton()
            elif (simulation_norm[model_number] == 5):
                AllVars.Set_Params_Kali()

            plot_count = 0
            for snapshot_idx in range(0, len(SnapList[model_number])):
                if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
                    # Only the first model carries the redshift labels;
                    # later models reuse the same colours.
                    if (model_number == 0):
                        label = r"$\mathbf{z = " + \
                                str(int(round(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]))) + \
                                "}$"
                    else:
                        label = ""

                    ## Plots as a function of stellar mass ##
                    ax1.plot(master_bin_middle_stellar[model_number][snapshot_idx],
                             master_mean_sfr_stellar[model_number][snapshot_idx],
                             color = PlotScripts.colors[plot_count],
                             ls = PlotScripts.linestyles[model_number],
                             rasterized = True, label = label,
                             lw = PlotScripts.global_linewidth)

                    ax2.plot(master_bin_middle_stellar[model_number][snapshot_idx],
                             master_mean_ssfr_stellar[model_number][snapshot_idx],
                             color = PlotScripts.colors[plot_count],
                             ls = PlotScripts.linestyles[model_number],
                             rasterized = True, label = label,
                             lw = PlotScripts.global_linewidth)

                    plot_count += 1
                    if (plot_count == len(PlotSnapList[model_number])):
                        break

        ## Stellar Mass plots ##
        adjust_sfr_plot(ax1)
        adjust_ssfr_plot(ax2)

        ## Output ##
        outputFile = "./{0}SFR{1}".format(output_tag, output_format)
        fig.savefig(outputFile, bbox_inches='tight')  # Save the figure.
        print('Saved file to {0}'.format(outputFile))

        outputFile = "./{0}sSFR{1}".format(output_tag, output_format)
        fig2.savefig(outputFile, bbox_inches='tight')  # Save the figure.
        print('Saved file to {0}'.format(outputFile))

        plt.close(fig)
        plt.close(fig2)  # Bug fix: fig2 was previously never closed (figure leak).
##
##
def plot_fej_Ngamma(SnapList, PlotSnapList, simulation_norm,
                    mean_Ngamma_fej, std_Ngamma_fej,
                    N_fej, model_tags, output_tag):
    """
    Plots the mean number of ionizing photons (Ngamma) as a function of the
    ejected baryon fraction (fej).

    Only rank 0 draws and saves the figure; the per-task binned statistics
    are first pooled with ``collect_across_tasks``.

    Parameters
    ----------
    SnapList : Nested array, SnapList[model_number] = [snapshot0, ..., snapshotN].
        Snapshots analysed for each model.
    PlotSnapList : Nested array with the same layout as ``SnapList``.
        Subset of snapshots that are actually plotted.
    simulation_norm : array with length equal to the number of models.
        Denotes which simulation each model uses.
        0 : MySim, 1 : Mini-Millennium, 2 : Tiamat, 3 : Extended Tiamat,
        4 : Britton's Simulation, 5 : Kali.
    mean_Ngamma_fej, std_Ngamma_fej, N_fej : Nested 3-dimensional arrays,
        mean_Ngamma_fej[model_number][snapshot] = [bin0_mean, ..., binN_mean].
        Mean/standard deviation of Ngamma in each fej bin and the number of
        galaxies per bin.  Ngamma is rescaled by 1.0e50 before plotting, so it
        is presumably stored in units of 1.0e50 photons/s -- confirm against
        the caller.
    model_tags : array of strings with length equal to the number of models.
        Legend tag for each model.
    output_tag : string
        Name of the file that will be generated (combined with the global
        ``output_format``).

    Returns
    -------
    No returns.
    Generates and saves the plot (named via output_tag).
    """

    def adjust_plot(ax):
        # Axis labels, limits, tick styling and legend for the fej panel.
        ax.set_xlabel(r'$\mathbf{f_\mathrm{ej}}$',
                      size = PlotScripts.global_fontsize)
        ax.set_ylabel(r'$\mathbf{\log_{10}\langle N_\gamma\rangle_{f_{ej}}}$',
                      size = PlotScripts.global_labelsize)

        ax.set_xlim([0.0, 1.0])
        #ax.set_ylim([0.05, 0.45])

        #ax.axhline(0.35, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
        #ax.text(9.1, 0.37, r"$f_\mathrm{esc} = 0.35$", color = 'k',
        #        size = PlotScripts.global_fontsize)

        ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.10))
        #ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
        ax.tick_params(which = 'both', direction='in', width =
                       PlotScripts.global_tickwidth)
        ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
        ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)

        for axis in ['top','bottom','left','right']: # Adjust axis thickness.
            ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)

        #tick_locs = np.arange(6.0, 11.0)
        #ax.set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
        #                   fontsize = PlotScripts.global_fontsize)

        #tick_locs = np.arange(0.0, 0.80, 0.10)
        #ax.set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
        #                   fontsize = PlotScripts.global_fontsize)

        # NOTE(review): debug output -- prints every x tick label/location.
        labels = ax.xaxis.get_ticklabels()
        locs = ax.xaxis.get_ticklocs()
        for label, loc in zip(labels, locs):
            print("{0} {1}".format(label, loc))

        leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize('medium')

    ## Array initialization ##
    master_mean_Ngamma_fej, master_std_Ngamma_fej, master_N_Ngamma_fej, master_bin_middle_fej = \
        collect_across_tasks(mean_Ngamma_fej, std_Ngamma_fej, N_fej,
                             SnapList, PlotSnapList, True, fej_low, fej_high,
                             fej_bin_width)

    if rank == 0:
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        # Secondary y-axis; only used by the commented-out count plot below.
        ax2 = ax1.twinx()

        for model_number in range(0, len(SnapList)):
            ## Normalization for each model. ##
            if (simulation_norm[model_number] == 0):
                AllVars.Set_Params_Mysim()
            elif (simulation_norm[model_number] == 1):
                AllVars.Set_Params_MiniMill()
            elif (simulation_norm[model_number] == 2):
                AllVars.Set_Params_Tiamat()
            elif (simulation_norm[model_number] == 3):
                AllVars.Set_Params_Tiamat_extended()
            elif (simulation_norm[model_number] == 4):
                AllVars.Set_Params_Britton()
            elif(simulation_norm[model_number] == 5):
                AllVars.Set_Params_Kali()

            plot_count = 0
            for snapshot_idx in range(0, len(SnapList[model_number])):
                # Only draw snapshots that appear in PlotSnapList.
                if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
                    label = model_tags[model_number]

                    # Mask bins holding fewer than 4 galaxies -- too noisy.
                    w = np.where((master_N_Ngamma_fej[model_number][snapshot_idx] < 4))[0] # If there are no galaxies in the bin we don't want to plot.
                    master_mean_Ngamma_fej[model_number][snapshot_idx][w] = np.nan

                    # Rescale from stored units of 1.0e50 photons/s before the log.
                    ax1.plot(master_bin_middle_fej[model_number][snapshot_idx],
                             np.log10(master_mean_Ngamma_fej[model_number][snapshot_idx]*1.0e50),
                             color = PlotScripts.colors[plot_count],
                             ls = PlotScripts.linestyles[model_number],
                             rasterized = True, label = label,
                             lw = PlotScripts.global_linewidth)

                    #ax1.plot(master_bin_middle_fej[model_number][snapshot_idx],
                    #         np.log10(master_mean_Ngamma_fej[model_number][snapshot_idx]*1.0e50
                    #         * master_N_Ngamma_fej[model_number][snapshot_idx]),
                    #         color = PlotScripts.colors[plot_count],
                    #         ls = PlotScripts.linestyles[model_number],
                    #         rasterized = True, label = label,
                    #lw = PlotScripts.global_linewidth)
                    '''
                    ax2.plot(master_bin_middle_fej[model_number][snapshot_idx],
                             np.log10(master_N_Ngamma_fej[model_number][snapshot_idx]),
                             color = PlotScripts.colors[plot_count],
                             ls = PlotScripts.linestyles[model_number],
                             rasterized = True, label = label,
                             lw = PlotScripts.global_linewidth)
                    '''

                    plot_count += 1
                    if (plot_count == len(PlotSnapList[model_number])):
                        break

        adjust_plot(ax1)

        leg = ax1.legend(loc="upper center", numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize('medium')

        plt.tight_layout()

        ## Output ##
        outputFile = './%s%s' %(output_tag, output_format)
        fig.savefig(outputFile, bbox_inches='tight')  # Save the figure
        print('Saved file to {0}'.format(outputFile))
        plt.close(fig)
def plot_ejectedfraction(SnapList, PlotSnapList, simulation_norm, mean_mvir_ejected,
                         std_mvir_ejected, N_ejected, mean_ejected_z,
                         std_ejected_z, N_z, model_tags, output_tag):
    '''
    Plots the ejected fraction as a function of the halo mass (first figure)
    and as a function of time since the Big Bang (second figure).
    Parallel compatible.
    Accepts a 3D array of the ejected fraction so we can plot for multiple models and redshifts.

    Parameters
    ---------
    SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
        Snapshots for each model.
    PlotSnapList : Nested array with the same layout as SnapList.
        Snapshots drawn on the halo-mass figure.
    simulation_norm : array with length equal to the number of models.
        Denotes which simulation each model uses (0 MySim, 1 Mini-Millennium,
        2 Tiamat, 3 Extended Tiamat, 4 Britton's Simulation, 5 Kali).
    mean_mvir_ejected, std_mvir_ejected, N_ejected : Nested 3-dimensional array, mean_mvir_ejected[model_number0][snapshot0] = [bin0_meanejected, ..., binN_meanejected], with length equal to the number of models.
        Mean/Standard deviation for the ejected fraction binned into Halo Mass bins. N_ejected is the number of data points in each bin. Bounds are given by 'm_low' and 'm_high' in bins given by 'bin_width'.
    mean_ejected_z, std_ejected_z, N_z : Nested 2-dimensional arrays.
        Mean/standard deviation of the ejected fraction per snapshot (used for
        the time-evolution figure) and the number of data points.
    model_tags : array of strings with length equal to the number of models.
        Strings that contain the tag for each model. Will be placed on the plot.
    output_tag : string
        Name of the file that will be generated.

    Returns
    -------
    No returns.
    Generates and saves the plots (named via output_tag).

    Units
    -----
    Halo Mass is in units of log10(Msun).
    '''

    print("Plotting the Ejected Fraction as a function of halo mass.")

    ## Pool the per-task binned statistics. ##
    master_mean_ejected_halo, master_std_ejected_halo, master_N_ejected_halo, master_bin_middle_halo = \
        collect_across_tasks(mean_mvir_ejected, std_mvir_ejected, N_ejected, SnapList,
                             PlotSnapList, True, m_low, m_high)

    master_mean_ejected_z, master_std_ejected_z, master_N_ejected_z, _ = \
        collect_across_tasks(mean_ejected_z, std_ejected_z, N_z, SnapList)

    if rank == 0:
        fig1 = plt.figure()
        ax1 = fig1.add_subplot(111)

        fig2 = plt.figure()
        ax2 = fig2.add_subplot(111)

        for model_number in range(0, len(SnapList)):
            # Bug fix: simulations 0 (MySim) and 2 (Tiamat) previously had no
            # branch here, leaving `cosmo` undefined for those models and
            # raising a NameError at the redshift-axis construction below.
            # Assumes every AllVars.Set_Params_* returns the cosmology object,
            # as the pre-existing branches already relied on -- TODO confirm.
            if (simulation_norm[model_number] == 0):
                cosmo = AllVars.Set_Params_Mysim()
            elif (simulation_norm[model_number] == 1):
                cosmo = AllVars.Set_Params_MiniMill()
            elif (simulation_norm[model_number] == 2):
                cosmo = AllVars.Set_Params_Tiamat()
            elif (simulation_norm[model_number] == 3):
                cosmo = AllVars.Set_Params_Tiamat_extended()
            elif (simulation_norm[model_number] == 4):
                cosmo = AllVars.Set_Params_Britton()
            elif (simulation_norm[model_number] == 5):
                cosmo = AllVars.Set_Params_Kali()

            for snapshot_idx in range(0, len(PlotSnapList[model_number])):
                label = AllVars.SnapZ[PlotSnapList[model_number][snapshot_idx]]
                ax1.plot(master_bin_middle_halo[model_number][snapshot_idx],
                         master_mean_ejected_halo[model_number][snapshot_idx],
                         color = PlotScripts.colors[snapshot_idx],
                         linestyle = PlotScripts.linestyles[model_number],
                         label = label, lw = PlotScripts.global_linewidth)

            ax2.plot((AllVars.t_BigBang - AllVars.Lookback_Time[SnapList[model_number]]) * 1.0e3,
                     master_mean_ejected_z[model_number],
                     color = PlotScripts.colors[model_number],
                     label = model_tags[model_number],
                     ls = PlotScripts.linestyles[model_number],
                     lw = PlotScripts.global_linewidth)

        for model_number in range(0, len(SnapList)): # Just plot some garbage to get the legend labels correct.
            ax1.plot(np.nan, np.nan, color = 'k', linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = model_tags[model_number], linewidth = PlotScripts.global_linewidth)

        ## Halo-mass figure styling. ##
        ax1.set_xlabel(r'$\log_{10}\ M_{\mathrm{vir}}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
        ax1.set_ylabel(r'$\mathrm{Ejected \: Fraction}$', size = PlotScripts.global_fontsize)
        ax1.set_xlim([8.0, 12])
        ax1.set_ylim([-0.05, 1.0])

        ax1.xaxis.set_minor_locator(mtick.MultipleLocator(0.1))
        ax1.yaxis.set_minor_locator(mtick.MultipleLocator(0.025))

        leg = ax1.legend(loc=1, numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize('medium')

        outputFile = "./{0}{1}".format(output_tag, output_format)
        fig1.savefig(outputFile, bbox_inches='tight')  # Save the figure
        print('Saved file to {0}'.format(outputFile))
        plt.close(fig1)

        ## Time-evolution figure styling. ##
        ax2.set_xlabel(r"$\mathbf{Time \: since \: Big \: Bang \: [Myr]}$", fontsize = PlotScripts.global_labelsize)
        tick_locs = np.arange(200.0, 1000.0, 100.0)
        tick_labels = [r"$\mathbf{%d}$" % x for x in tick_locs]
        ax2.xaxis.set_major_locator(mtick.MultipleLocator(100))
        ax2.set_xticklabels(tick_labels, fontsize = PlotScripts.global_fontsize)
        ax2.set_xlim(PlotScripts.time_xlim)
        ax2.set_ylabel(r'$\mathbf{Mean f_{ej}}$', fontsize = PlotScripts.global_labelsize)

        # Secondary x-axis labelling redshift: ticks are placed at the times
        # corresponding to the redshifts in PlotScripts.z_plot.
        # NOTE(review): this uses `cosmo` from the LAST model iterated above.
        ax3 = ax2.twiny()
        t_plot = (AllVars.t_BigBang - cosmo.lookback_time(PlotScripts.z_plot).value) * 1.0e3 # Corresponding Time values on the bottom.
        z_labels = ["$\mathbf{%d}$" % x for x in PlotScripts.z_plot] # Properly Latex-ize the labels.
        ax3.set_xlabel(r"$\mathbf{z}$", fontsize = PlotScripts.global_labelsize)
        ax3.set_xlim(PlotScripts.time_xlim)
        ax3.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
        ax3.set_xticklabels(z_labels, fontsize = PlotScripts.global_fontsize) # But label them as redshifts.

        leg = ax2.legend(loc='lower right', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize(PlotScripts.global_legendsize)

        outputFile2 = "./{0}_z{1}".format(output_tag, output_format)
        fig2.savefig(outputFile2, bbox_inches='tight')  # Save the figure
        print('Saved file to {0}'.format(outputFile2))
        plt.close(fig2)
##
def plot_mvir_fesc(SnapList, mass_central, fesc, model_tags, output_tag):
    """
    Plots the mean escape fraction (fesc) binned as a function of halo
    virial mass.

    Parallel compatible: the per-task binned means are pooled with
    ``calculate_pooled_stats`` and only rank 0 draws the figure.

    Parameters
    ----------
    SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
        Snapshots for each model.
    mass_central, fesc : Nested 2-dimensional arrays, [model_number][snapshot] = per-galaxy values.
        Halo virial mass (log10 Msun) and escape fraction of each galaxy.
    model_tags : array of strings with length equal to the number of models.
        Strings that contain the tag for each model.
    output_tag : string
        Name of the file that will be generated.

    Returns
    -------
    No returns.
    Generates and saves the plot (named via output_tag).
    """

    print("Plotting fesc against Mvir")

    ## Array initialization. ##
    redshift_labels = []
    mean_fesc_array = []
    std_fesc_array = []
    mean_halomass_array = []
    std_halomass_array = []
    bin_middle_array = []

    for model_number in range(0, len(SnapList)):
        redshift_labels.append([])
        mean_fesc_array.append([])
        std_fesc_array.append([])
        mean_halomass_array.append([])
        std_halomass_array.append([])
        bin_middle_array.append([])

    binwidth = 0.1

    for model_number in range(0, len(SnapList)):
        for snapshot_idx in range(0, len(SnapList[model_number])):
            print("Doing Snapshot {0}".format(SnapList[model_number][snapshot_idx]))
            tmp = 'z = %.2f' %(AllVars.SnapZ[SnapList[model_number][snapshot_idx]])
            redshift_labels[model_number].append(tmp)

            # Fixed binning bounds.  (The data-driven min/max that used to be
            # computed here was immediately overwritten with these constants
            # -- and crashed on empty snapshots -- so it has been removed.)
            minimum_mass = 6.0
            maximum_mass = 12.0

            # All MPI tasks must agree on the binning range.
            binning_minimum = comm.allreduce(minimum_mass, op = MPI.MIN)
            binning_maximum = comm.allreduce(maximum_mass, op = MPI.MAX)

            halomass_nonlog = [10**x for x in mass_central[model_number][snapshot_idx]]
            (mean_fesc, std_fesc, N, bin_middle) = AllVars.Calculate_2D_Mean(mass_central[model_number][snapshot_idx], fesc[model_number][snapshot_idx], binwidth, binning_minimum, binning_maximum)

            mean_fesc_array[model_number], std_fesc_array[model_number] = calculate_pooled_stats(mean_fesc_array[model_number], std_fesc_array[model_number], mean_fesc, std_fesc, N)
            mean_halomass_array[model_number], std_halomass_array[model_number] = calculate_pooled_stats(mean_halomass_array[model_number], std_halomass_array[model_number], np.mean(halomass_nonlog), np.std(halomass_nonlog), len(mass_central[model_number][snapshot_idx]))

            ## If want to do mean/etc of halo mass need to update script. ##
            bin_middle_array[model_number].append(bin_middle)

        mean_halomass_array[model_number] = np.log10(mean_halomass_array[model_number])

    if rank == 0:
        f = plt.figure()
        ax1 = plt.subplot(111)

        for model_number in range(0, len(SnapList)):
            for snapshot_idx in range(0, len(SnapList[model_number])):
                # Only the first model carries the redshift labels.
                if model_number == 0:
                    title = redshift_labels[model_number][snapshot_idx]
                else:
                    title = ''

                mean = mean_fesc_array[model_number][snapshot_idx]
                std = std_fesc_array[model_number][snapshot_idx]
                bin_middle = bin_middle_array[model_number][snapshot_idx]

                # Bug fix: `colors`/`linestyles` were referenced without the
                # PlotScripts namespace used everywhere else in this module.
                ax1.plot(bin_middle, mean, color = PlotScripts.colors[snapshot_idx], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = title)

                if (len(SnapList) == 1):
                    # Only shade the 1-sigma band when a single model is drawn.
                    ax1.fill_between(bin_middle, np.subtract(mean,std), np.add(mean,std), color = PlotScripts.colors[snapshot_idx], alpha = 0.25)

        ax1.set_xlabel(r'$\log_{10}\ M_{\mathrm{vir}}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
        ax1.set_ylabel(r'$f_\mathrm{esc}$', size = PlotScripts.global_fontsize)

        ax1.xaxis.set_minor_locator(mtick.MultipleLocator(0.1))

        leg = ax1.legend(loc='upper left', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize('medium')

        outputFile = './' + output_tag + output_format
        plt.savefig(outputFile, bbox_inches='tight')  # Save the figure
        # Bug fix: the format placeholder was missing, so the file name was
        # never printed.
        print('Saved file to {0}'.format(outputFile))
        plt.close()
##
def plot_mvir_Ngamma(SnapList, mean_mvir_Ngamma, std_mvir_Ngamma, N_Ngamma, model_tags, output_tag, fesc_prescription=None, fesc_normalization=None, fitpath=None):
    '''
    Plots the number of ionizing photons (pure ngamma times fesc) as a function of halo mass.
    Parallel compatible.
    The input data has been binned as a function of halo virial mass (Mvir), with the bins defined at the top of the file (m_low, m_high, bin_width).
    Accepts 3D arrays to plot ngamma for multiple models.

    Parameters
    ----------
    SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
        Snapshots for each model.
    mean_mvir_Ngamma, std_mvir_Ngamma, N_Ngamma : Nested 2-dimensional array, mean_mvir_Ngamma[model_number0][snapshot0] = [bin0_meanNgamma, ..., binN_meanNgamma], with length equal to the number of bins.
        Mean/Standard deviation/number of data points in each halo mass (Mvir) bin.
        The number of photons is in units of 1.0e50 s^-1.
    model_tags : array of strings with length equal to the number of models.
        Strings that contain the tag for each model. Will be placed on the plot.
    output_tag : string
        Name of the file that will be generated.
    fesc_prescription : int (optional)
        If this parameter is defined, we will save the Mvir-Ngamma results in a text file (not needed if not saving).
        Number that controls what escape fraction prescription was used to generate the escape fractions.
        0 : Constant, fesc = Constant.
        1 : Scaling with Halo Mass, fesc = A*Mh^B.
        2 : Scaling with ejected fraction, fesc = fej*A + B.
    fesc_normalization : float (if fesc_prescription == 0) or `numpy.darray' with length 2 (if fesc_prescription == 1 or == 2) (optional).
        If this parameter is defined, we will save the Mvir-Ngamma results in a text file (not needed if not saving).
        If fesc_prescription == 0, gives the constant value for the escape fraction.
        If fesc_prescription == 1 or == 2, gives A and B with the form [A, B].
    fitpath : string (optional)
        If this parameter is defined, we will save the Mvir-Ngamma results in a text file (not needed if not saving).
        Defines the base path for where we are saving the results.

    Returns
    -------
    No returns.
    Generates and saves the plot (named via output_tag).

    Units
    -----
    Ngamma is in units of 1.0e50 s^-1.
    '''

    print("Plotting ngamma*fesc against the halo mass")

    ## Array initialization. ##
    redshift_labels = []
    mean_ngammafesc_array = []
    std_ngammafesc_array = []
    bin_middle_array = []

    for model_number in range(0, len(SnapList)):
        redshift_labels.append([])
        mean_ngammafesc_array.append([])
        std_ngammafesc_array.append([])
        bin_middle_array.append([])

    for model_number in range(0, len(SnapList)):
        for snapshot_idx in range(0, len(SnapList[model_number])):
            print("Doing Snapshot {0}".format(SnapList[model_number][snapshot_idx]))
            tmp = 'z = %.2f' %(AllVars.SnapZ[SnapList[model_number][snapshot_idx]])
            redshift_labels[model_number].append(tmp)

            N = N_Ngamma[model_number][snapshot_idx]
            mean_ngammafesc_array[model_number], std_ngammafesc_array[model_number] = calculate_pooled_stats(mean_ngammafesc_array[model_number], std_ngammafesc_array[model_number], mean_mvir_Ngamma[model_number][snapshot_idx], std_mvir_Ngamma[model_number][snapshot_idx], N) # Collate the values from all processors.
            bin_middle_array[model_number].append(np.arange(m_low, m_high+bin_width, bin_width)[:-1] + bin_width * 0.5)

    if rank == 0:
        # Bug fix: results are only written to disk when the caller supplies
        # ALL of the optional fit parameters.  Previously the ValueError was
        # raised unconditionally, so calling with the documented None defaults
        # always crashed.  A partial specification is still an error.
        save_results = (fesc_prescription is not None or
                        fesc_normalization is not None or
                        fitpath is not None)
        if (save_results and (fesc_prescription is None or
                              fesc_normalization is None or fitpath is None)):
            raise ValueError("You've specified you want to save the Mvir-Ngamma results but haven't provided an escape fraction prescription, normalization and base path name")

        fig = plt.figure()
        ax1 = plt.subplot(111)

        for model_number in range(0, len(SnapList)):
            count = 0
            for snapshot_idx in range(0, len(SnapList[model_number])):
                # Only the first model carries the redshift labels.
                if model_number == 0:
                    title = redshift_labels[model_number][snapshot_idx]
                else:
                    title = ''

                mean = np.zeros((len(mean_ngammafesc_array[model_number][snapshot_idx])), dtype = np.float32)
                std = np.zeros((len(mean_ngammafesc_array[model_number][snapshot_idx])), dtype = np.float32)

                for i in range(0, len(mean)):
                    if (mean_ngammafesc_array[model_number][snapshot_idx][i] < 1e-10):
                        # Empty bin: mask instead of taking log10 of ~0.
                        mean[i] = np.nan
                        std[i] = np.nan
                    else:
                        mean[i] = np.log10(mean_ngammafesc_array[model_number][snapshot_idx][i] * 1.0e50) # Remember that the input data is in units of 1.0e50 s^-1.
                        std[i] = 0.434 * std_ngammafesc_array[model_number][snapshot_idx][i] / mean_ngammafesc_array[model_number][snapshot_idx][i] # We're plotting in log space so the standard deviation is 0.434*log10(std)/log10(mean).

                bin_middle = bin_middle_array[model_number][snapshot_idx]

                if (count < 4): # Only plot at most 5 lines.
                    ax1.plot(bin_middle, mean, color = PlotScripts.colors[snapshot_idx], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = title, linewidth = PlotScripts.global_linewidth)
                    count += 1

                ## In this block we save the Mvir-Ngamma results to a file. ##
                if save_results:
                    # Note: All the checks that escape fraction normalization was written correctly were performed in 'calculate_fesc()', hence it will be correct by this point and we don't need to double check.
                    if (fesc_prescription[model_number] == 0): # Slightly different naming scheme for the constant case (it only has a float for fesc_normalization).
                        fname = "%s/fesc%d_%.3f_z%.3f.txt" %(fitpath, fesc_prescription[model_number], fesc_normalization[model_number], AllVars.SnapZ[SnapList[model_number][snapshot_idx]])
                    elif (fesc_prescription[model_number] == 1 or fesc_prescription[model_number] == 2):
                        fname = "%s/fesc%d_A%.3eB%.3f_z%.3f.txt" %(fitpath, fesc_prescription[model_number], fesc_normalization[model_number][0], fesc_normalization[model_number][1], AllVars.SnapZ[SnapList[model_number][snapshot_idx]])

                    # Context manager (bug fix: the old code clobbered the
                    # figure variable `f` with the file handle and leaked the
                    # handle if the write-access check raised).
                    with open(fname, "w+") as fit_file:
                        if not os.access(fname, os.W_OK):
                            print("The filename is {0}".format(fname))
                            raise ValueError("Can't write to this file.")
                        for i in range(0, len(bin_middle)):
                            fit_file.write("%.4f %.4f %.4f %d\n" %(bin_middle[i], mean[i], std[i], N_Ngamma[model_number][snapshot_idx][i]))
                    print("Wrote successfully to file {0}".format(fname))

        for model_number in range(0, len(SnapList)): # Just plot some garbage to get the legend labels correct.
            ax1.plot(np.nan, np.nan, color = 'k', linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = model_tags[model_number], linewidth = PlotScripts.global_linewidth)

        ax1.set_xlabel(r'$\log_{10}\ M_{\mathrm{vir}}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
        ax1.set_ylabel(r'$\log_{10}\ \dot{N}_\gamma \: f_\mathrm{esc} \: [\mathrm{s}^{-1}]$', size = PlotScripts.global_fontsize)
        ax1.set_xlim([8.5, 12])

        ax1.xaxis.set_minor_locator(mtick.MultipleLocator(0.1))

        leg = ax1.legend(loc='upper left', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize('medium')

        outputFile = './' + output_tag + output_format
        plt.savefig(outputFile, bbox_inches='tight')  # Save the figure
        # Bug fix: the format placeholder was missing, so the file name was
        # never printed.
        print('Saved file to {0}'.format(outputFile))
        plt.close()
def bin_Simfast_halos(RedshiftList, SnapList, halopath, fitpath, fesc_prescription, fesc_normalization, GridSize, output_tag):
    '''
    Assigns ionizing photon luminosities to Simfast21 halos using the Mvir-Ngamma fits.

    For each model and each requested halo redshift we locate the closest
    simulation snapshot, read back the Mvir-Ngamma fit table written earlier,
    and assign every Simfast21 halo an ionizing flux drawn from a normal
    distribution whose mean/std come from the fit in the halo's mass bin.
    The fluxes are gridded onto a (GridSize)^3 cube and a slice of the grid is
    plotted alongside the corresponding halo positions.

    Parameters
    ----------
    RedshiftList : array of floats
        Redshifts of the Simfast21 halo catalogues to process.
    SnapList : array of floats
        Redshifts of the simulation snapshots; used to find the snapshot
        closest to each halo redshift.
    halopath : string
        Directory containing the Simfast21 halo catalogues.
    fitpath : string
        Directory containing the Mvir-Ngamma fit files.
    fesc_prescription : array of ints, one per model.
        Escape fraction prescription for each model (0 = constant,
        1/2 = mass scaled); determines the fit filename format.
    fesc_normalization : array, one entry per model (float, or [A, B] pair).
        Normalization constants matching each prescription.
    GridSize : int
        Number of cells per dimension of the output grid.
    output_tag : string
        Prefix of the saved plot filenames.

    Returns
    -------
    No returns.  Generates and saves one plot per (model, halo redshift) pair.
    '''
    for model_number in range(0, len(fesc_prescription)):
        for halo_z_idx in range(0, len(RedshiftList)):
            # Index of the simulation redshift closest to this halo redshift.
            snapshot_idx = min(range(len(SnapList)), key=lambda i: abs(SnapList[i] - RedshiftList[halo_z_idx]))
            print("Binning Halo redshift {0}".format(RedshiftList[halo_z_idx]))
            print("For the Halo redshift {0:.3f} the nearest simulation redshift is {1:.3f}".format(RedshiftList[halo_z_idx], SnapList[snapshot_idx]))
            if (fesc_prescription[model_number] == 0):  # Constant fesc has a single float for its normalization.
                fname = "%s/fesc%d_%.3f_z%.3f.txt" %(fitpath, fesc_prescription[model_number], fesc_normalization[model_number], AllVars.SnapZ[snapshot_idx])
            elif (fesc_prescription[model_number] == 1 or fesc_prescription[model_number] == 2):
                fname = "%s/fesc%d_A%.3eB%.3f_z%.3f.txt" %(fitpath, fesc_prescription[model_number], fesc_normalization[model_number][0], fesc_normalization[model_number][1], AllVars.SnapZ[snapshot_idx])
            print("Reading in file {0}".format(fname))
            ## Read in the results from the Mvir-Ngamma binning. ##
            with open(fname, 'r') as f:
                fit_mvir, fit_mean, fit_std, fit_N = np.loadtxt(f, unpack = True)
            ## Read in the halos created by Simfast21. ##
            # The data file has the structure:
            #   long int N_halos
            # Then an entry for each halo:
            #   float Mass
            #   float x, y, z positions.
            # NOTE: The x,y,z positions are the grid indices but are still floats (because Simfast21 is weird like that).
            Halodesc_full = [
                ('Halo_Mass', np.float32),
                ('Halo_x', np.float32),
                ('Halo_y', np.float32),
                ('Halo_z', np.float32)
            ]
            names = [field[0] for field in Halodesc_full]
            formats = [field[1] for field in Halodesc_full]
            Halo_Desc = np.dtype({'names': names, 'formats': formats}, align=True)
            fname = "%s/halonl_z%.3f_N%d_L100.0.dat.catalog" %(halopath, RedshiftList[halo_z_idx], GridSize)
            # BUGFIX: `np.long` was removed from NumPy; the header is a C long,
            # so read it as int64 and unwrap the 1-element array before using
            # it as a count.  Also use `with` so the file is always closed.
            with open(fname, 'rb') as f:
                N_Halos = int(np.fromfile(f, count = 1, dtype = np.int64)[0])
                Halos = np.fromfile(f, count = N_Halos, dtype = Halo_Desc)
            # BUGFIX: the original used the undefined bare name `float32`.
            binned_nion = np.zeros((GridSize*GridSize*GridSize), dtype = np.float32)  # Will contain the ionizing photons that result from the binning.
            binned_Halo_Mass = np.digitize(np.log10(Halos['Halo_Mass']), fit_mvir)  # Places the Simfast21 halos into the halo mass bins defined by the Mvir-Ngamma results.
            binned_Halo_Mass[binned_Halo_Mass == len(fit_mvir)] = len(fit_mvir) - 1  # Fixes up the upper edge case.
            ## For each halo we now assign it an ionizing flux. ##
            # The flux is drawn from a normal distribution with mean and standard
            # deviation given by the Mvir-Ngamma results.
            # NOTE: Remember the Mvir-Ngamma results are in units of log10(s^-1).
            fit_nan = 0
            for i in range(0, N_Halos):
                if (np.isnan(fit_mean[binned_Halo_Mass[i]]) or np.isnan(fit_std[binned_Halo_Mass[i]])):  # This halo's mass was not covered by the Mvir-Ngamma fits.
                    fit_nan += 1
                    continue
                nion_halo = np.random.normal(fit_mean[binned_Halo_Mass[i]], fit_std[binned_Halo_Mass[i]])
                # Because of how Simfast21 does its binning, some halos land
                # technically outside the box; clamp them back onto the grid.
                x_grid = min(max(int(Halos['Halo_x'][i]), 0), GridSize - 1)
                y_grid = min(max(int(Halos['Halo_y'][i]), 0), GridSize - 1)
                z_grid = min(max(int(Halos['Halo_z'][i]), 0), GridSize - 1)
                idx = x_grid * GridSize*GridSize + y_grid * GridSize + z_grid
                binned_nion[idx] += pow(10, nion_halo)/1.0e50  # Stored in units of 1.0e50 s^-1 to avoid overflow.
            print("{0} halos (out of {1}) had a mass not covered by the Mvir-Ngamma results.".format(fit_nan, N_Halos))
            binned_nion = binned_nion.reshape((GridSize, GridSize, GridSize))
            cut_slice = 0
            cut_width = 512  # NOTE(review): if cut_width > GridSize the slice silently truncates to the full depth -- confirm intended.
            nion_slice = binned_nion[:, :, cut_slice:cut_slice+cut_width].mean(axis=-1)*1.0e50
            ax1 = plt.subplot(211)
            # BUGFIX: matplotlib's imshow only accepts origin='upper'/'lower'; 'low' is invalid.
            im = ax1.imshow(np.log10(nion_slice), interpolation='bilinear', origin='lower', extent =[0,AllVars.BoxSize,0,AllVars.BoxSize], cmap = 'Purples', vmin = 48, vmax = 53)
            cbar = plt.colorbar(im, ax = ax1)
            cbar.set_label(r'$\mathrm{log}_{10}N_{\gamma} [\mathrm{s}^{-1}]$')
            ax1.set_xlabel(r'$\mathrm{x} (h^{-1}Mpc)$')
            ax1.set_ylabel(r'$\mathrm{y} (h^{-1}Mpc)$')
            ax1.set_xlim([0.0, AllVars.BoxSize])
            ax1.set_ylim([0.0, AllVars.BoxSize])
            title = r"$z = %.3f$" %(RedshiftList[halo_z_idx])
            ax1.set_title(title)
            # Scatter the halos that fall inside the plotted slice.
            ax2 = plt.subplot(212)
            w = np.where((Halos['Halo_z'][:] > cut_slice) & (Halos['Halo_z'][:] <= cut_slice + cut_width))[0]
            x_plot = Halos['Halo_x'] * float(AllVars.BoxSize)/float(GridSize)  # Convert grid indices to comoving positions.
            y_plot = Halos['Halo_y'] * float(AllVars.BoxSize)/float(GridSize)
            ax2.scatter(x_plot[w], y_plot[w], s = 2, alpha = 0.5)
            ax2.set_xlabel(r'$\mathrm{x} (h^{-1}Mpc)$')
            ax2.set_ylabel(r'$\mathrm{y} (h^{-1}Mpc)$')
            ax2.set_xlim([0.0, AllVars.BoxSize])
            ax2.set_ylim([0.0, AllVars.BoxSize])
            tmp = "z%.3f" %(RedshiftList[halo_z_idx])
            plt.tight_layout()
            outputFile = './' + output_tag + tmp + output_format
            plt.savefig(outputFile)  # Save the figure.
            print('Saved file to {0}'.format(outputFile))
            plt.close()
def plot_photoncount(SnapList, sum_nion, simulation_norm, FirstFile, LastFile, NumFiles, model_tags, output_tag):
    '''
    Plots the ionizing emissivity as a function of redshift.
    We normalize the emissivity to Mpc^-3 and this function allows the read-in of only a subset of the volume.
    Parallel compatible.

    Parameters
    ---------
    SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
        Snapshots for each model, defines the x-axis we plot against.
    sum_nion : Nested 1-dimensional array, sum_nion[z0, z1, ..., zn], with length equal to the number of redshifts.
        Number of escape ionizing photons (i.e., photon rate times the local escape fraction) at each redshift.
        In units of 1.0e50 s^-1.
    simulation_norm : array of ints with length equal to the number of models.
        Denotes which simulation each model uses.
        0 : MySim
        1 : Mini-Millennium
        2 : Tiamat (down to z = 5)
        3 : Extended Tiamat (down to z = 1.6ish).
        4 : Britton's Simulation
        5 : Kali
    FirstFile, LastFile, NumFiles : array of integers with length equal to the number of models.
        The file numbers for each model that were read in (defined by the range between [FirstFile, LastFile] inclusive) and the TOTAL number of files for this model (we may only be plotting a subset of the volume).
    model_tags : array of strings with length equal to the number of models.
        Strings that contain the tag for each model.  Will be placed on the plot.
    output_tag : string
        Name of the file that will be generated.

    Returns
    -------
    No returns.
    Generates and saves the plot (named via output_tag).

    Units
    -----
    sum_nion is in units of 1.0e50 s^-1.
    '''
    print("Plotting the ionizing emissivity.")
    sum_array = []
    for model_number in range(0, len(SnapList)):
        # BUGFIX: this chain previously started with two independent `if`
        # statements, so simulation_norm == 0 fell through to the `else`
        # and raised.  Option 2 (Tiamat) was also documented but unhandled.
        if (simulation_norm[model_number] == 0):
            AllVars.Set_Params_Mysim()
        elif (simulation_norm[model_number] == 1):
            AllVars.Set_Params_MiniMill()
        elif (simulation_norm[model_number] == 2):
            AllVars.Set_Params_Tiamat()
        elif (simulation_norm[model_number] == 3):
            AllVars.Set_Params_Tiamat_extended()
        elif (simulation_norm[model_number] == 4):
            AllVars.Set_Params_Britton()
        elif (simulation_norm[model_number] == 5):
            AllVars.Set_Params_Kali()
        else:
            print("Simulation norm was set to {0}.".format(simulation_norm[model_number]))
            raise ValueError("This option has not been implemented yet. Get your head in the game Jacob!")
        sum_array.append([])
        for snapshot_idx in range(0, len(SnapList[model_number])):
            # Combine the photon counts from all processors onto rank 0.
            nion_sum_snapshot = comm.reduce(sum_nion[model_number][snapshot_idx], op = MPI.SUM, root = 0)
            if rank == 0:
                # Convert to a volume density (Mpc^-3), scaling by the fraction of files actually read.
                sum_array[model_number].append(nion_sum_snapshot * 1.0e50 / (pow(AllVars.BoxSize / AllVars.Hubble_h,3) * (float(LastFile[model_number] - FirstFile[model_number] + 1) / float(NumFiles[model_number]))))
    if (rank == 0):
        ax1 = plt.subplot(111)
        for model_number in range(0, len(SnapList)):
            # Same normalization fix as above; here we also need the returned
            # cosmology object for lookback-time conversions.
            if (simulation_norm[model_number] == 0):
                cosmo = AllVars.Set_Params_Mysim()
            elif (simulation_norm[model_number] == 1):
                cosmo = AllVars.Set_Params_MiniMill()
            elif (simulation_norm[model_number] == 2):
                cosmo = AllVars.Set_Params_Tiamat()
            elif (simulation_norm[model_number] == 3):
                cosmo = AllVars.Set_Params_Tiamat_extended()
            elif (simulation_norm[model_number] == 4):
                cosmo = AllVars.Set_Params_Britton()
            elif (simulation_norm[model_number] == 5):
                cosmo = AllVars.Set_Params_Kali()
            else:
                print("Simulation norm was set to {0}.".format(simulation_norm[model_number]))
                raise ValueError("This option has not been implemented yet. Get your head in the game Jacob!")
            # Convert each snapshot redshift to time-since-Big-Bang in Myr.
            t = np.empty(len(SnapList[model_number]))
            for snapshot_idx in range(0, len(SnapList[model_number])):
                t[snapshot_idx] = (AllVars.t_BigBang - cosmo.lookback_time(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]).value) * 1.0e3
            # Only plot snapshots with a non-negligible photon count.
            t = [t for t, N in zip(t, sum_array[model_number]) if N > 1.0]
            sum_array[model_number] = [x for x in sum_array[model_number] if x > 1.0]
            print("The total number of ionizing photons for model {0} is {1} s^-1 Mpc^-3".format(model_number, sum(sum_array[model_number])))
            print(np.log10(sum_array[model_number]))
            ax1.plot(t, np.log10(sum_array[model_number]), color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[model_number], label = model_tags[model_number], linewidth = PlotScripts.global_linewidth)
        ax1.xaxis.set_minor_locator(mtick.MultipleLocator(PlotScripts.time_tickinterval))
        ax1.set_xlim(PlotScripts.time_xlim)
        ax1.set_ylim([48.5, 51.5])
        # Secondary x-axis labelled in redshift rather than time.
        ax2 = ax1.twiny()
        t_plot = (AllVars.t_BigBang - cosmo.lookback_time(PlotScripts.z_plot).value) * 1.0e3  # Corresponding time values on the bottom.
        z_labels = ["$%d$" % x for x in PlotScripts.z_plot]  # Properly Latex-ize the labels.
        ax2.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
        ax2.set_xlim(PlotScripts.time_xlim)
        ax2.set_xticks(t_plot)  # Set the ticks according to the time values on the bottom,
        ax2.set_xticklabels(z_labels)  # But label them as redshifts.
        ax1.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_fontsize)
        ax1.set_ylabel(r'$\sum f_\mathrm{esc}\dot{N}_\gamma \: [\mathrm{s}^{-1}\mathrm{Mpc}^{-3}]$', fontsize = PlotScripts.global_fontsize)
        plot_time = 1  # 1: observations plotted against time; 0: against redshift.
        bouwens_z = np.arange(6,16)  # Redshift range for the observations.
        bouwens_t = (AllVars.t_BigBang - cosmo.lookback_time(bouwens_z).value) * 1.0e3  # Corresponding values for what we will plot on the x-axis.
        bouwens_1sigma_lower = [50.81, 50.73, 50.60, 50.41, 50.21, 50.00, 49.80, 49.60, 49.39, 49.18]  # 68% Confidence Intervals for the ionizing emissivity from Bouwens 2015.
        bouwens_1sigma_upper = [51.04, 50.85, 50.71, 50.62, 50.56, 50.49, 50.43, 50.36, 50.29, 50.23]
        bouwens_2sigma_lower = [50.72, 50.69, 50.52, 50.27, 50.01, 49.75, 49.51, 49.24, 48.99, 48.74]  # 95% CI.
        bouwens_2sigma_upper = [51.11, 50.90, 50.74, 50.69, 50.66, 50.64, 50.61, 50.59, 50.57, 50.55]
        if plot_time == 1:
            ax1.fill_between(bouwens_t, bouwens_1sigma_lower, bouwens_1sigma_upper, color = 'k', alpha = 0.2)
            ax1.fill_between(bouwens_t, bouwens_2sigma_lower, bouwens_2sigma_upper, color = 'k', alpha = 0.4, label = r"$\mathrm{Bouwens \: et \: al. \: (2015)}$")
        else:
            ax1.fill_between(bouwens_z, bouwens_1sigma_lower, bouwens_1sigma_upper, color = 'k', alpha = 0.2)
            ax1.fill_between(bouwens_z, bouwens_2sigma_lower, bouwens_2sigma_upper, color = 'k', alpha = 0.4, label = r"$\mathrm{Bouwens \: et \: al. \: (2015)}$")
        ax1.text(350, 50.0, r"$68\%$", horizontalalignment='center', verticalalignment = 'center', fontsize = PlotScripts.global_labelsize)
        ax1.text(350, 50.8, r"$95\%$", horizontalalignment='center', verticalalignment = 'center', fontsize = PlotScripts.global_labelsize)
        leg = ax1.legend(loc='lower right', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame.
        for legend_text in leg.get_texts():  # Reduce the size of the text.
            legend_text.set_fontsize(PlotScripts.global_legendsize)
        plt.tight_layout()
        outputFile = './{0}{1}'.format(output_tag, output_format)
        plt.savefig(outputFile)  # Save the figure.
        print('Saved file to {0}'.format(outputFile))
        plt.close()
##
def plot_singleSFR(galaxies_filepath_array, merged_galaxies_filepath_array, number_snapshots, simulation_norm, model_tags, output_tag):
    '''
    Plots the star formation rate and outflow rate history for the galaxy
    ensemble and for a handful of individually tracked galaxies.

    Parameters
    ----------
    galaxies_filepath_array, merged_galaxies_filepath_array : array of strings with length equal to the number of models.
        Paths to the galaxy / merged-galaxy files for each model.
    number_snapshots : array of ints with length equal to the number of models.
        Number of snapshots each model's galaxy files contain.
    simulation_norm : array of ints with length equal to the number of models.
        Denotes which simulation each model uses (0 : MySim, 1 : Mini-Millennium, 3 : Extended Tiamat).
    model_tags : array of strings with length equal to the number of models.
        Strings that contain the tag for each model.  Will be placed on the plot.
    output_tag : string
        Name of the file that will be generated.

    Returns
    -------
    No returns.
    Generates and saves the plot (named via output_tag).
    '''
    SFR_gal = []
    SFR_ensemble = []
    ejected_gal = []
    ejected_ensemble = []
    infall_gal = []
    infall_ensemble = []
    ejectedmass_gal = []
    ejectedmass_ensemble = []
    N_random = 1  # Number of individually tracked galaxies per model.
    ax1 = plt.subplot(111)
    look_for_alive = 1  # If 1, identify the galaxies alive for the most snapshots.
    halonr_array = [389106]  # Halo numbers of the individual galaxies we track.
    for model_number in range(0, len(model_tags)):
        # BUGFIX: this chain previously started with two independent `if`
        # statements, so simulation_norm == 0 fell through to `else` and raised.
        if (simulation_norm[model_number] == 0):
            AllVars.Set_Params_Mysim()
        elif (simulation_norm[model_number] == 1):
            AllVars.Set_Params_MiniMill()
        elif (simulation_norm[model_number] == 3):
            AllVars.Set_Params_Tiamat_extended()
        else:
            print("Simulation norm was set to {0}.".format(simulation_norm[model_number]))
            raise ValueError("This option has not been implemented yet. Get your head in the game Jacob!")
        SFR_gal.append([])
        SFR_ensemble.append([])
        ejected_gal.append([])
        ejected_ensemble.append([])
        infall_gal.append([])
        infall_ensemble.append([])
        ejectedmass_gal.append([])
        ejectedmass_ensemble.append([])
        GG, Gal_Desc = ReadScripts.ReadGals_SAGE_DelayedSN(galaxies_filepath_array[model_number], 0, number_snapshots[model_number], comm)  # Read in the correct galaxy file.
        G_Merged, Merged_Desc = ReadScripts.ReadGals_SAGE_DelayedSN(merged_galaxies_filepath_array[model_number], 0, number_snapshots[model_number], comm)  # Also need the merged galaxies.
        G = ReadScripts.Join_Arrays(GG, G_Merged, Gal_Desc)  # Then join them together for all galaxies that existed at this redshift.
        if look_for_alive == 1:
            # BUGFIX: the original overwrote G.GridHistory with 0/1 flags here,
            # which destroyed the -1 "not tracked" sentinel relied upon by the
            # np.where filter and the gap-filling check below.  Count alive
            # snapshots without mutating the array.
            alive = np.sum(G.GridHistory >= 0, axis = 1)
            most_alive = alive.argsort()[-10:][::-1]  # The 10 galaxies alive for the most snapshots. Taken from https://stackoverflow.com/questions/6910641/how-to-get-indices-of-n-maximum-values-in-a-numpy-array
        t = np.empty((number_snapshots[model_number]))
        for snapshot_idx in range(0, number_snapshots[model_number]):
            # Only include galaxies that existed at this snapshot with positive
            # (but not absurd) stellar mass and a halo mass inside the SAGE cuts.
            w = np.where((G.GridHistory[:, snapshot_idx] != -1) & (G.GridStellarMass[:, snapshot_idx] > 0.0) & (G.GridStellarMass[:, snapshot_idx] < 1e5) & (G.GridFoFMass[:, snapshot_idx] >= m_low_SAGE) & (G.GridFoFMass[:, snapshot_idx] <= m_high_SAGE))[0]
            SFR_ensemble[model_number].append(np.mean(G.GridSFR[w,snapshot_idx]))
            ejected_ensemble[model_number].append(np.mean(G.GridOutflowRate[w, snapshot_idx]))
            infall_ensemble[model_number].append(np.mean(G.GridInfallRate[w, snapshot_idx]))
            # NOTE(review): `t_BigBang` and `cosmo` are used as module-level
            # globals here (elsewhere accessed via AllVars.t_BigBang) --
            # confirm they are initialized before this function is called.
            t[snapshot_idx] = (t_BigBang - cosmo.lookback_time(AllVars.SnapZ[snapshot_idx]).value) * 1.0e3
        for p in range(0, N_random):
            random_idx = (np.where((G.HaloNr == halonr_array[p]))[0])[0]
            SFR_gal[model_number].append(G.GridSFR[random_idx])  # Remember the star formation rate history of the galaxy.
            ejected_gal[model_number].append(G.GridOutflowRate[random_idx])
            infall_gal[model_number].append(G.GridInfallRate[random_idx])
            ejectedmass_gal[model_number].append(G.GridEjectedMass[random_idx])
            for snapshot_idx in range(0, number_snapshots[model_number]):
                # Carry the previous snapshot's SFR forward through gaps where
                # the galaxy was not tracked so the plotted history is continuous.
                if snapshot_idx == 0:
                    pass
                elif (G.GridHistory[random_idx, snapshot_idx] == -1):
                    SFR_gal[model_number][p][snapshot_idx] = SFR_gal[model_number][p][snapshot_idx - 1]
        # Ensemble curves: SFR and outflow rate for this model.
        ax1.plot(t, SFR_ensemble[model_number], color = PlotScripts.colors[0], linestyle = PlotScripts.linestyles[model_number], label = model_tags[model_number], linewidth = PlotScripts.global_linewidth)
        ax1.plot(t, ejected_ensemble[model_number], color = PlotScripts.colors[1], linestyle = PlotScripts.linestyles[model_number], linewidth = PlotScripts.global_linewidth, alpha = 1.0)
        # Individual tracked galaxies, drawn faintly underneath.
        for p in range(0, N_random):
            ax1.plot(t, SFR_gal[model_number][p], color = PlotScripts.colors[0], linestyle = PlotScripts.linestyles[model_number], alpha = 0.5, linewidth = 1)
            ax1.plot(t, ejected_gal[model_number][p], color = PlotScripts.colors[1], linestyle = PlotScripts.linestyles[model_number], alpha = 0.5, linewidth = 1)
    # Dummy artists so the legend labels the curve colours.
    ax1.plot(np.nan, np.nan, color = 'r', linestyle = '-', label = "SFR")
    ax1.plot(np.nan, np.nan, color = 'b', linestyle = '-', label = "Outflow")
    ax1.set_yscale('log', nonposy='clip')
    ax1.set_ylabel(r"$\mathrm{Mass \: Flow} \: [\mathrm{M}_\odot \mathrm{yr}^{-1}]$")
    ax1.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_fontsize)
    ax1.set_xlim(PlotScripts.time_xlim)
    ax1.set_ylim([1e-6, 1e3])
    # Secondary x-axis labelled in redshift rather than time.
    ax2 = ax1.twiny()
    t_plot = (t_BigBang - cosmo.lookback_time(PlotScripts.z_plot).value) * 1.0e3  # Corresponding time values on the bottom.
    z_labels = ["$%d$" % x for x in PlotScripts.z_plot]  # Properly Latex-ize the labels.
    ax2.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
    ax2.set_xlim(PlotScripts.time_xlim)
    ax2.set_xticks(t_plot)  # Set the ticks according to the time values on the bottom,
    ax2.set_xticklabels(z_labels)  # But label them as redshifts.
    plt.tight_layout()
    leg = ax1.legend(loc='lower right', numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)  # Don't want a box frame.
    for legend_text in leg.get_texts():  # Reduce the size of the text.
        legend_text.set_fontsize(PlotScripts.global_legendsize)
    outputFile = './Halo%d_mlow%.2f_%s%s' %(halonr_array[0], m_low_SAGE, output_tag, output_format)
    plt.savefig(outputFile, bbox_inches='tight')  # Save the figure.
    print('Saved file to {0}'.format(outputFile))
    plt.close()
##
def plot_quasars_count(SnapList, PlotList, N_quasars_z, N_quasars_boost_z, N_gal_z, mean_quasar_activity, std_quasar_activity, N_halo, N_merger_halo, N_gal, N_merger_galaxy, fesc_prescription, simulation_norm, FirstFile, LastFile, NumFile, model_tags, output_tag):
'''
Parameters
---------
SnapList : Nested 'array-like` of ints, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots that we plot the quasar density at for each model.
PlotList : Nested array of ints, PlotList[model_number0]= [plotsnapshot0_model0, ..., plotsnapshotN_model0], with length equal to the number of models.
Snapshots that will be plotted for the quasar activity as a function of halo mass.
N_quasars_z : Nested array of floats, N_quasars_z[model_number0] = [N_quasars_z0, N_quasars_z1, ..., N_quasars_zN]. Outer array has length equal to the number of models, inner array has length equal to length of the model's SnapList.
Number of quasars, THAT WENT OFF, during the given redshift.
N_quasars_boost_z : Nested array of floats, N_quasars_boost_z[model_number0] = [N_quasars_boost_z0, N_quasars_boost_z1, ..., N_quasars_boost_zN]. Outer array has length equal to the number of models, inner array has length equal to length of the model's SnapList.
Number of galaxies that had their escape fraction boosted by quasar activity.
N_gal_z : Nested array of floats, N_gal_z[model_number0] = [N_gal_z0, N_gal_z1, ..., N_gal_zN]. Outer array has length equal to the number of models, inner array has length equal to length of the model's SnapList.
Number of galaxies at each redshift.
mean_quasar_activity, std_quasar_activity : Nested 2-dimensional array of floats, mean_quasar_activity[model_number0][snapshot0] = [bin0quasar_activity, ..., binNquasar_activity]. Outer array has length equal to the number of models, inner array has length equal to the length of the model's snaplist and most inner array has length equal to the number of halo bins (NB).
Mean/std fraction of galaxies that had quasar go off during each snapshot as a function of halo mass.
NOTE : This is for quasars going off, not for galaxies that have their escape fraction being boosted.
fesc_prescription : Array with length equal to the number of models.
Denotes what escape fraction prescription each model used. Quasars are only tracked when fesc_prescription == 3.
simulation_norm : array with length equal to the number of models.
Denotes which simulation each model uses.
0 : MySim
1 : Mini-Millennium
2 : Tiamat (down to z = 5)
3 : Extended Tiamat (down to z = 1.6ish).
4 : Britton's Simulation
5 : Kali
FirstFile, LastFile, NumFile : array of integers with length equal to the number of models.
The file numbers for each model that were read in (defined by the range between [FirstFile, LastFile] inclusive) and the TOTAL number of files for this model (we may only be plotting a subset of the volume).
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
output_tag : string
Name of the file that will be generated. File will be saved in the current directory with the output format defined by the 'output_format' variable at the beggining of the file.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
No relevant units.
'''
print("Plotting quasar count/density")
if rank == 0:
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax6 = ax1.twinx()
fig2 = plt.figure()
ax3 = fig2.add_subplot(111)
ax5 = ax3.twinx()
fig3 = plt.figure()
ax7 = fig3.add_subplot(111)
fig4 = plt.figure()
ax50 = fig4.add_subplot(111)
fig5 = plt.figure()
ax55 = fig5.add_subplot(111)
fig6 = plt.figure()
ax56 = fig6.add_subplot(111)
mean_quasar_activity_array = []
std_quasar_activity_array = []
N_quasar_activity_array = []
N_gal_halo_array = []
N_gal_array = []
merger_counts_halo_array = []
merger_counts_galaxy_array = []
bin_middle_halo_array = []
bin_middle_galaxy_array = []
for model_number in range(0, len(SnapList)): # Does this for each of the models.
if (fesc_prescription[model_number] != 3): # Want to skip the models that didn't count quasars.
continue
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif (simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
mean_quasar_activity_array.append([])
std_quasar_activity_array.append([])
N_quasar_activity_array.append([])
N_gal_halo_array.append([])
N_gal_array.append([])
merger_counts_halo_array.append([])
merger_counts_galaxy_array.append([])
bin_middle_halo_array.append([])
bin_middle_galaxy_array.append([])
box_factor = (LastFile[model_number] - FirstFile[model_number] + 1.0)/(NumFile[model_number]) # This factor allows us to take a sub-volume of the box and scale the results to represent the entire box.
print("We are plotting the quasar density using {0:.4f} of the box's volume.".format(box_factor))
norm = pow(AllVars.BoxSize,3) / pow(AllVars.Hubble_h, 3) * box_factor
####
## We perform the plotting on Rank 0 so only this rank requires the final counts array. ##
if rank == 0:
quasars_total = np.zeros_like((N_quasars_z[model_number]))
boost_total = np.zeros_like(N_quasars_boost_z[model_number])
gal_count_total = np.zeros_like(N_gal_z[model_number])
else:
quasars_total = None
boost_total = None
gal_count_total = None
N_quasars_tmp = np.array((N_quasars_z[model_number])) # So we can use MPI.Reduce()
comm.Reduce([N_quasars_tmp, MPI.DOUBLE], [quasars_total, MPI.DOUBLE], op = MPI.SUM, root = 0) # Sum the number of quasars and passes back to rank 0.
N_quasars_boost_tmp = np.array(N_quasars_boost_z[model_number]) # So we can use MPI.Reduce()
comm.Reduce([N_quasars_boost_tmp, MPI.DOUBLE], [boost_total, MPI.DOUBLE], op = MPI.SUM, root = 0) # Sum the number of galaxies that had their fesc boosted.
N_gal_tmp = np.array(N_gal_z[model_number]) # So we can use MPI.Reduce()
comm.Reduce([N_gal_tmp, MPI.DOUBLE], [gal_count_total, MPI.DOUBLE], op = MPI.SUM, root = 0) # Sum the number of total galaxies.
for snapshot_idx in range(len(SnapList[model_number])):
mean_quasar_activity_array[model_number], std_quasar_activity_array[model_number], N_quasar_activity_array[model_number] = calculate_pooled_stats(mean_quasar_activity_array[model_number], std_quasar_activity_array[model_number], N_quasar_activity_array[model_number], mean_quasar_activity[model_number][snapshot_idx], std_quasar_activity[model_number][snapshot_idx], N_halo[model_number][snapshot_idx])
if rank == 0:
merger_count_halo_total = np.zeros_like((N_merger_halo[model_number][snapshot_idx]))
N_gal_halo_total = np.zeros_like((N_halo[model_number][snapshot_idx]))
merger_count_galaxy_total = np.zeros_like((N_merger_galaxy[model_number][snapshot_idx]))
N_gal_total = np.zeros_like((N_gal[model_number][snapshot_idx]))
else:
merger_count_halo_total = None
N_gal_halo_total = None
merger_count_galaxy_total = None
N_gal_total = None
comm.Reduce([N_merger_halo[model_number][snapshot_idx], MPI.FLOAT], [merger_count_halo_total, MPI.FLOAT], op = MPI.SUM, root = 0) # Sum all the stellar mass and pass to Rank 0.
comm.Reduce([N_halo[model_number][snapshot_idx], MPI.FLOAT], [N_gal_halo_total, MPI.FLOAT], op = MPI.SUM, root = 0) # Sum all the stellar mass and pass to Rank 0.
comm.Reduce([N_merger_galaxy[model_number][snapshot_idx], MPI.FLOAT], [merger_count_galaxy_total, MPI.FLOAT], op = MPI.SUM, root = 0) # Sum all the stellar mass and pass to Rank 0.
comm.Reduce([N_gal[model_number][snapshot_idx], MPI.FLOAT], [N_gal_total, MPI.FLOAT], op = MPI.SUM, root = 0) # Sum all the stellar mass and pass to Rank 0.
if rank == 0:
merger_counts_halo_array[model_number].append(merger_count_halo_total)
N_gal_halo_array[model_number].append(N_gal_halo_total)
merger_counts_galaxy_array[model_number].append(merger_count_galaxy_total)
N_gal_array[model_number].append(N_gal_total)
bin_middle_halo_array[model_number].append(np.arange(m_low, m_high+bin_width, bin_width)[:-1] + bin_width * 0.5)
bin_middle_galaxy_array[model_number].append(np.arange(m_gal_low, m_gal_high+bin_width, bin_width)[:-1] + bin_width * 0.5)
if rank == 0:
plot_count = 0
stop_plot = 0
title = model_tags[model_number]
t = np.empty(len(SnapList[model_number]))
ZZ = np.empty(len(SnapList[model_number]))
for snapshot_idx in range(0, len(SnapList[model_number])):
t[snapshot_idx] = (AllVars.t_BigBang - AllVars.Lookback_Time[SnapList[model_number][snapshot_idx]]) * 1.0e3
ZZ[snapshot_idx] = AllVars.SnapZ[SnapList[model_number][snapshot_idx]]
if (stop_plot == 0):
# print("Snapshot {0} PlotSnapshot "
#"{1}".format(SnapList[model_number][snapshot_idx], PlotList[model_number][plot_count]))
if (SnapList[model_number][snapshot_idx] == PlotList[model_number][plot_count]):
label = "z = {0:.2f}".format(AllVars.SnapZ[PlotList[model_number][plot_count]])
ax7.plot(bin_middle_halo_array[model_number][snapshot_idx], mean_quasar_activity_array[model_number][snapshot_idx], color = PlotScripts.colors[plot_count], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = label, linewidth = PlotScripts.global_linewidth)
#ax50.plot(bin_middle_halo_array[model_number][snapshot_idx], merger_counts_array[model_number][snapshot_idx] / gal_count_total[snapshot_idx], color = PlotScripts.colors[plot_count], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = label, linewidth = PlotScripts.global_linewidth)
ax50.plot(bin_middle_halo_array[model_number][snapshot_idx], merger_counts_halo_array[model_number][snapshot_idx], color = PlotScripts.colors[plot_count], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = label, linewidth = PlotScripts.global_linewidth)
#ax50.plot(bin_middle_halo_array[model_number][snapshot_idx], merger_counts_array[model_number][snapshot_idx] / N_gal_halo_array[model_number][snapshot_idx], color = PlotScripts.colors[plot_count], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = label, linewidth = PlotScripts.global_linewidth)
#ax55.plot(bin_middle_galaxy_array[model_number][snapshot_idx], merger_counts_galaxy_array[model_number][snapshot_idx], color = PlotScripts.colors[plot_count], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = label, linewidth = PlotScripts.global_linewidth)
ax55.plot(bin_middle_galaxy_array[model_number][snapshot_idx],
merger_counts_galaxy_array[model_number][snapshot_idx] / N_gal_array[model_number][snapshot_idx], color = PlotScripts.colors[plot_count], linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = label, linewidth = PlotScripts.global_linewidth)
print("plot_count = {0} len(PlotList) = {1}".format(plot_count,
len(PlotList[model_number])))
plot_count += 1
print("plot_count = {0} len(PlotList) = {1}".format(plot_count,
len(PlotList[model_number])))
if (plot_count == len(PlotList[model_number])):
stop_plot = 1
print("For Snapshot {0} at t {3} there were {1} total mergers compared to {2} total galaxies.".format(snapshot_idx, np.sum(merger_counts_galaxy_array[model_number][snapshot_idx]), np.sum(gal_count_total[snapshot_idx]), t[snapshot_idx]))
if (np.sum(gal_count_total[snapshot_idx]) > 0.0 and np.sum(merger_counts_galaxy_array[model_number][snapshot_idx]) > 0.0):
ax56.scatter(t[snapshot_idx], np.sum(merger_counts_galaxy_array[model_number][snapshot_idx]) / np.sum(gal_count_total[snapshot_idx]), color = 'r', rasterized = True)
#ax56.scatter(t[snapshot_idx], quasars_total[snapshot_idx] / np.sum(gal_count_total[snapshot_idx]), color = 'r', rasterized = True)
ax1.plot(t, quasars_total / norm, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[0], rasterized = True, linewidth = PlotScripts.global_linewidth)
p = np.where((ZZ < 15))[0]
#ax1.plot(ZZ[p], quasars_total[p] / norm, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[0], rasterized = True, linewidth = PlotScripts.global_linewidth)
ax3.plot(t, boost_total, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[0], rasterized = True, label = title, linewidth = PlotScripts.global_linewidth)
w = np.where((gal_count_total > 0.0))[0] # Since we're doing a division, need to only plot those redshifts that actually have galaxies.
ax5.plot(t[w], np.divide(boost_total[w], gal_count_total[w]), color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[1], rasterized = True, linewidth = PlotScripts.global_linewidth)
ax6.plot(t[w], gal_count_total[w] / norm, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[1], rasterized = True, linewidth = PlotScripts.global_linewidth)
#ax6.plot(ZZ[p], gal_count_total[p] / norm, color = PlotScripts.colors[model_number], linestyle = PlotScripts.linestyles[1], rasterized = True, linewidth = PlotScripts.global_linewidth)
ax1.plot(np.nan, np.nan, color = PlotScripts.colors[0], linestyle = PlotScripts.linestyles[0], label = "Quasar Ejection Density")
ax1.plot(np.nan, np.nan, color = PlotScripts.colors[0], linestyle = PlotScripts.linestyles[1], label = "Galaxy Density")
ax3.plot(np.nan, np.nan, color = 'k', linestyle = PlotScripts.linestyles[0], label = "Count")
ax3.plot(np.nan, np.nan, color = 'k', linestyle = PlotScripts.linestyles[1], label = "Fraction of Galaxies")
ax7.set_xlabel(r'$\log_{10}\ M_\mathrm{vir}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
ax7.set_ylabel(r'$\mathrm{Mean \: Quasar \: Activity}$', size = PlotScripts.global_fontsize)
ax50.set_xlabel(r'$\log_{10}\ M_\mathrm{vir}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
#ax50.set_ylabel(r'$\mathrm{Fraction \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
ax50.set_ylabel(r'$\mathrm{Number \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
ax55.set_xlabel(r'$\log_{10}\ M_\mathrm{*}\ [M_{\odot}]$', size = PlotScripts.global_fontsize)
ax55.set_ylabel(r'$\mathrm{Fraction \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
#ax55.set_ylabel(r'$\mathrm{Number \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
ax56.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_labelsize)
ax56.set_ylabel(r'$\mathrm{Fraction \: Galaxies \: Undergoing \: Merger}$', size = PlotScripts.global_fontsize)
#ax56.set_ylabel(r'$\mathrm{Fraction \: Galaxies \: Quasar \: Activity}$', size = PlotScripts.global_fontsize)
ax56.set_yscale('log', nonposy='clip')
ax50.axvline(np.log10(32.0*AllVars.PartMass / AllVars.Hubble_h), color = 'k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
ax1.xaxis.set_minor_locator(mtick.MultipleLocator(PlotScripts.time_tickinterval))
ax1.set_xlim(PlotScripts.time_xlim)
ax1.set_yscale('log', nonposy='clip')
ax3.xaxis.set_minor_locator(mtick.MultipleLocator(PlotScripts.time_tickinterval))
ax3.set_xlim(PlotScripts.time_xlim)
ax3.set_yscale('log', nonposy='clip')
## Create a second axis at the top that contains the corresponding redshifts. ##
## The redshift defined in the variable 'z_plot' will be displayed. ##
ax2 = ax1.twiny()
ax4 = ax3.twiny()
ax57 = ax56.twiny()
t_plot = (AllVars.t_BigBang - AllVars.cosmo.lookback_time(PlotScripts.z_plot).value) * 1.0e3 # Corresponding time values on the bottom.
z_labels = ["$%d$" % x for x in PlotScripts.z_plot] # Properly Latex-ize the labels.
ax2.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax2.set_xlim(PlotScripts.time_xlim)
ax2.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax2.set_xticklabels(z_labels) # But label them as redshifts.
ax4.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax4.set_xlim(PlotScripts.time_xlim)
ax4.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax4.set_xticklabels(z_labels) # But label them as redshifts.
ax57.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax57.set_xlim(PlotScripts.time_xlim)
ax57.set_xticks(t_plot) # Set the ticks according to the time values on the bottom,
ax57.set_xticklabels(z_labels) # But label them as redshifts.
ax1.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_labelsize)
#ax1.set_xlabel(r"$z$", size = PlotScripts.global_labelsize)
ax1.set_ylabel(r'$N_\mathrm{Quasars} \: [\mathrm{Mpc}^{-3}]$', fontsize = PlotScripts.global_fontsize)
ax6.set_ylabel(r'$N_\mathrm{Gal} \: [\mathrm{Mpc}^{-3}]$', fontsize = PlotScripts.global_fontsize)
ax3.set_xlabel(r"$\mathrm{Time \: Since \: Big \: Bang \: [Myr]}$", size = PlotScripts.global_labelsize)
ax3.set_ylabel(r'$N_\mathrm{Boosted}$', fontsize = PlotScripts.global_fontsize)
ax5.set_ylabel(r'$\mathrm{Fraction \: Boosted}$', fontsize = PlotScripts.global_fontsize)
leg = ax1.legend(loc='lower right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax3.legend(loc='lower left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax7.legend(loc='upper left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax50.legend(loc='upper right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
leg = ax55.legend(loc='upper right', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
fig.tight_layout()
fig2.tight_layout()
fig3.tight_layout()
fig5.tight_layout()
fig6.tight_layout()
outputFile1 = './{0}_quasardensity{1}'.format(output_tag, output_format)
outputFile2 = './{0}_boostedcount{1}'.format(output_tag, output_format)
outputFile3 = './{0}_quasar_activity_halo{1}'.format(output_tag, output_format)
outputFile4 = './{0}_mergercount_global{1}'.format(output_tag, output_format)
outputFile5 = './{0}_mergercount_global_stellarmass{1}'.format(output_tag, output_format)
outputFile6 = './{0}_mergercount_total{1}'.format(output_tag, output_format)
fig.savefig(outputFile1) # Save the figure
fig2.savefig(outputFile2) # Save the figure
fig3.savefig(outputFile3) # Save the figure
fig4.savefig(outputFile4) # Save the figure
fig5.savefig(outputFile5) # Save the figure
fig6.savefig(outputFile6) # Save the figure
print("Saved to {0}".format(outputFile1))
print("Saved to {0}".format(outputFile2))
print("Saved to {0}".format(outputFile3))
print("Saved to {0}".format(outputFile4))
print("Saved to {0}".format(outputFile5))
print("Saved to {0}".format(outputFile6))
plt.close(fig)
plt.close(fig2)
plt.close(fig3)
##
def plot_photon_quasar_fraction(snapshot, filenr, output_tag, QuasarFractionalPhoton, QuasarActivityToggle, NumSubsteps):
    """
    Plot a histogram of the fractional photon boost galaxies receive from
    quasar activity for a single file/snapshot, with the mean non-zero
    boost marked by a vertical line.

    Note: ``QuasarActivityToggle`` and ``NumSubsteps`` are accepted for
    interface compatibility but are not used here.
    """
    axis = plt.subplot(111)

    # Histogram the fractional boosts in bins of width 0.05 over [0, 1].
    counts, bin_edges, bin_middle = AllVars.Calculate_Histogram(QuasarFractionalPhoton, 0.05, 0, 0, 1)
    axis.plot(bin_middle, counts, lw = PlotScripts.global_linewidth, color = 'r')

    # Mark the mean boost among galaxies that actually received one.
    boosted = QuasarFractionalPhoton[QuasarFractionalPhoton != 0]
    axis.axvline(np.mean(boosted), lw = 0.5, ls = '-')

    axis.set_yscale('log', nonposy='clip')
    axis.set_xlabel(r"$\mathrm{Fractional \: Photon \: Boost}$")
    axis.set_ylabel(r"$\mathrm{Count}$")
    axis.set_ylim([1e1, 1e5])

    save_path = './photonfraction/file{0}_snap{1}_{2}{3}'.format(filenr, snapshot, output_tag, output_format)
    plt.tight_layout()
    plt.savefig(save_path)
    print("Saved to {0}".format(save_path))
    plt.close()
###
def plot_quasar_substep(snapshot, filenr, output_tag, substep):
    """
    Plot a histogram of the substep (within a snapshot) at which quasar
    activity occurred, for a single file/snapshot.  The mean substep over
    galaxies that were active (value != -1) is marked with a vertical line.
    """
    axis = plt.subplot(111)

    # Histogram the substep values in bins of width 0.1 over [0, 10].
    counts, bin_edges, bin_middle = AllVars.Calculate_Histogram(substep, 0.1, 0, 0, 10)
    axis.plot(bin_middle, counts, lw = PlotScripts.global_linewidth, color = 'r')

    # -1 flags "no quasar event"; exclude those from the mean.
    active = substep[substep != -1]
    axis.axvline(np.mean(active), lw = 0.5, ls = '-')

    axis.set_yscale('log', nonposy='clip')
    axis.set_xlabel(r"$\mathrm{Substep \: Quasar \: Activity}$")
    axis.set_ylabel(r"$\mathrm{Count}$")

    save_path = './substep_activity/file{0}_snap{1}_{2}{3}'.format(filenr, snapshot, output_tag, output_format)
    plt.tight_layout()
    plt.savefig(save_path)
    print("Saved to {0}".format(save_path))
    plt.close()
###
def plot_post_quasar_SFR(PlotSnapList, model_number, Gal, output_tag):
    """
    Track and plot the star-formation rate and cold-gas mass of a single
    galaxy in the snapshots surrounding an energetic quasar wind event.

    SFR is drawn on the left axis and cold-gas mass on the right (twin)
    axis, both against time relative to the quasar event.  The figure is
    saved and the program exits after the first snapshot that yields a
    suitable galaxy.

    NOTE(review): the `Gal` parameter is unused; the function reads the
    module-level galaxy structure `G` instead -- confirm this is intended.
    """
    ax1 = plt.subplot(111)
    ax2 = ax1.twinx()  # Right-hand axis for cold-gas mass.
    count = 0
    snapshot_thickness = 20 # How many snapshots before/after the quasar event do we want to track?
    for snapshot_idx in PlotSnapList[model_number]:
        # Candidate galaxies: had a quasar event at this snapshot, live in a
        # well-resolved halo (> 200 particles) and have non-trivial stellar mass.
        w = np.where((G.QuasarActivity[:, snapshot_idx] == 1) & (G.LenHistory[:, snapshot_idx] > 200.0) & (G.GridStellarMass[:, snapshot_idx] > 0.001))[0]
        w_slice_gridhistory = G.GridHistory[w,snapshot_idx-snapshot_thickness:snapshot_idx+snapshot_thickness]
        potential_gal = []
        # Keep only galaxies tracked (GridHistory >= 0) over the full window.
        for i in range(len(w_slice_gridhistory)):
            ww = np.where((w_slice_gridhistory[i] >= 0))[0]
            if (len(ww) == snapshot_thickness * 2):
                potential_gal.append(w[i])
        if (len(potential_gal) == 0):
            return
        count += 1
        print("There were {0} galaxies that had an energetic quasar wind event at snapshot {1} (z = {2:.3f})".format(len(potential_gal), snapshot_idx, AllVars.SnapZ[snapshot_idx]))
        # NOTE(review): index 1 raises IndexError when exactly one candidate
        # exists (the len == 0 guard above does not cover that) -- confirm
        # whether potential_gal[0] was intended.
        chosen_gal = potential_gal[1]
        # Per-snapshot histories over the window [-thickness, +thickness].
        lenhistory_array = np.empty((int(snapshot_thickness*2 + 1)))
        SFR_array = np.empty((int(snapshot_thickness*2 + 1)))
        gridhistory_array = np.empty((int(snapshot_thickness*2 + 1)))
        coldgas_array = np.empty((int(snapshot_thickness*2 + 1)))
        t = np.empty((int(snapshot_thickness*2 + 1)))
        for i in range(-snapshot_thickness, snapshot_thickness+1):
            #print("SFR {0} {1}".format(snapshot_idx + i, G.GridSFR[chosen_gal, snapshot_idx+i]))
            #print("ColdGas {0} {1}".format(snapshot_idx + i, G.GridColdGas[chosen_gal, snapshot_idx+i]))
            lenhistory_array[i+snapshot_thickness] = (G.LenHistory[chosen_gal, snapshot_idx+i])
            SFR_array[i+snapshot_thickness] = (G.GridSFR[chosen_gal, snapshot_idx+i]) #- (G.GridSFR[chosen_gal, snapshot_idx])
            gridhistory_array[i+snapshot_thickness] = (G.GridHistory[chosen_gal, snapshot_idx+i])
            # Convert cold gas from internal units to Msun (1e10 / h factor).
            coldgas_array[i+snapshot_thickness] = (G.GridColdGas[chosen_gal, snapshot_idx+i] * 1.0e10 / AllVars.Hubble_h) #- (G.GridColdGas[chosen_gal, snapshot_idx])
            # Time offset (Myr) relative to the quasar-event snapshot.
            t[i+snapshot_thickness] = (-AllVars.Lookback_Time[snapshot_idx+i] + AllVars.Lookback_Time[snapshot_idx]) * 1.0e3
        print("Len History {0}".format(lenhistory_array))
        print("Grid History {0}".format(gridhistory_array))
        print("Cold Gas {0}".format(coldgas_array))
        print("SFR {0}".format(SFR_array))
        # Annotate redshift, stellar mass and dynamical time on the panel.
        stellarmass_text = r"$log M_* = {0:.2f} \: M_\odot$".format(np.log10(G.GridStellarMass[chosen_gal, snapshot_idx] * 1.0e10 / AllVars.Hubble_h))
        Ndym_text = "Dynamical Time = {0:.2f} Myr".format(G.DynamicalTime[chosen_gal, snapshot_idx])
        z_text = "z = {0:.2f}".format(AllVars.SnapZ[snapshot_idx])
        ax1.text(0.05, 0.95, z_text, transform = ax1.transAxes, fontsize = PlotScripts.global_fontsize - 4)
        ax1.text(0.05, 0.9, stellarmass_text, transform = ax1.transAxes, fontsize = PlotScripts.global_fontsize - 4)
        ax1.text(0.05, 0.85, Ndym_text, transform = ax1.transAxes, fontsize = PlotScripts.global_fontsize - 4)
        ax1.plot(t, SFR_array, color = 'r', lw = PlotScripts.global_linewidth)
        ax2.plot(t, coldgas_array, color = 'b', lw = PlotScripts.global_linewidth)
        ax1.set_xlabel(r"$\mathrm{Time \: Since \: Quasar \: Event \: [Myr]}$", size = PlotScripts.global_labelsize - 10)
        # ax1.set_ylabel(r"$\mathrm{Fractional \: SFR \: Relative \: To \: SFR_{Quasar}}$", size = PlotScripts.global_labelsize - 10)
        # ax2.set_ylabel(r"$\mathrm{Difference \: Cold \: Gas \: Mass \: Relative \: To \: Cold_{Quasar}}$", size = PlotScripts.global_labelsize - 10)
        ax1.set_ylabel(r"$\mathrm{SFR} \: [\mathrm{M}_\odot \mathrm{yr}^{-1}]$", size = PlotScripts.global_labelsize - 10)
        ax2.set_ylabel(r"$\mathrm{Cold \: Gas \: Mass \: [\mathrm{M}_\odot]}$",size = PlotScripts.global_labelsize - 10)
        ax1.set_yscale('log', nonposy='clip')
        ax2.set_yscale('log', nonposy='clip')
        # Dummy artists so both curves appear in the single ax1 legend.
        ax1.plot(np.nan, np.nan, color = 'r', label = r"$\mathrm{SFR}$")
        ax1.plot(np.nan, np.nan, color = 'b', label = r"$\mathrm{Cold \: Gas}$")
        leg = ax1.legend(loc='upper right', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        # NOTE(review): this loop rebinds `t` (previously the time array);
        # harmless only because `t` is not used again before exit().
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize(PlotScripts.global_legendsize)
        outputFile = "{0}_galaxy{2}{1}".format(output_tag, output_format, chosen_gal)
        plt.tight_layout()
        plt.savefig(outputFile)
        print("Saved to {0}".format(outputFile))
        plt.close()
        # Deliberate hard stop after the first plotted galaxy.
        exit()
###
def plot_stellarmass_blackhole(SnapList, simulation_norm, mean_galaxy_BHmass,
                               std_galaxy_BHmass, N_galaxy_BHmass, FirstFile,
                               LastFile, NumFile, model_tags, output_tag):
    """
    Plot the stellar mass - black hole mass relation (one figure) and the
    black hole mass function (second figure) for each model/snapshot.

    The per-task statistics are first pooled across MPI tasks with
    ``collect_across_tasks``; only the root task (rank 0) draws and saves
    the figures as "./<output_tag><output_format>" and
    "./<output_tag>_MF<output_format>".
    """
    # Pool the per-task mean/std/count arrays across all MPI tasks.
    master_mean_SMBH, master_std_SMBH, master_N, master_bin_middle = \
        collect_across_tasks(mean_galaxy_BHmass, std_galaxy_BHmass,
                             N_galaxy_BHmass, SnapList, SnapList, True,
                             m_gal_low, m_gal_high)
    if rank == 0:  # Only the root task plots.
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        fig2 = plt.figure()
        ax2 = fig2.add_subplot(111)
        for model_number in range(0, len(SnapList)):
            ## Normalization for each model. ##
            # Set the global cosmology/box parameters for this simulation.
            if (simulation_norm[model_number] == 0):
                AllVars.Set_Params_Mysim()
            elif (simulation_norm[model_number] == 1):
                AllVars.Set_Params_MiniMill()
            elif (simulation_norm[model_number] == 2):
                AllVars.Set_Params_Tiamat()
            elif (simulation_norm[model_number] == 3):
                AllVars.Set_Params_Tiamat_extended()
            elif (simulation_norm[model_number] == 4):
                AllVars.Set_Params_Britton()
            elif(simulation_norm[model_number] == 5):
                AllVars.Set_Params_Kali()
            box_factor = (LastFile[model_number] - FirstFile[model_number] + 1.0)/(NumFile[model_number]) # This factor allows us to take a sub-volume of the box and scale the results to represent the entire box.
            # Volume normalisation for the mass function (per Mpc^3 per dex).
            norm = pow(AllVars.BoxSize,3) / pow(AllVars.Hubble_h, 3) * bin_width * box_factor
            for snapshot_idx in range(0, len(SnapList[model_number])):
                # Only plot bins that actually contain galaxies.
                w = np.where((master_N[model_number][snapshot_idx] > 0.0))[0]
                mean = np.log10(master_mean_SMBH[model_number][snapshot_idx][w])
                # 1-sigma envelope in log space (upper/lower currently unused
                # because the fill_between below is commented out).
                upper = np.log10(np.add(master_mean_SMBH[model_number][snapshot_idx][w],
                                        master_std_SMBH[model_number][snapshot_idx][w]))
                lower = np.log10(np.subtract(master_mean_SMBH[model_number][snapshot_idx][w],
                                             master_std_SMBH[model_number][snapshot_idx][w]))
                label = "z = {0:.2f}" \
                        .format(AllVars.SnapZ[SnapList[model_number][snapshot_idx]])
                ax1.plot(master_bin_middle[model_number][snapshot_idx][w],
                         mean, label = label, color = PlotScripts.colors[snapshot_idx],
                         ls = PlotScripts.linestyles[model_number],
                         lw = PlotScripts.global_linewidth, rasterized = True)
                #ax1.fill_between(bin_middle_stellar_array[model_number][snapshot_idx][w], lower, upper, color = PlotScripts.colors[model_number], alpha = 0.25)
                ax2.plot(master_bin_middle[model_number][snapshot_idx][w],
                         master_N[model_number][snapshot_idx][w] / norm,
                         label = label, ls = PlotScripts.linestyles[model_number],
                         lw = PlotScripts.global_linewidth, rasterized = True)
        # Overlay the z = 8 observational SMBH relation.
        Obs.Get_Data_SMBH()
        PlotScripts.plot_SMBH_z8(ax1)
        ax1.set_xlabel(r"$\log_{10}\mathrm{M}_* [\mathrm{M}_\odot]$", size = PlotScripts.global_fontsize)
        ax1.set_ylabel(r"$\log_{10}\mathrm{M}_\mathrm{BH} [\mathrm{M}_\odot]$", size = PlotScripts.global_fontsize)
        ax2.set_xlabel(r"$\log_{10}\mathrm{M}_\mathrm{BH} [\mathrm{M}_\odot]$", size = PlotScripts.global_fontsize)
        ax2.set_ylabel(r'$\Phi\ [\mathrm{Mpc}^{-3}\: \mathrm{dex}^{-1}]$', fontsize = PlotScripts.global_fontsize)
        ax2.set_yscale('log', nonposy='clip')
        ax1.set_xticks(np.arange(7.0, 12.0))
        ax1.set_yticks(np.arange(3.0, 12.0))
        ax1.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
        ax1.yaxis.set_minor_locator(mtick.MultipleLocator(0.25))
        ax1.set_xlim([7.0, 10.25])
        ax1.set_ylim([3.0, 8.0])
        leg = ax1.legend(loc='upper left', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize(PlotScripts.global_legendsize)
        leg = ax2.legend(loc='lower left', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize(PlotScripts.global_legendsize)
        outputFile = "{0}{1}".format(output_tag, output_format)
        plt.tight_layout()
        fig.savefig(outputFile)
        print("Saved to {0}".format(outputFile))
        plt.close(fig)
        outputFile2 = "{0}_MF{1}".format(output_tag, output_format)
        plt.tight_layout()
        fig2.savefig(outputFile2)
        print("Saved to {0}".format(outputFile2))
        plt.close(fig2)
###
def plot_reionmod(PlotSnapList, SnapList, simulation_norm, mean_reionmod_halo,
                  std_reionmod_halo, N_halo, mean_reionmod_z, std_reionmod_z,
                  N_reionmod, plot_z, model_tags, output_tag):
    """
    Plot the reionization modifier as a function of halo mass and redshift.

    Parameters
    ----------
    PlotSnapList, SnapList: 2D Nested arrays of integers. Outer length is equal to the number of models and inner length is number of snapshots we're plotting/calculated for.
        PlotSnapList contains the snapshots for each model we will plot for the halo mass figure.
        SnapList contains the snapshots for each model that we have performed calculations for. These aren't equal because we don't want to plot halo curves for ALL redshifts.
    simulation_norm: Array of integers. Length is equal to the number of models.
        Contains the simulation identifier for each model. Used to set the parameters of each model.
    mean_reionmod_halo, std_reionmod_halo: 3D Nested arrays of floats. Most outer length is equal to the number of models, next length is number of snapshots for each model, then inner-most length is the number of halo mass bins (given by NB).
        Contains the mean/standard deviation values for the reionization modifier as a function of halo mass.
        NOTE: These are unique for each task.
    N_halo: 3D Nested arrays of floats. Lengths are identical to mean_reionmod_halo.
        Contains the number of halos in each halo mass bin.
        NOTE: These are unique for each task.
    mean_reionmod_z, std_reionmod_z: 2D Nested arrays of floats. Outer length is equal to the number of models, inner length is the number of snapshots for each model.
        Contains the mean/standard deviation values for the reionization modifier as a function of redshift.
        NOTE: These are unique for each task.
    N_reionmod: 2D Nested arrays of floats. Lengths are identical to mean_reionmod_z.
        Contains the number of galaxies at each redshift that have non-negative reionization modifier.
        NOTE: These are unique for each task.
    plot_z: Boolean.
        Denotes whether we want to plot the reionization modifier as a function of redshift.
    model_tags: Array of strings. Length is equal to the number of models.
        Contains the legend labels for each model.
    output_tag: String.
        The prefix for the output file.

    Returns
    ----------
    None. Plot is saved in current directory as "./<output_tag>.<output_format>"
    """
    # BUG FIX: this statement was previously split over two physical lines
    # with no continuation character, so the first line was a dead tuple
    # expression of (undefined) names and only two of the four return values
    # were unpacked.  Parenthesising the targets restores the 4-way unpack.
    (master_mean_reionmod_halo, master_std_reionmod_halo,
     master_N_reionmod_halo, master_bin_middle) = \
        collect_across_tasks(mean_reionmod_halo, std_reionmod_halo,
                             N_halo, SnapList, PlotSnapList, True,
                             m_low, m_high)
    if plot_z:
        master_mean_reionmod_z, master_std_reionmod_z, master_N_reionmod_z, _ = \
            collect_across_tasks(mean_reionmod_z, std_reionmod_z, N_reionmod)
    if rank == 0:  # Only the root MPI task plots.
        fig1 = plt.figure()
        ax1 = fig1.add_subplot(111)
        if plot_z:
            fig2 = plt.figure()
            ax10 = fig2.add_subplot(111)
        for model_number in range(len(PlotSnapList)):
            # Set the cosmology for this model's simulation.
            if(simulation_norm[model_number] == 1):
                cosmo = AllVars.Set_Params_MiniMill()
            elif(simulation_norm[model_number] == 3):
                cosmo = AllVars.Set_Params_Tiamat_extended()
            elif(simulation_norm[model_number] == 4):
                cosmo = AllVars.Set_Params_Britton()
            elif(simulation_norm[model_number] == 5):
                cosmo = AllVars.Set_Params_Kali()
            # NOTE(review): other simulation_norm values leave `cosmo` unset,
            # which would raise later when plot_z is True -- confirm callers
            # only pass the values handled above.
            for snapshot_idx in range(len(PlotSnapList[model_number])):
                # Only the first curve per model carries the model label;
                # redshift labels are added via dummy artists below.
                if snapshot_idx == 0:
                    label = model_tags[model_number]
                else:
                    label = ""
                # Only plot mass bins that actually contain galaxies.
                nonzero_bins = np.where(master_N_reionmod_halo[model_number][snapshot_idx] > 0.0)[0]
                ax1.plot(master_bin_middle[model_number][snapshot_idx][nonzero_bins],
                         master_mean_reionmod_halo[model_number][snapshot_idx][nonzero_bins],
                         label = label, ls = PlotScripts.linestyles[model_number],
                         color = PlotScripts.colors[snapshot_idx])
            if plot_z:
                ax10.plot((AllVars.t_BigBang - AllVars.Lookback_Time[SnapList[model_number]])*1.0e3, master_mean_reionmod_z[model_number], color = PlotScripts.colors[model_number], label = model_tags[model_number], ls = PlotScripts.linestyles[model_number], lw = 3)
            # Dummy (NaN) artists so each redshift gets a legend entry.
            for count, snapshot_idx in enumerate(PlotSnapList[model_number]):
                #label = r"$\mathbf{z = " + str(int(round(AllVars.SnapZ[snapshot_idx]))) + "}$"
                label = r"$\mathbf{z = " + str(AllVars.SnapZ[snapshot_idx]) + "}$"
                ax1.plot(np.nan, np.nan, ls = PlotScripts.linestyles[0], color =
                         PlotScripts.colors[count], label = label)
        ax1.set_xlim([8.5, 11.5])
        ax1.set_ylim([0.0, 1.05])
        ax1.set_xlabel(r'$\mathbf{log_{10} \: M_{vir} \:[M_{\odot}]}$', fontsize = PlotScripts.global_labelsize)
        ax1.set_ylabel(r'$\mathbf{Mean ReionMod}$', fontsize = PlotScripts.global_labelsize)
        leg = ax1.legend(loc='lower right', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize(PlotScripts.global_legendsize)
        outputFile1 = "./{0}_halo{1}".format(output_tag, output_format)
        fig1.savefig(outputFile1, bbox_inches='tight')  # Save the figure
        print('Saved file to {0}'.format(outputFile1))
        plt.close(fig1)
        if plot_z:
            ax10.set_xlabel(r"$\mathbf{Time \: since \: Big \: Bang \: [Myr]}$", fontsize = PlotScripts.global_labelsize)
            tick_locs = np.arange(200.0, 1000.0, 100.0)
            tick_labels = [r"$\mathbf{%d}$" % x for x in tick_locs]
            ax10.xaxis.set_major_locator(mtick.MultipleLocator(100))
            ax10.set_xticklabels(tick_labels, fontsize = PlotScripts.global_fontsize)
            ax10.set_xlim(PlotScripts.time_xlim)
            ax10.set_ylabel(r'$\mathbf{Mean ReionMod}$', fontsize = PlotScripts.global_labelsize)
            ## Second (top) axis showing the corresponding redshifts. ##
            ax11 = ax10.twiny()
            t_plot = (AllVars.t_BigBang - cosmo.lookback_time(PlotScripts.z_plot).value) * 1.0e3  # Corresponding Time values on the bottom.
            z_labels = ["$\mathbf{%d}$" % x for x in PlotScripts.z_plot]  # Properly Latex-ize the labels.
            ax11.set_xlabel(r"$\mathbf{z}$", fontsize = PlotScripts.global_labelsize)
            ax11.set_xlim(PlotScripts.time_xlim)
            ax11.set_xticks(t_plot)  # Set the ticks according to the time values on the bottom,
            ax11.set_xticklabels(z_labels, fontsize = PlotScripts.global_fontsize)  # But label them as redshifts.
            leg = ax10.legend(loc='lower right', numpoints=1, labelspacing=0.1)
            leg.draw_frame(False)  # Don't want a box frame
            for t in leg.get_texts():  # Reduce the size of the text
                t.set_fontsize(PlotScripts.global_legendsize)
            outputFile2 = "./{0}_z{1}".format(output_tag, output_format)
            fig2.savefig(outputFile2, bbox_inches='tight')  # Save the figure
            print('Saved file to {0}'.format(outputFile2))
            plt.close(fig2)
##
def plot_dust(PlotSnapList, SnapList, simulation_norm, mean_dust_galaxy, std_dust_galaxy,
              N_galaxy, mean_dust_halo, std_dust_halo, N_halo, plot_z,
              model_tags, output_tag):
    """
    Plot the mean dust mass as a function of stellar mass (one figure) and
    of halo mass (second figure) for each model/snapshot.

    Per-task statistics are pooled across MPI tasks with
    ``collect_across_tasks``; only the root task (rank 0) draws and saves
    the figures as "./<output_tag>_galaxy<output_format>" and
    "./<output_tag>_halo<output_format>".

    NOTE(review): `plot_z` is accepted but unused in this function --
    presumably kept for signature parity with plot_reionmod; confirm.
    """
    # Pool galaxy-binned statistics across tasks (stellar-mass bins).
    master_mean_dust_galaxy, master_std_dust_galaxy, master_N_dust_galaxy, master_bin_middle_galaxy = \
        collect_across_tasks(mean_dust_galaxy, std_dust_galaxy, N_galaxy, SnapList,
                             PlotSnapList, True, m_gal_low, m_gal_high)
    # Pool halo-binned statistics across tasks (halo-mass bins).
    master_mean_dust_halo, master_std_dust_halo, master_N_dust_halo, master_bin_middle_halo = \
        collect_across_tasks(mean_dust_halo, std_dust_halo, N_halo, SnapList,
                             PlotSnapList, True, m_low, m_high)
    if rank == 0:  # Only the root task plots.
        fig1 = plt.figure()
        ax1 = fig1.add_subplot(111)
        fig2 = plt.figure()
        ax2 = fig2.add_subplot(111)
        for model_number in range(len(PlotSnapList)):
            # Set the cosmology for this model's simulation.
            if(simulation_norm[model_number] == 1):
                cosmo = AllVars.Set_Params_MiniMill()
            elif(simulation_norm[model_number] == 3):
                cosmo = AllVars.Set_Params_Tiamat_extended()
            elif(simulation_norm[model_number] == 4):
                cosmo = AllVars.Set_Params_Britton()
            elif(simulation_norm[model_number] == 5):
                cosmo = AllVars.Set_Params_Kali()
            for snapshot_idx in range(len((PlotSnapList[model_number]))):
                # Only the first curve per model carries the model label.
                if snapshot_idx == 0:
                    label = model_tags[model_number]
                else:
                    label = ""
                # Dust mass vs stellar mass: plot only occupied bins.
                nonzero_bins = np.where(master_N_dust_galaxy[model_number][snapshot_idx] > 0.0)[0]
                ax1.plot(master_bin_middle_galaxy[model_number][snapshot_idx][nonzero_bins],
                         master_mean_dust_galaxy[model_number][snapshot_idx][nonzero_bins],
                         label = label, ls = PlotScripts.linestyles[model_number],
                         color = PlotScripts.colors[snapshot_idx])
                # Dust mass vs halo mass: plot only occupied bins.
                nonzero_bins = np.where(master_N_dust_halo[model_number][snapshot_idx] > 0.0)[0]
                ax2.plot(master_bin_middle_halo[model_number][snapshot_idx][nonzero_bins],
                         master_mean_dust_halo[model_number][snapshot_idx][nonzero_bins],
                         label = label, ls = PlotScripts.linestyles[model_number],
                         color = PlotScripts.colors[snapshot_idx])
                print(master_mean_dust_halo[model_number][snapshot_idx])
            # Dummy (NaN) artists so each redshift gets a legend entry.
            for count, snapshot_idx in enumerate(PlotSnapList[model_number]):
                #label = r"$\mathbf{z = " + str(int(round(AllVars.SnapZ[snapshot_idx]))) + "}$"
                label = r"$\mathbf{z = " + str(AllVars.SnapZ[snapshot_idx]) + "}$"
                ax1.plot(np.nan, np.nan, ls = PlotScripts.linestyles[0], color =
                         PlotScripts.colors[count], label = label)
                ax2.plot(np.nan, np.nan, ls = PlotScripts.linestyles[0], color =
                         PlotScripts.colors[count], label = label)
        ax1.set_xlim([2.0, 10.5])
        #ax1.set_ylim([1.0, 6.0])
        ax1.set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$', fontsize = PlotScripts.global_labelsize)
        ax1.set_ylabel(r'$\mathbf{log_{10} \: \langle M_{Dust}\rangle_{M*}}$', fontsize = PlotScripts.global_labelsize)
        leg = ax1.legend(loc='upper left', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize(PlotScripts.global_legendsize)
        outputFile1 = "./{0}_galaxy{1}".format(output_tag, output_format)
        fig1.savefig(outputFile1, bbox_inches='tight')  # Save the figure
        print('Saved file to {0}'.format(outputFile1))
        plt.close(fig1)
        ax2.set_xlim([6.8, 11.5])
        #ax2.set_ylim([1.0, 6.0])
        ax2.set_xlabel(r'$\mathbf{log_{10} \: M_{vir} \:[M_{\odot}]}$', fontsize = PlotScripts.global_labelsize)
        ax2.set_ylabel(r'$\mathbf{log_{10} \: \langle M_{Dust}\rangle_{Mvir}}$', fontsize = PlotScripts.global_labelsize)
        leg = ax2.legend(loc='upper left', numpoints=1, labelspacing=0.1)
        leg.draw_frame(False)  # Don't want a box frame
        for t in leg.get_texts():  # Reduce the size of the text
            t.set_fontsize(PlotScripts.global_legendsize)
        outputFile2 = "./{0}_halo{1}".format(output_tag, output_format)
        fig2.savefig(outputFile2, bbox_inches='tight')  # Save the figure
        print('Saved file to {0}'.format(outputFile2))
        plt.close(fig2)
def plot_dust_scatter(SnapList, mass_gal, mass_halo, mass_dust, output_tag):
    """
    Scatter dust mass against stellar mass and against halo mass, plus a
    log-scaled hexbin of dust mass vs halo mass.  Saves three figures
    (galaxy scatter, halo scatter, hexbin) in the current directory.
    """
    fig_gal = plt.figure()
    ax_gal = fig_gal.add_subplot(111)
    fig_halo = plt.figure()
    ax_halo = fig_halo.add_subplot(111)
    # 3D figure is created for parity with the (disabled) 3D scatter below.
    fig_3d = plt.figure()
    ax_3d = fig_3d.add_subplot(111, projection='3d')
    fig_hex = plt.figure()
    ax_hex = fig_hex.add_subplot(111)

    ax_gal.scatter(mass_gal, mass_dust)
    ax_halo.scatter(mass_halo, mass_dust)
    #ax3.scatter(mass_gal, mass_halo, mass_dust)
    hb = ax_hex.hexbin(mass_halo, mass_dust, bins='log', cmap='inferno')

    ax_gal.set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$', fontsize = PlotScripts.global_labelsize)
    ax_gal.set_ylabel(r'$\mathbf{log_{10} \: M_{Dust}}$', fontsize = PlotScripts.global_labelsize)
    ax_halo.set_xlabel(r'$\mathbf{log_{10} \: M_{vir} \:[M_{\odot}]}$', fontsize = PlotScripts.global_labelsize)
    ax_halo.set_ylabel(r'$\mathbf{log_{10} \: M_{Dust}}$', fontsize = PlotScripts.global_labelsize)
    ax_hex.set_xlabel(r'$\mathbf{log_{10} \: M_{vir} \:[M_{\odot}]}$', fontsize = PlotScripts.global_labelsize)
    ax_hex.set_ylabel(r'$\mathbf{log_{10} \: M_{Dust}}$', fontsize = PlotScripts.global_labelsize)

    cb = fig_hex.colorbar(hb, ax=ax_hex)
    cb.set_label('log10(N)')

    outputFile1 = "./{0}_galaxy{1}".format(output_tag, output_format)
    fig_gal.savefig(outputFile1, bbox_inches='tight')
    print('Saved file to {0}'.format(outputFile1))
    plt.close(fig_gal)

    outputFile2 = "./{0}_halo{1}".format(output_tag, output_format)
    fig_halo.savefig(outputFile2, bbox_inches='tight')
    print('Saved file to {0}'.format(outputFile2))
    plt.close(fig_halo)

    #outputFile3 = "./{0}_3D{1}".format(output_tag, output_format)
    #fig3.savefig(outputFile3, bbox_inches='tight')
    #print('Saved file to {0}'.format(outputFile3))
    #plt.close(fig3)

    outputFile4 = "./{0}_hexbin{1}".format(output_tag, output_format)
    fig_hex.savefig(outputFile4, bbox_inches='tight')
    print('Saved file to {0}'.format(outputFile4))
    plt.close(fig_hex)
### Here ends the plotting functions. ###
### Here begins the functions that calculate various properties for the galaxies (fesc, Magnitude etc). ###
def Calculate_HaloPartStellarMass(halo_part, stellar_mass, bound_low, bound_high):
    '''
    Calculates the stellar mass for galaxies whose host halos contain a
    specified number of particles.

    Parameters
    ----------
    halo_part : array
        Number of particles inside each halo.
    stellar_mass : array
        Stellar Mass for each galaxy (entries align with halo_part).
        Units of log10(Msun).
    bound_low, bound_high : int
        We calculate the Stellar Mass of galaxies whose host halo has
        bound_low <= halo_part <= bound_high.

    Returns
    -------
    mass, mass_std : float
        Mean and standard deviation stellar mass of galaxies whose host halo
        has a particle count between the specified bounds.
        Units of log10(Msun).
    '''
    # Select galaxies whose halo particle count lies within the bounds.
    in_bounds = np.where((halo_part >= bound_low) & (halo_part <= bound_high))[0]

    # Statistics are taken in linear space, then converted back to log10.
    linear_mass = 10 ** (stellar_mass[in_bounds])
    return np.log10(np.mean(linear_mass)), np.log10(np.std(linear_mass))
##
def calculate_UV_extinction(z, L, M):
    '''
    Calculates the observed UV magnitude after dust extinction is accounted for.

    Parameters
    ----------
    z : float
        Redshift we are calculating the extinction at.
    L, M : array, length equal to the number of galaxies at this snapshot.
        Array containing the UV luminosities and magnitudes.

    Returns
    -------
    M_UV_obs : array, length equal to the number of galaxies at this snapshot.
        Array containing the observed UV magnitudes.

    Units
    -----
    Luminosities are in units of log10(erg s^-1 A^-1).
    Magnitudes are in the AB system.
    '''
    M_UV_bins = np.arange(-24, -16, 0.1)
    A_mean = np.zeros((len(M_UV_bins)))  # A_mean is the average UV extinction for a given UV bin.
    # BUG FIX: the original referenced `MUV_bins` and `A_Mean` (undefined;
    # NameError) and used an undefined `current_snap` -- the `z` parameter is
    # the redshift documented above, so it is used directly here.
    for j in range(0, len(M_UV_bins)):
        beta = calculate_beta(M_UV_bins[j], z)  # Fits the beta parameter for this redshift/UV bin.
        dist = np.random.normal(beta, 0.34, 10000)  # Normal distribution with mean beta and std-dev 0.34.
        A = 4.43 + 1.99 * dist
        A[A < 0] = 0  # Negative extinctions don't make sense.
        A_mean[j] = np.mean(A)
    # Bins the simulation magnitude into the M_UV bins.  Note that digitize defines an index i if
    # bin[i-1] <= x < bin[i]; it can return len(bins) for values beyond the last
    # edge, which would overrun A_mean, so clamp to the last bin.
    indices = np.clip(np.digitize(M, M_UV_bins), 0, len(M_UV_bins) - 1)
    dust = A_mean[indices]
    flux = AllVars.Luminosity_to_Flux(L, 10.0)  # Flux from a distance of 10 parsec, units of log10(erg s^-1 A^-1 cm^-2).
    flux_observed = flux - 0.4 * dust
    # BUG FIX: `ALlVars` typo corrected to `AllVars`.
    f_nu = AllVars.spectralflux_wavelength_to_frequency(10**flux_observed, 1600)  # Spectral flux density in Jansky.
    # BUG FIX: the original *called* M_UV_obs(...) instead of assigning it.
    M_UV_obs = -2.5 * np.log10(f_nu) + 8.90  # AB Magnitude from http://www.astro.ljmu.ac.uk/~ikb/convert-units/node2.html
    return M_UV_obs
##
def update_cumulative_stats(mean_pool, std_pool, N_pool, mean_local, std_local, N_local):
    '''
    Update the cumulative statistics (such as Stellar Mass Function,
    Mvir-Ngamma, fesc-z) that are saved across files.

    Pooled mean formulae taken from: https://www.ncbi.nlm.nih.gov/books/NBK56512/
    Pooled variance formulae taken from: https://en.wikipedia.org/wiki/Pooled_variance

    Parameters
    ----------
    mean_pool, std_pool, N_pool : array of floats with length equal to the number
            of bins (e.g. the mass bins for the Stellar Mass Function), or scalars.
        The current mean, standard deviation and number of data points within
        each bin.  The mean/std arrays are updated in place by this function.
    mean_local, std_local, N_local : array of floats with length equal to the
            number of bins, or scalars.
        The mean, standard deviation and number of data points within each bin
        that will be added to the pool.

    Returns
    -------
    mean_pool, std_pool : (See above)
        The updated statistics with the local values added and accounted for
        within the pools.  (N_pool is NOT returned; callers track counts
        separately, e.g. ``N_galaxy_array[...] += N_local``.)

    Units
    -----
    All units are kept the same as the input units.
    Values are in real-space (not log-space).
    '''
    N_times_mean_local = np.multiply(N_local, mean_local)
    # N - 1 because of Bessel's Correction
    # (https://en.wikipedia.org/wiki/Bessel%27s_correction).
    N_times_var_local = np.multiply(N_local - 1, np.multiply(std_local, std_local))

    N_times_mean_pool = np.add(N_times_mean_local, np.multiply(N_pool, mean_pool))
    N_times_var_pool = np.add(N_times_var_local,
                              np.multiply(N_pool - 1, np.multiply(std_pool, std_pool)))
    N_pool = np.add(N_local, N_pool)

    # Decide whether we are dealing with per-bin arrays or plain scalars.
    # Fixed: the original tested ``isinstance(mean_local, float32)`` with the
    # bare names ``float32``/``float64``, which raise NameError unless numpy
    # was star-imported; np.floating/np.integer cover all numpy scalar types.
    is_scalar = isinstance(mean_local, (float, int, np.floating, np.integer))
    is_array_like = (type(mean_local).__module__ == np.__name__
                     or isinstance(mean_local, list))

    if is_array_like and not is_scalar:
        for i in range(len(N_pool)):
            if N_pool[i] == 0:  # No data points in this bin.
                mean_pool[i] = 0.0
            else:
                mean_pool[i] = N_times_mean_pool[i] / N_pool[i]
            if N_pool[i] < 3:  # Too few points to properly calculate the std.
                std_pool[i] = 0.0
            else:
                # -2 because N_times_var_pool contains two 'N - 1' terms.
                std_pool[i] = np.sqrt(N_times_var_pool[i] / (N_pool[i] - 2))
    else:
        if N_pool == 0:  # Guard added: mirrors the empty-bin case above
            mean_pool = 0.0  # (the original divided by zero here).
        else:
            mean_pool = N_times_mean_pool / N_pool
        if N_pool < 3:
            std_pool = 0.0
        else:
            std_pool = np.sqrt(N_times_var_pool / (N_pool - 2))

    return mean_pool, std_pool
### Here ends the functions that deal with galaxy data manipulation. ###
#################################
if __name__ == '__main__':
np.seterr(divide='ignore')
number_models = 4
galaxies_model1="/fred/oz004/jseiler/kali/self_consistent_output/rsage_constant/galaxies/const_0.3_z5.782"
merged_galaxies_model1="/fred/oz004/jseiler/kali/self_consistent_output/rsage_constant/galaxies/const_0.3_MergedGalaxies"
photo_model1="/fred/oz004/jseiler/kali/self_consistent_output/rsage_constant/grids/cifog/const_0.3_photHI"
zreion_model1="/fred/oz004/jseiler/kali/self_consistent_output/rsage_constant/grids/cifog/const_0.3_reionization_redshift"
galaxies_model2="/fred/oz004/jseiler/kali/self_consistent_output/rsage_fej/galaxies/fej_alpha0.40_beta0.05_z5.782"
merged_galaxies_model2="/fred/oz004/jseiler/kali/self_consistent_output/rsage_fej/galaxies/fej_alpha0.40_beta0.05_MergedGalaxies"
photo_model2="/fred/oz004/jseiler/kali/self_consistent_output/rsage_fej/grids/cifog/fej_alpha0.40_beta0.05_photHI"
zreion_model2="/fred/oz004/jseiler/kali/self_consistent_output/rsage_fej/grids/cifog/fej_alpha0.40_beta0.05_reionization_redshift"
galaxies_model3="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHneg/galaxies/MHneg_1e8_1e12_0.99_0.05_z5.782"
merged_galaxies_model3="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHneg/galaxies/MHneg_1e8_1e12_0.99_0.05_MergedGalaxies"
photo_model3="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHneg/grids/cifog/MHneg_1e8_1e12_0.99_0.05_photHI"
zreion_model3="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHneg/grids/cifog/MHneg_1e8_1e12_0.99_0.05_reionization_redshift"
galaxies_model4="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHpos/galaxies/MHpos_1e8_1e12_0.01_0.50_z5.782"
merged_galaxies_model4="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHpos/galaxies/MHpos_1e8_1e12_0.01_0.50_MergedGalaxies"
photo_model4="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHpos/grids/cifog/MHpos_1e8_1e12_0.01_0.50_photHI"
zreion_model4="/fred/oz004/jseiler/kali/self_consistent_output/rsage_MHpos/grids/cifog/MHpos_1e8_1e12_0.01_0.50_reionization_redshift"
galaxies_filepath_array = [galaxies_model1,
galaxies_model2,
galaxies_model3,
galaxies_model4]
photo_array = [photo_model1,
photo_model2,
photo_model3,
photo_model4]
zreion_array = [zreion_model1,
zreion_model2,
zreion_model3,
zreion_model4]
GridSize_array = [256,
256,
256,
256]
precision_array = [2,
2,
2,
2]
merged_galaxies_filepath_array = [merged_galaxies_model1,
merged_galaxies_model2,
merged_galaxies_model3,
merged_galaxies_model4]
number_substeps = [10, 10, 10, 10] # How many substeps does each model have (specified by STEPS variable within SAGE).
number_snapshots = [99, 99, 99, 99] # Number of snapshots in the simulation (we don't have to do calculations for ALL snapshots).
# Tiamat extended has 164 snapshots.
FirstFile = [0, 0, 0, 0] # The first file number THAT WE ARE PLOTTING.
#LastFile = [63, 63, 63, 63] # The last file number THAT WE ARE PLOTTING.
LastFile = [0, 0, 0, 0] # The last file number THAT WE ARE PLOTTING.
NumFile = [64, 64, 64, 64] # The number of files for this simulation (plotting a subset of these files is allowed).
same_files = [0, 0, 0, 0] # In the case that model 1 and model 2 (index 0 and 1) have the same files, we don't want to read them in a second time.
# This array will tell us if we should keep the files for the next model or otherwise throw them away.
# The files will be kept until same_files[current_model_number] = 0.
# For example if we had 5 models we were plotting and model 1, 2, 3 shared the same files and models 4, 5 shared different files,
# Then same_files = [1, 1, 0, 1, 0] would be the correct values.
done_model = np.zeros((number_models)) # We use this to keep track of if we have done a model already.
model_tags = [r"$\mathbf{f_\mathrm{esc} \: Constant}$",
r"$\mathbf{f_\mathrm{esc} \: \propto \: f_\mathrm{ej}}$",
r"$\mathbf{f_\mathrm{esc} \: \propto \: M_\mathrm{H}^{-1}}$",
r"$\mathbf{f_\mathrm{esc} \: \propto \: M_\mathrm{H}}$"]
## Constants used for each model. ##
# Need to add an entry for EACH model. #
halo_cut = [32, 32, 32, 32] # Only calculate properties for galaxies whose host halos have at least this many particles.
# For Tiamat, z = [6, 7, 8] are snapshots [78, 64, 51]
# For Kali, z = [6, 7, 8] are snapshots [93, 76, 64]
#SnapList = [np.arange(0,99), np.arange(0,99)] # These are the snapshots over which the properties are calculated. NOTE: If the escape fraction is selected (fesc_prescription == 3) then this should be ALL the snapshots in the simulation as this prescriptions is temporally important.
#SnapList = [np.arange(20,99), np.arange(20, 99), np.arange(20, 99)]
SnapList = [[33, 50, 76, 93],
[33, 50, 76, 93],
[33, 50, 76, 93],
[33, 50, 76, 93]]
#SnapList = [[64],
# [64],
# [64],
# [64]]
#SnapList = [[33, 50, 64, 76, 93]]
#SnapList = [[64], [64]]
#SnapList = [np.arange(20,99)]
#PlotSnapList = [[30, 50, 64, 76, 93]]
#PlotSnapList = [[93, 76, 64], [93, 76, 64]]
#SnapList = [[93, 76, 64], [93, 76, 64]]
PlotSnapList = SnapList
simulation_norm = [5, 5, 5, 5] # Changes the constants (cosmology, snapshot -> redshift mapping etc) for each simulation.
# 0 for MySim (Manodeep's old one).
# 1 for Mini-Millennium.
# 2 for Tiamat (up to z =5).
# 3 for extended Tiamat (down to z = 1.6ish).
# 4 for Britton's Sim Pip
# 5 for Manodeep's new simulation Kali.
stellar_mass_halolen_lower = [32, 95, 95, 95] # These limits are for the number of particles in a halo.
stellar_mass_halolen_upper = [50, 105, 105, 105] # We calculate the average stellar mass for galaxies whose host halos have particle count between these limits.
calculate_observed_LF = [0, 0, 0, 0] # Determines whether we want to account for dust extinction when calculating the luminosity function of each model.
paper_plots = 1
##############################################################################################################
## Do a few checks to ensure all the arrays were specified properly. ##
for model_number in range(0,number_models):
assert(LastFile[model_number] - FirstFile[model_number] + 1 >= size)
if(simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif(simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif(simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
else:
print("Simulation norm was set to {0}.".format(simulation_norm[model_number]))
raise ValueError("This option has been implemented yet. Get your head in the game Jacob!")
if (number_snapshots[model_number] != len(AllVars.SnapZ)): # Here we do a check to ensure that the simulation we've defined correctly matches the number of snapshots we have also defined.
print("The number_snapshots array is {0}".format(number_snapshots))
print("The simulation_norm array is {0}".format(simulation_norm))
print("The number of snapshots for model_number {0} has {1} but you've said there is only {2}".format(model_number, len(AllVars.SnapZ), number_snapshots[model_number]))
raise ValueError("Check either that the number of snapshots has been defined properly and that the normalization option is correct.")
######################################################################
##################### SETTING UP ARRAYS ##############################
######################################################################
### The arrays are set up in a 3 part process. ###
### This is because our arrays are 3D nested to account for the model number and snapshots. ###
# First set up the outer most array. #
## Arrays for functions of stellar mass. ##
SMF = [] # Stellar Mass Function.
mean_fesc_galaxy_array = [] # Mean escape fraction as a function of stellar mass.
std_fesc_galaxy_array = [] # Same as above but standard devation.
N_galaxy_array = [] # Number of galaxies as a function of stellar mass.
mean_BHmass_galaxy_array = [] # Black hole mass as a function of stellar mass.
std_BHmass_galaxy_array = [] # Same as above but standard deviation.
mergers_galaxy_array = [] # Number of mergers as a function of halo mass.
mean_dust_galaxy_array = [] # Mean dust mass as a function of stellar mass.
std_dust_galaxy_array = [] # Same as above but standard deviation.
mean_sfr_galaxy_array = [] # Mean star formation rate as a
# function of stellar mass
std_sfr_galaxy_array = [] # Same as above but standard deviation.
mean_ssfr_galaxy_array = [] # Mean specific star formation rate as a
# function of stellar mass
std_ssfr_galaxy_array = [] # Same as above but standard deviation.
mean_Ngamma_galaxy_array = [] # Mean number of ionizing photons emitted as
# a function of stellar mass.
std_Ngamma_galaxy_array = [] # Same as above but standard deviation.
mean_photo_galaxy_array = [] # Mean photoionization rate.
std_photo_galaxy_array = [] # Std photoionization rate.
mean_reionmod_galaxy_array = [] # Mean reionization modifier using RSAGE.
std_reionmod_galaxy_array = [] # Std.
mean_gnedin_reionmod_galaxy_array = [] # Mean reionization modifier using Gnedin analytic prescription.
std_gnedin_reionmod_galaxy_array = [] # Std.
## Arrays for functions of halo mass. ##
mean_ejected_halo_array = [] # Mean ejected fractions as a function of halo mass.
std_ejected_halo_array = [] # Same as above but standard deviation.
mean_fesc_halo_array = [] # Mean escape fraction as a function of halo mass.
std_fesc_halo_array = [] # Same as above but standard deviation.
mean_Ngamma_halo_array = [] # Mean number of ionizing photons THAT ESCAPE as a function of halo mass.
std_Ngamma_halo_array = [] # Same as above but standard deviation.
N_halo_array = [] # Number of galaxies as a function of halo mass.
mergers_halo_array = [] # Number of mergers as a function of halo mass.
mean_quasar_activity_array = [] # Mean fraction of galaxies that have quasar actvitity as a function of halo mas.
std_quasar_activity_array = [] # Same as above but standard deviation.
mean_reionmod_halo_array = [] # Mean reionization modifier as a function of halo mass.
std_reionmod_halo_array = [] # Same as above but for standard deviation.
mean_dust_halo_array = [] # Mean dust mass as a function of halo mass.
std_dust_halo_array = [] # Same as above but standard deviation.
## Arrays for functions of redshift. ##
sum_Ngamma_z_array = [] # Total number of ionizing photons THAT ESCAPE as a functio of redshift.
mean_fesc_z_array = [] # Mean number of ionizing photons THAT ESCAPE as a function of redshift.
std_fesc_z_array = [] # Same as above but standard deviation.
N_z = [] # Number of galaxies as a function of redshift.
galaxy_halo_mass_mean = [] # Mean galaxy mass as a function of redshift.
N_quasars_z = [] # This tracks how many quasars went off during a specified snapshot.
N_quasars_boost_z = [] # This tracks how many galaxies are having their escape fraction boosted by quasar activity.
dynamicaltime_quasars_mean_z = [] # Mean dynamical time of galaxies that have a quasar event as a function of redshift.
dynamicaltime_quasars_std_z = [] # Same as above but standard deviation.
dynamicaltime_all_mean_z = [] # Mean dynamical time of all galaxies.
dynamicaltime_all_std_z = [] # Same as above but standard deviation.
mean_reionmod_z = [] # Mean reionization modifier as a function of redshift.
std_reionmod_z = [] # Same as above but for standard deviation.
N_reionmod_z = [] # Number of galaxies with a non-negative reionization modifier.
mean_ejected_z = [] # Mean ejected fraction as a function of redshift.
std_ejected_z = [] # Same as above but for standard deviation.
## Arrays that aren't functions of other variables. ##
Ngamma_global = []
mass_global = []
fesc_global = []
## Arrays as a function of fej ##
mean_Ngamma_fej = []
std_Ngamma_fej = []
N_fej = []
## Now the outer arrays have been defined, set up the next nest level for the number of models. ##
for model_number in range(0,number_models):
## Galaxy Arrays ##
SMF.append([])
mean_fesc_galaxy_array.append([])
std_fesc_galaxy_array.append([])
N_galaxy_array.append([])
mean_BHmass_galaxy_array.append([])
std_BHmass_galaxy_array.append([])
mergers_galaxy_array.append([])
mean_dust_galaxy_array.append([])
std_dust_galaxy_array.append([])
mean_sfr_galaxy_array.append([])
std_sfr_galaxy_array.append([])
mean_ssfr_galaxy_array.append([])
std_ssfr_galaxy_array.append([])
mean_Ngamma_galaxy_array.append([])
std_Ngamma_galaxy_array.append([])
mean_photo_galaxy_array.append([])
std_photo_galaxy_array.append([])
mean_reionmod_galaxy_array.append([])
std_reionmod_galaxy_array.append([])
mean_gnedin_reionmod_galaxy_array.append([])
std_gnedin_reionmod_galaxy_array.append([])
## Halo arrays. ##
mean_ejected_halo_array.append([])
std_ejected_halo_array.append([])
mean_fesc_halo_array.append([])
std_fesc_halo_array.append([])
mean_Ngamma_halo_array.append([])
std_Ngamma_halo_array.append([])
N_halo_array.append([])
mergers_halo_array.append([])
mean_quasar_activity_array.append([])
std_quasar_activity_array.append([])
mean_reionmod_halo_array.append([])
std_reionmod_halo_array.append([])
mean_dust_halo_array.append([])
std_dust_halo_array.append([])
## Redshift arrays. ##
sum_Ngamma_z_array.append([])
mean_fesc_z_array.append([])
std_fesc_z_array.append([])
N_z.append([])
galaxy_halo_mass_mean.append([])
N_quasars_z.append([])
N_quasars_boost_z.append([])
dynamicaltime_quasars_mean_z.append([])
dynamicaltime_quasars_std_z.append([])
dynamicaltime_all_mean_z.append([])
dynamicaltime_all_std_z.append([])
mean_reionmod_z.append([])
std_reionmod_z.append([])
N_reionmod_z.append([])
mean_ejected_z.append([])
std_ejected_z.append([])
## Arrays that aren't functions ##
Ngamma_global.append([])
mass_global.append([])
fesc_global.append([])
## Arrays as a function of fej ##
mean_Ngamma_fej.append([])
std_Ngamma_fej.append([])
N_fej.append([])
## And then finally set up the inner most arrays ##
## NOTE: We do the counts as float so we can keep consistency when we're calling MPI operations (just use MPI.FLOAT rather than deciding if we need to use MPI.INT)
for snapshot_idx in range(len(SnapList[model_number])):
## For the arrays that are functions of stellar/halo mass, the inner most level will be an array with the statistic binned across mass ##
## E.g. SMF[model_number][snapshot_idx] will return an array whereas N_z[model_number][snapshot_idx] will return a float. ##
## Functions of stellar mass arrays. ##
SMF[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_fesc_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_fesc_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
N_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_BHmass_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_BHmass_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mergers_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_dust_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_dust_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_sfr_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_sfr_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_ssfr_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_ssfr_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_Ngamma_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_Ngamma_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_photo_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_photo_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_reionmod_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_reionmod_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
mean_gnedin_reionmod_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
std_gnedin_reionmod_galaxy_array[model_number].append(np.zeros((NB_gal), dtype = np.float32))
## Function of halo mass arrays. ##
mean_ejected_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
std_ejected_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
mean_fesc_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
std_fesc_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
mean_Ngamma_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
std_Ngamma_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
N_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
mergers_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
mean_quasar_activity_array[model_number].append(np.zeros((NB), dtype = np.float32))
std_quasar_activity_array[model_number].append(np.zeros((NB), dtype = np.float32))
mean_reionmod_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
std_reionmod_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
mean_dust_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
std_dust_halo_array[model_number].append(np.zeros((NB), dtype = np.float32))
## Function of Redshift arrays. ##
sum_Ngamma_z_array[model_number].append(0.0)
mean_fesc_z_array[model_number].append(0.0)
std_fesc_z_array[model_number].append(0.0)
N_z[model_number].append(0.0)
galaxy_halo_mass_mean[model_number].append(0.0)
N_quasars_z[model_number].append(0.0)
N_quasars_boost_z[model_number].append(0.0)
dynamicaltime_quasars_mean_z[model_number].append(0.0)
dynamicaltime_quasars_std_z[model_number].append(0.0)
dynamicaltime_all_mean_z[model_number].append(0.0)
dynamicaltime_all_std_z[model_number].append(0.0)
mean_reionmod_z[model_number].append(0.0)
std_reionmod_z[model_number].append(0.0)
N_reionmod_z[model_number].append(0.0)
mean_ejected_z[model_number].append(0.0)
std_ejected_z[model_number].append(0.0)
Ngamma_global[model_number].append([])
mass_global[model_number].append([])
fesc_global[model_number].append([])
## Arrays as a function of fej. ##
mean_Ngamma_fej[model_number].append(np.zeros((NB_fej), dtype = np.float32))
std_Ngamma_fej[model_number].append(np.zeros((NB_fej), dtype = np.float32))
N_fej[model_number].append(np.zeros((NB_fej), dtype = np.float32))
######################################################################
#################### ALL ARRAYS SETUP ################################
######################################################################
## Now it's (finally) time to read in all the data and do the actual work. ##
for model_number in range(number_models):
if(simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif(simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif(simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
else:
print("Simulation norm was set to {0}.".format(simulation_norm[model_number]))
raise ValueError("This option has been implemented yet. Get your head in the game Jacob!")
if (done_model[model_number] == 1): # If we have already done this model (i.e., we kept the files and skipped this loop), move along.
assert(FirstFile[model_number] == FirstFile[model_number - 1])
assert(LastFile[model_number] == LastFile[model_number - 1])
continue
for fnr in range(FirstFile[model_number] + rank, LastFile[model_number]+1, size): # Divide up the input files across the processors.
GG, Gal_Desc = ReadScripts.ReadGals_SAGE(galaxies_filepath_array[model_number], fnr, number_snapshots[model_number], comm) # Read galaxies
G_Merged, _ = ReadScripts.ReadGals_SAGE(merged_galaxies_filepath_array[model_number], fnr, number_snapshots[model_number], comm) # Also need the merged galaxies.
G = ReadScripts.Join_Arrays(GG, G_Merged, Gal_Desc) # Then join them together for all galaxies.
keep_files = 1 # Flips to 0 when we are done with this file.
current_model_number = model_number # Used to differentiate between outer model_number and the inner model_number because we can keep files across model_numbers.
while(keep_files == 1):
## Just a few definitions to cut down the clutter a smidge. ##
current_halo_cut = halo_cut[current_model_number]
NumSubsteps = number_substeps[current_model_number]
do_observed_LF = calculate_observed_LF[current_model_number]
for snapshot_idx in range(0, len(SnapList[current_model_number])): # Now let's calculate stats for each required redshift.
current_snap = SnapList[current_model_number][snapshot_idx] # Get rid of some clutter.
w_gal = np.where((G.GridHistory[:, current_snap] != -1) & (G.GridStellarMass[:, current_snap] > 0.0) & (G.LenHistory[:, current_snap] > current_halo_cut) & (G.GridSFR[:, current_snap] >= 0.0) & (G.GridFoFMass[:, current_snap] >= 0.0))[0] # Only include those galaxies that existed at the current snapshot, had positive (but not infinite) stellar/Halo mass and Star formation rate. Ensure the galaxies also resides in a halo that is sufficiently resolved.
w_merged_gal = np.where((G_Merged.GridHistory[:, current_snap] != -1) & (G_Merged.GridStellarMass[:, current_snap] > 0.0) & (G_Merged.LenHistory[:, current_snap] > current_halo_cut) & (G_Merged.GridSFR[:, current_snap] >= 0.0) & (G_Merged.GridFoFMass[:, current_snap] >= 0.0) & (G_Merged.LenMergerGal[:,current_snap] > current_halo_cut))[0]
print("There were {0} galaxies for snapshot {1} (Redshift {2:.3f}) model {3}.".format(len(w_gal), current_snap, AllVars.SnapZ[current_snap], current_model_number))
if (len(w_gal) == 0):
continue
mass_gal = np.log10(G.GridStellarMass[w_gal, current_snap] * 1.0e10 / AllVars.Hubble_h) # Msun. Log Units.
w_SFR = w_gal[np.where((G.GridSFR[w_gal, current_snap] > 0.0))[0]]
mass_SFR_gal = np.log10(G.GridStellarMass[w_SFR, current_snap] * \
1.0e10 / AllVars.Hubble_h)
SFR_gal = np.log10(G.GridSFR[w_SFR,current_snap])
sSFR_gal = SFR_gal - mass_SFR_gal
halo_part_count = G.LenHistory[w_gal, current_snap]
metallicity_gal = G.GridZ[w_gal, current_snap]
metallicity_tremonti_gal = np.log10(G.GridZ[w_gal, current_snap] / 0.02) + 9.0 # Using the Tremonti relationship for metallicity.
mass_central = np.log10(G.GridFoFMass[w_gal, current_snap] * 1.0e10 / AllVars.Hubble_h) # Msun. Log Units.
ejected_fraction = G.EjectedFraction[w_gal, current_snap]
w_dust = np.where(((G.GridDustColdGas[w_gal, current_snap]
+G.GridDustHotGas[w_gal, current_snap]
+G.GridDustEjectedMass[w_gal, current_snap]) > 0.0)
& (G.GridType[w_gal, current_snap] == 0))[0]
total_dust_gal = np.log10((G.GridDustColdGas[w_gal[w_dust], current_snap]
+G.GridDustHotGas[w_gal[w_dust], current_snap]
+G.GridDustEjectedMass[w_gal[w_dust], current_snap])
* 1.0e10 / AllVars.Hubble_h)
mass_gal_dust = np.log10(G.GridStellarMass[w_gal[w_dust], current_snap]
* 1.0e10 / AllVars.Hubble_h)
mass_centralgal_dust = np.log10(G.GridFoFMass[w_gal[w_dust], current_snap]
* 1.0e10 / AllVars.Hubble_h)
fesc = G.Gridfesc[w_gal, current_snap]
fesc[fesc < 0.0] = 0.0
Ngamma_gal = G.GridNgamma_HI[w_gal, current_snap] # 1.0e50
# photons/s.
if model_number < 3:
Ngamma_gal += 50.0 # Old versions of SAGE incorrectly
# subtracted 50.
Ngamma_gal *= fesc
reionmod = G.GridReionMod[w_gal, current_snap]
mass_reionmod_central = mass_central[reionmod > -1]
mass_reionmod_gal = mass_gal[reionmod > -1]
reionmod = reionmod[reionmod > -1] # Some satellite galaxies that don't have HotGas and hence won't be stripped. As a result reionmod = -1 for these. Ignore them.
mass_BH = G.GridBHMass[w_gal, current_snap] * 1.0e10 / AllVars.Hubble_h # Msun. Not log units.
L_UV = SFR_gal + 39.927 # Using relationship from STARBURST99, units of erg s^-1 A^-1. Log Units.
M_UV = AllVars.Luminosity_to_ABMag(L_UV, 1600)
if (do_observed_LF == 1): # Calculate the UV extinction if requested.
M_UV_obs = calculate_UV_extinction(AllVars.SnapZ[current_snap], L_UV, M_UV[snap_idx])
galaxy_halo_mass_mean_local, galaxy_halo_mass_std_local = Calculate_HaloPartStellarMass(halo_part_count, mass_gal, stellar_mass_halolen_lower[current_model_number], stellar_mass_halolen_upper[current_model_number]) # This is the average stellar mass for galaxies whose halos have the specified number of particles.
galaxy_halo_mass_mean[current_model_number][snapshot_idx] += pow(10, galaxy_halo_mass_mean_local) / (LastFile[current_model_number] + 1) # Adds to the average of the mean.
photofield_path = "{0}_{1:03d}".format(photo_array[current_model_number],
current_snap)
#photo_gal = photo.calc_gal_photoion(G.GridHistory[w_gal, current_snap],
# photofield_path,
# GridSize_array[current_model_number],
# precision_array[current_model_number])
#zreion_path = "{0}".format(zreion_array[current_model_number])
#zreion_gal = photo.calc_gal_zreion(G.GridHistory[w_gal, current_snap],
# zreion_path,
# GridSize_array[current_model_number],
# precision_array[current_model_number])
z_0 = 8.0
z_r = 7.0
gnedin_mfilt = ga.get_filter_mass(np.array(AllVars.SnapZ[current_snap]),
z_0, z_r)
gnedin_reionmod_gal = 1.0 / pow(1.0 + 0.26*pow(10, gnedin_mfilt - mass_central), 3.0)
###########################################
######## BASE PROPERTIES CALCULATED #######
###########################################
# Time to calculate relevant statistics.
### Functions of Galaxies/Stellar Mass ###
## Stellar Mass Function ##
(counts_local, bin_edges, bin_middle) = AllVars.Calculate_Histogram(mass_gal, bin_width, 0, m_gal_low, m_gal_high) # Bin the Stellar Mass
SMF[current_model_number][snapshot_idx] += counts_local
## Escape Fraction ##
(mean_fesc_galaxy_local, std_fesc_galaxy_local, N_local, sum_fesc_galaxy, bin_middle) = AllVars.Calculate_2D_Mean(mass_gal, fesc, bin_width, m_gal_low, m_gal_high)
(mean_fesc_galaxy_array[current_model_number][snapshot_idx], std_fesc_galaxy_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_fesc_galaxy_array[current_model_number][snapshot_idx], std_fesc_galaxy_array[current_model_number][snapshot_idx], N_galaxy_array[current_model_number][snapshot_idx], mean_fesc_galaxy_local, std_fesc_galaxy_local, N_local)
## Black Hole Mass ##
(mean_BHmass_galaxy_local, std_BHmass_galaxy_local, N_local, sum_BHmass_galaxy, bin_middle) = AllVars.Calculate_2D_Mean(mass_gal, mass_BH, bin_width, m_gal_low, m_gal_high)
(mean_BHmass_galaxy_array[current_model_number][snapshot_idx], std_BHmass_galaxy_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_BHmass_galaxy_array[current_model_number][snapshot_idx], std_BHmass_galaxy_array[current_model_number][snapshot_idx], N_galaxy_array[current_model_number][snapshot_idx], mean_BHmass_galaxy_local, std_BHmass_galaxy_local, N_local)
## Total Dust Mass ##
(mean_dust_galaxy_local, std_dust_galaxy_local, N_local,
sum_dust_galaxy, bin_middle) = AllVars.Calculate_2D_Mean(
mass_gal_dust, total_dust_gal,
bin_width, m_gal_low,
m_gal_high)
(mean_dust_galaxy_array[current_model_number][snapshot_idx],
std_dust_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_dust_galaxy_array[current_model_number][snapshot_idx],
std_dust_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_dust_galaxy_local,
std_dust_galaxy_local,
N_local)
## Star Formation Rate ##
(mean_sfr_galaxy_local, std_sfr_galaxy_local, N_local,
sum_sfr_galaxy, bin_middle) = AllVars.Calculate_2D_Mean(
mass_SFR_gal, SFR_gal,
bin_width, m_gal_low,
m_gal_high)
(mean_sfr_galaxy_array[current_model_number][snapshot_idx],
std_sfr_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_sfr_galaxy_array[current_model_number][snapshot_idx],
std_sfr_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_sfr_galaxy_local,
std_sfr_galaxy_local,
N_local)
## Specific Star Formation Rate ##
(mean_ssfr_galaxy_local, std_ssfr_galaxy_local, N_local,
sum_ssfr_galaxy, bin_middle) = AllVars.Calculate_2D_Mean(
mass_SFR_gal, sSFR_gal,
bin_width, m_gal_low,
m_gal_high)
(mean_ssfr_galaxy_array[current_model_number][snapshot_idx],
std_ssfr_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_ssfr_galaxy_array[current_model_number][snapshot_idx],
std_ssfr_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_ssfr_galaxy_local,
std_ssfr_galaxy_local,
N_local)
## Number of Ionizing Photons ##
(mean_Ngamma_galaxy_local, std_Ngamma_galaxy_local, N_local,
sum_Ngamma_galaxy_local, bin_middle) = AllVars.Calculate_2D_Mean(
mass_gal, Ngamma_gal,
bin_width, m_gal_low,
m_gal_high)
(mean_Ngamma_galaxy_array[current_model_number][snapshot_idx],
std_Ngamma_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_Ngamma_galaxy_array[current_model_number][snapshot_idx],
std_Ngamma_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_Ngamma_galaxy_local,
std_Ngamma_galaxy_local,
N_local)
## Photoionization rate ##
'''
(mean_photo_galaxy_local, std_photo_galaxy_local, N_local,
sum_photo_galaxy_local, bin_middle) = AllVars.Calculate_2D_Mean(
mass_gal, photo_gal,
bin_width, m_gal_low,
m_gal_high)
(mean_photo_galaxy_array[current_model_number][snapshot_idx],
std_photo_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_photo_galaxy_array[current_model_number][snapshot_idx],
std_photo_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_photo_galaxy_local,
std_photo_galaxy_local,
N_local)
'''
## RSAGE Reionization Modifier ##
(mean_reionmod_galaxy_local, std_reionmod_galaxy_local, N_local,
sum_reionmod_galaxy_local, bin_middle) = AllVars.Calculate_2D_Mean(
mass_reionmod_gal, reionmod,
bin_width, m_gal_low,
m_gal_high)
(mean_reionmod_galaxy_array[current_model_number][snapshot_idx],
std_reionmod_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_reionmod_galaxy_array[current_model_number][snapshot_idx],
std_reionmod_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_reionmod_galaxy_local,
std_reionmod_galaxy_local,
N_local)
## Gnedin Reionization Modifier ##
(mean_gnedin_reionmod_galaxy_local, std_gnedin_reionmod_galaxy_local, N_local,
sum_gnedin_reionmod_galaxy_local, bin_middle) = AllVars.Calculate_2D_Mean(
mass_gal, gnedin_reionmod_gal,
bin_width, m_gal_low,
m_gal_high)
(mean_gnedin_reionmod_galaxy_array[current_model_number][snapshot_idx],
std_gnedin_reionmod_galaxy_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_gnedin_reionmod_galaxy_array[current_model_number][snapshot_idx],
std_gnedin_reionmod_galaxy_array[current_model_number][snapshot_idx],
N_galaxy_array[current_model_number][snapshot_idx],
mean_gnedin_reionmod_galaxy_local,
std_gnedin_reionmod_galaxy_local,
N_local)
N_galaxy_array[current_model_number][snapshot_idx] += N_local
### Functions of Halos/Halo Mass ###
## Ejected Fraction ##
(mean_ejected_halo_local, std_ejected_halo_local, N_local, sum_ejected_halo, bin_middle) = AllVars.Calculate_2D_Mean(mass_central, ejected_fraction, bin_width, m_low, m_high)
(mean_ejected_halo_array[current_model_number][snapshot_idx], std_ejected_halo_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_ejected_halo_array[current_model_number][snapshot_idx], std_ejected_halo_array[current_model_number][snapshot_idx], N_halo_array[current_model_number][snapshot_idx], mean_ejected_halo_local, std_ejected_halo_local, N_local) # Then update the running total.
## Quasar Fraction ##
(mean_quasar_activity_local, std_quasar_activity_local,N_local, sum_quasar_activity_halo, bin_middle) = AllVars.Calculate_2D_Mean(mass_central, G.QuasarActivity[w_gal, current_snap], bin_width, m_low, m_high)
(mean_quasar_activity_array[current_model_number][snapshot_idx], std_quasar_activity_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_quasar_activity_array[current_model_number][snapshot_idx], std_quasar_activity_array[current_model_number][snapshot_idx], N_halo_array[current_model_number][snapshot_idx], mean_quasar_activity_local, std_quasar_activity_local, N_local) # Then update the running total.
## fesc Value ##
(mean_fesc_halo_local, std_fesc_halo_local, N_local, sum_fesc_halo, bin_middle) = AllVars.Calculate_2D_Mean(mass_central, fesc, bin_width, m_low, m_high)
(mean_fesc_halo_array[current_model_number][snapshot_idx], std_fesc_halo_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_fesc_halo_array[current_model_number][snapshot_idx], std_fesc_halo_array[current_model_number][snapshot_idx], N_halo_array[current_model_number][snapshot_idx], mean_fesc_halo_local, std_fesc_halo_local, N_local) # Then update the running total.
## Ngamma ##
#(mean_Ngamma_halo_local, std_Ngamma_halo_local, N_local, sum_Ngamma_halo, bin_middle) \
#= AllVars.Calculate_2D_Mean(mass_central, ionizing_photons, bin_width, m_low, m_high)
#mean_Ngamma_halo_local = np.divide(mean_Ngamma_halo_local, 1.0e50) ## Divide out a constant to keep the numbers manageable.
#std_Ngamma_halo_local = np.divide(std_Ngamma_halo_local, 1.0e50)
#(mean_Ngamma_halo_array[current_model_number][snapshot_idx], std_Ngamma_halo_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_Ngamma_halo_array[current_model_number][snapshot_idx], std_Ngamma_halo_array[current_model_number][snapshot_idx], N_halo_array[current_model_number][snapshot_idx], mean_Ngamma_halo_local, std_Ngamma_halo_local, N_local) # Then update the running total.
## Reionization Modifier ##
(mean_reionmod_halo_local, std_reionmod_halo_local, N_local, sum_reionmod_halo, bin_middle) = AllVars.Calculate_2D_Mean(mass_reionmod_central, reionmod, bin_width, m_low, m_high)
(mean_reionmod_halo_array[current_model_number][snapshot_idx], std_reionmod_halo_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_reionmod_halo_array[current_model_number][snapshot_idx], std_reionmod_halo_array[current_model_number][snapshot_idx], N_halo_array[current_model_number][snapshot_idx], mean_reionmod_halo_local, std_reionmod_halo_local, N_local) # Then update the running total.
## Total Dust Mass ##
(mean_dust_halo_local, std_dust_halo_local, N_local,
sum_dust_halo, bin_middle) = AllVars.Calculate_2D_Mean(
mass_centralgal_dust, total_dust_gal,
bin_width, m_low,
m_high)
(mean_dust_halo_array[current_model_number][snapshot_idx],
std_dust_halo_array[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_dust_halo_array[current_model_number][snapshot_idx],
std_dust_halo_array[current_model_number][snapshot_idx],
N_halo_array[current_model_number][snapshot_idx],
mean_dust_halo_local,
std_dust_halo_local,
N_local)
N_halo_array[current_model_number][snapshot_idx] += N_local
### Functions of redshift ###
## Ngamma ##
#sum_Ngamma_z_array[current_model_number][snapshot_idx] += np.sum(np.divide(ionizing_photons, 1.0e50)) # Remember that we're dividing out a constant!
## fesc Value ##
(mean_fesc_z_array[current_model_number][snapshot_idx], std_fesc_z_array[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_fesc_z_array[current_model_number][snapshot_idx], std_fesc_z_array[current_model_number][snapshot_idx], N_z[current_model_number][snapshot_idx], np.mean(fesc), np.std(fesc), len(w_gal)) # Updates the mean escape fraction for this redshift.
## Reionization Modifier ##
(mean_reionmod_z[current_model_number][snapshot_idx], std_reionmod_z[current_model_number][snapshot_idx]) = update_cumulative_stats(mean_reionmod_z[current_model_number][snapshot_idx], std_reionmod_z[current_model_number][snapshot_idx], N_reionmod_z[current_model_number][snapshot_idx], np.mean(reionmod), np.std(reionmod), len(reionmod))
N_reionmod_z[current_model_number][snapshot_idx] += len(reionmod)
## Ejected Fraction ##
(mean_ejected_z[current_model_number][snapshot_idx],std_ejected_z[current_model_number][snapshot_idx]) \
= update_cumulative_stats(mean_ejected_z[current_model_number][snapshot_idx],
std_ejected_z[current_model_number][snapshot_idx],
N_z[current_model_number][snapshot_idx],
np.mean(ejected_fraction),
np.std(ejected_fraction),
len(w_gal))
N_z[current_model_number][snapshot_idx] += len(w_gal)
#### Arrays that are just kept across snapshots ##
Ngamma_global[current_model_number][snapshot_idx].append(Ngamma_gal)
mass_global[current_model_number][snapshot_idx].append(mass_gal)
fesc_global[current_model_number][snapshot_idx].append(fesc)
#### Arrays that are function of fej ##
(mean_Ngamma_fej_local, std_Ngamma_fej_local, N_local,
sum_Ngamma_fej_local, bin_middle) = AllVars.Calculate_2D_Mean(
ejected_fraction, Ngamma_gal,
fej_bin_width, fej_low, fej_high)
(mean_Ngamma_fej[current_model_number][snapshot_idx],
std_Ngamma_fej[current_model_number][snapshot_idx]) = \
update_cumulative_stats(mean_Ngamma_fej[current_model_number][snapshot_idx],
std_Ngamma_fej[current_model_number][snapshot_idx],
N_fej[current_model_number][snapshot_idx],
mean_Ngamma_fej_local,
std_Ngamma_fej_local,
N_local)
N_fej[current_model_number][snapshot_idx] += N_local
done_model[current_model_number] = 1
if (current_model_number < number_models):
keep_files = same_files[current_model_number] # Decide if we want to keep the files loaded or throw them out.
current_model_number += 1 # Update the inner loop model number.
#StellarMassFunction(PlotSnapList, SMF, simulation_norm, FirstFile,
# LastFile, NumFile, galaxy_halo_mass_mean, model_tags,
# 1, paper_plots, "wtf")
#plot_reionmod(PlotSnapList, SnapList, simulation_norm, mean_reionmod_halo_array,
#std_reionmod_halo_array, N_halo_array, mean_reionmod_z,
#std_reionmod_z, N_reionmod_z, False, model_tags,
#"reionmod_selfcon")
#plot_dust_scatter(SnapList, mass_gal_dust, mass_centralgal_dust, total_dust_gal,
# "dust_scatter")
#plot_dust(PlotSnapList, SnapList, simulation_norm, mean_dust_galaxy_array,
# std_dust_galaxy_array, N_galaxy_array, mean_dust_halo_array,
# std_dust_halo_array, N_halo_array, False, model_tags,
# "dustmass_total")
#plot_stellarmass_blackhole(PlotSnapList, simulation_norm, mean_BHmass_galaxy_array,
# std_BHmass_galaxy_array, N_galaxy_array,
# FirstFile, LastFile, NumFile,
# model_tags, "StellarMass_BHMass")
#plot_ejectedfraction(SnapList, PlotSnapList, simulation_norm,
# mean_ejected_halo_array, std_ejected_halo_array,
# N_halo_array, mean_ejected_z, std_ejected_z, N_z,
# model_tags, "ejectedfraction")
#plot_quasars_count(SnapList, PlotSnapList, N_quasars_z, N_quasars_boost_z, N_z, mean_quasar_activity_array, std_quasar_activity_array, N_halo_array, mergers_halo_array, SMF, mergers_galaxy_array, fesc_prescription, simulation_norm, FirstFile, LastFile, NumFile, model_tags, "SN_Prescription")
plot_fesc_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_fesc_galaxy_array, std_fesc_galaxy_array,
N_galaxy_array, mean_fesc_halo_array,
std_fesc_halo_array, N_halo_array,
galaxy_halo_mass_mean, model_tags,
paper_plots, mass_global, fesc_global, Ngamma_global,
"fesc_paper")
plot_reionmod_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_reionmod_galaxy_array, std_reionmod_galaxy_array,
N_galaxy_array, mean_gnedin_reionmod_galaxy_array,
std_gnedin_reionmod_galaxy_array,
model_tags, paper_plots, "reionmod")
exit()
#plot_nion_galaxy(SnapList, PlotSnapList, simulation_norm,
# mean_Ngamma_galaxy_array, std_Ngamma_galaxy_array,
# N_galaxy_array, model_tags,
# paper_plots, "Ngamma")
'''
plot_photo_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_photo_galaxy_array, std_photo_galaxy_array,
N_galaxy_array, model_tags,
paper_plots, "photo")
'''
plot_sfr_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_sfr_galaxy_array, std_sfr_galaxy_array,
mean_ssfr_galaxy_array, std_ssfr_galaxy_array,
N_galaxy_array, model_tags, "sSFR")
#plot_fej_Ngamma(SnapList, PlotSnapList, simulation_norm,
# mean_Ngamma_fej, std_Ngamma_fej,
# N_fej, model_tags, "Ngamma_fej")
#plot_photoncount(SnapList, sum_Ngamma_z_array, simulation_norm, FirstFile, LastFile, NumFile, model_tags, "Ngamma_test") ## PARALELL COMPATIBLE
#plot_mvir_Ngamma(SnapList, mean_Ngamma_halo_array, std_Ngamma_halo_array, N_halo_array, model_tags, "Mvir_Ngamma_test", fesc_prescription, fesc_normalization, "/lustre/projects/p004_swin/jseiler/tiamat/halo_ngamma/") ## PARALELL COMPATIBLE
| 1.984375 | 2 |
tests/functional_tests/test_camera.py | accessai/access-face-vision | 3 | 13403 | from multiprocessing import Queue, Value
from time import sleep
from access_face_vision.source.camera import Camera
from access_face_vision.utils import create_parser
from access_face_vision import access_logger
LOG_LEVEL = 'debug'
logger, log_que, que_listener = access_logger.set_main_process_logger(LOG_LEVEL)
def test_camera():
    """Functional smoke test: run the Camera source for a minute, then shut down."""
    logger.info('Starting Camera test')
    args = create_parser()
    frame_counter = Value('i', 0)
    cam = Camera(args, Queue(), log_que, LOG_LEVEL, frame_counter, draw_frames=True)
    cam.start()
    sleep(60)  # let the capture loop run for a while before stopping
    cam.stop()
    logger.info('Camera test completed')
    que_listener.stop()  # flush and stop the shared logging listener
if __name__ == '__main__':
    # Allow running this functional test directly as a script (outside pytest).
    test_camera()
| 2.515625 | 3 |
utils/deserializer/__tests__/test_protobuf_deserializer.py | Mouse-BB-Team/Bot-Detection | 5 | 13404 | from utils.deserializer.protobuf_deserializer import ProtoLoader
from pathlib import Path
import pandas as pd
import pytest
PROTOFILES_DIR_PATH = Path(__file__).parent.joinpath("protofilesdir").absolute().__str__()
INVALID_PATH = "some/wrong/path"
@pytest.mark.parametrize('filepath', ["test_file.pb", "test_file_1.txt", "test_file_2.xml"])
def test_should_return_single_df_sequence_regardless_file_extension(filepath):
    """Any file extension should still deserialize into a pandas DataFrame."""
    sequence = ProtoLoader(PROTOFILES_DIR_PATH).get_single_sequence(filepath)
    assert isinstance(sequence, pd.DataFrame)
def test_should_return_not_none_when_directory_not_empty():
    """A populated protofile directory must never yield a None sequence list."""
    sequences = ProtoLoader(PROTOFILES_DIR_PATH).get_list_of_sequences()
    assert sequences is not None
def test_should_return_correct_length_of_seq_list():
    """The fixture directory holds exactly three deserializable files."""
    sequences = ProtoLoader(PROTOFILES_DIR_PATH).get_list_of_sequences()
    assert len(sequences) == 3
def test_should_return_empty_list_when_directory_empty():
    """A nonexistent directory yields an empty sequence list, not an error."""
    loader = ProtoLoader(PROTOFILES_DIR_PATH + INVALID_PATH)
    assert len(loader.get_list_of_sequences()) == 0
def test_should_check_for_list_when_directory_empty():
    """Even for a nonexistent directory, the result type is a plain list."""
    loader = ProtoLoader(PROTOFILES_DIR_PATH + INVALID_PATH)
    assert isinstance(loader.get_list_of_sequences(), list)
def test_should_return_list_of_sequences():
    """Every element of the sequence list is a pandas DataFrame."""
    loader = ProtoLoader(PROTOFILES_DIR_PATH)
    assert all(isinstance(seq, pd.DataFrame)
               for seq in loader.get_list_of_sequences())
| 2.375 | 2 |
wisdem/assemblies/turbinese/turbine_se_seam.py | dzalkind/WISDEM | 1 | 13405 | #!/usr/bin/env python
# encoding: utf-8
"""
turbine.py
Created by <NAME> and <NAME> on 2014-01-13.
Copyright (c) NREL. All rights reserved.
"""
from openmdao.main.api import Assembly, Component
from openmdao.main.datatypes.api import Float, Array, Enum, Bool, Int
from openmdao.lib.drivers.api import FixedPointIterator
import numpy as np
#from rotorse.rotor import RotorSE
#from towerse.tower import TowerSE
#from commonse.rna import RNAMass, RotorLoads
from drivewpact.drive import DriveWPACT
from drivewpact.hub import HubWPACT
from commonse.csystem import DirectionVector
from commonse.utilities import interp_with_deriv, hstack, vstack
from drivese.drive import Drive4pt, Drive3pt
from drivese.drivese_utils import blade_moment_transform, blade_force_transform
from drivese.hub import HubSE, Hub_System_Adder_drive
from SEAMLoads.SEAMLoads import SEAMLoads
from SEAMTower.SEAMTower import SEAMTower
from SEAMAero.SEAM_AEP import SEAM_PowerCurve
from SEAMRotor.SEAMRotor import SEAMBladeStructure
# from SEAMGeometry.SEAMGeometry import SEAMGeometry
def connect_io(top, cls):
    """Best-effort connection of same-named variables between an assembly and a component.

    For every input and output of the component ``cls``, attempt to connect the
    identically named variable on the parent assembly ``top`` (inputs flow from
    the assembly into the component, outputs flow back out). Names that have no
    assembly-level counterpart are silently skipped.

    Parameters
    ----------
    top : Assembly
        parent assembly on which ``connect`` is called
    cls : Component
        component whose ``list_inputs()``/``list_outputs()`` are wired up
    """
    cls_name = cls.name
    for name in cls.list_inputs():
        try:
            top.connect(name, cls_name + '.%s' % name)
        except Exception:
            # Deliberate best-effort: not every component variable exists on
            # the assembly.  Narrowed from a bare except so SystemExit and
            # KeyboardInterrupt still propagate.
            # print 'failed connecting', cls_name, name
            pass
    for name in cls.list_outputs():
        try:
            top.connect(cls_name + '.%s' % name, name)
        except Exception:
            pass
def configure_turbine(assembly, with_new_nacelle=True, flexible_blade=False, with_3pt_drive=False):
    """a stand-alone configure method to allow for flatter assemblies

    Adds all SEAM variables and components (loads, tower, blade structure,
    power curve) plus a hub and nacelle model to *assembly* and wires them
    together.

    Parameters
    ----------
    assembly : Assembly
        an openmdao assembly to be configured
    with_new_nacelle : bool
        False uses the default implementation, True uses an experimental implementation designed
        to smooth out discontinities making in amenable for gradient-based optimization
    flexible_blade : bool
        if True, internally solves the coupled aero/structural deflection using fixed point iteration.
        Note that the coupling is currently only in the flapwise deflection, and is primarily
        only important for highly flexible blades. If False, the aero loads are passed
        to the structure but there is no further iteration.
    with_3pt_drive : bool
        if True (and with_new_nacelle is True), use the three-point suspension
        drivetrain model (Drive3pt) instead of the four-point model (Drive4pt).
    """
    # NOTE(review): flexible_blade is accepted but never used in this
    # SEAM-based configuration — confirm whether it is still needed.
    #SEAM variables ----------------------------------
    #d2e = Float(0.73, iotype='in', desc='Dollars to Euro ratio'
    assembly.add('rated_power',Float(3000., iotype='in', units='kW', desc='Turbine rated power', group='Global'))
    assembly.add('hub_height', Float(100., iotype='in', units='m', desc='Hub height', group='Global'))
    assembly.add('rotor_diameter', Float(110., iotype='in', units='m', desc='Rotor diameter', group='Global'))
    # assembly.add('site_type',Enum('onshore', values=('onshore', 'offshore'), iotype='in', desc='Site type', group='Global'))
    assembly.add('tower_bottom_diameter', Float(4., iotype='in', desc='Tower bottom diameter', group='Global'))
    assembly.add('tower_top_diameter', Float(2., iotype='in', desc='Tower top diameter', group='Global'))
    assembly.add('project_lifetime', Float(iotype = 'in', desc='Operating years', group='Global'))
    assembly.add('rho_steel', Float(7.8e3, iotype='in', desc='density of steel', group='Tower'))
    assembly.add('lifetime_cycles', Float(1.e7, iotype='in', desc='Equivalent lifetime cycles', group='Rotor'))
    assembly.add('stress_limit_extreme_tower', Float(iotype='in', units='MPa', desc='Tower ultimate strength', group='Tower'))
    assembly.add('stress_limit_fatigue_tower', Float(iotype='in', units='MPa', desc='Tower fatigue strength', group='Tower'))
    assembly.add('safety_factor_tower', Float(iotype='in', desc='Tower loads safety factor', group='Tower'))
    assembly.add('PMtarget_tower', Float(1., iotype='in', desc='', group='Tower'))
    assembly.add('wohler_exponent_tower', Float(4., iotype='in', desc='Tower fatigue Wohler exponent', group='Tower'))
    assembly.add('tower_z', Array(iotype='out', desc='Tower discretization'))
    assembly.add('tower_wall_thickness', Array(iotype='out', units='m', desc='Tower wall thickness'))
    assembly.add('tower_mass', Float(iotype='out', units='kg', desc='Tower mass'))
    assembly.add('tsr', Float(iotype='in', units='m', desc='Design tip speed ratio', group='Aero'))
    assembly.add('F', Float(iotype='in', desc='Rotor power loss factor', group='Aero'))
    assembly.add('wohler_exponent_blade_flap', Float(iotype='in', desc='Wohler Exponent blade flap', group='Rotor'))
    assembly.add('nSigma4fatFlap', Float(iotype='in', desc='', group='Loads'))
    assembly.add('nSigma4fatTower', Float(iotype='in', desc='', group='Loads'))
    assembly.add('dLoad_dU_factor_flap', Float(iotype='in', desc='', group='Loads'))
    assembly.add('dLoad_dU_factor_tower', Float(iotype='in', desc='', group='Loads'))
    assembly.add('blade_edge_dynload_factor_ext', Float(iotype='in', desc='Extreme dynamic edgewise loads factor', group='Loads'))
    assembly.add('blade_edge_dynload_factor_fat', Float(iotype='in', desc='Fatigue dynamic edgewise loads factor', group='Loads'))
    assembly.add('PMtarget_blades', Float(1., iotype='in', desc='', group='Rotor'))
    assembly.add('max_tipspeed', Float(iotype='in', desc='Maximum tip speed', group='Aero'))
    assembly.add('n_wsp', Int(iotype='in', desc='Number of wind speed bins', group='Aero'))
    assembly.add('min_wsp', Float(0.0, iotype = 'in', units = 'm/s', desc = 'min wind speed', group='Aero'))
    assembly.add('max_wsp', Float(iotype = 'in', units = 'm/s', desc = 'max wind speed', group='Aero'))
    assembly.add('turbulence_int', Float(iotype='in', desc='Reference turbulence intensity', group='Plant_AEP'))
    # assembly.add('WeibullInput', Bool(True, iotype='in', desc='Flag for Weibull input', group='AEP'))
    assembly.add('weibull_C', Float(iotype = 'in', units='m/s', desc = 'Weibull scale factor', group='AEP'))
    assembly.add('weibull_k', Float(iotype = 'in', desc='Weibull shape or form factor', group='AEP'))
    assembly.add('blade_sections', Int(iotype='in', desc='number of sections along blade', group='Rotor'))
    # NOTE(review): 'wohler_exponent_blade_flap' was already added above with a
    # slightly different desc — duplicate add; the later call wins.
    assembly.add('wohler_exponent_blade_flap', Float(iotype='in', desc='Blade flap fatigue Wohler exponent', group='Rotor'))
    assembly.add('MaxChordrR', Float(iotype='in', units='m', desc='Spanwise position of maximum chord', group='Rotor'))
    assembly.add('tif_blade_root_flap_ext', Float(1., iotype='in', desc='Technology improvement factor flap extreme', group='Rotor'))
    assembly.add('tif_blade_root_edge_ext', Float(1., iotype='in', desc='Technology improvement factor edge extreme', group='Rotor'))
    assembly.add('tif_blade_root_flap_fat', Float(1., iotype='in', desc='Technology improvement factor flap LEQ', group='Rotor'))
    assembly.add('sc_frac_flap', Float(iotype='in', desc='spar cap fraction of chord', group='Rotor'))
    assembly.add('sc_frac_edge', Float(iotype='in', desc='spar cap fraction of thickness', group='Rotor'))
    assembly.add('safety_factor_blade', Float(iotype='in', desc='Blade loads safety factor', group='Rotor'))
    assembly.add('stress_limit_extreme_blade', Float(iotype='in', units='MPa', desc='Blade ultimate strength', group='Rotor'))
    assembly.add('stress_limit_fatigue_blade', Float(iotype='in', units='MPa', desc='Blade fatigue strength', group='Rotor'))
    assembly.add('AddWeightFactorBlade', Float(iotype='in', desc='Additional weight factor for blade shell', group='Rotor'))
    assembly.add('blade_material_density', Float(iotype='in', units='kg/m**3', desc='Average density of blade materials', group='Rotor'))
    assembly.add('blade_mass', Float(iotype = 'out', units = 'kg', desc = 'Blade mass'))
    # assembly.add('mean_wsp', Float(iotype = 'in', units = 'm/s', desc = 'mean wind speed', group='Aero')) # [m/s]
    assembly.add('air_density', Float(iotype = 'in', units = 'kg/m**3', desc = 'density of air', group='Plant_AEP')) # [kg / m^3]
    assembly.add('max_Cp', Float(iotype = 'in', desc = 'max CP', group='Aero'))
    assembly.add('gearloss_const', Float(iotype = 'in', desc = 'Gear loss constant', group='Drivetrain'))
    assembly.add('gearloss_var', Float(iotype = 'in', desc = 'Gear loss variable', group='Drivetrain'))
    assembly.add('genloss', Float(iotype = 'in', desc = 'Generator loss', group='Drivetrain'))
    assembly.add('convloss', Float(iotype = 'in', desc = 'Converter loss', group='Drivetrain'))
    # Outputs
    assembly.add('rated_wind_speed', Float(units = 'm / s', iotype='out', desc='wind speed for rated power'))
    assembly.add('ideal_power_curve', Array(iotype='out', units='kW', desc='total power before losses and turbulence'))
    assembly.add('power_curve', Array(iotype='out', units='kW', desc='total power including losses and turbulence'))
    assembly.add('wind_curve', Array(iotype='out', units='m/s', desc='wind curve associated with power curve'))
    assembly.add('aep', Float(iotype = 'out', units='mW*h', desc='Annual energy production in mWh'))
    assembly.add('total_aep', Float(iotype = 'out', units='mW*h', desc='AEP for total years of production'))
    # END SEAM Variables ----------------------
    # Add SEAM components and connections
    assembly.add('loads', SEAMLoads())
    assembly.add('tower_design', SEAMTower(21))
    assembly.add('blade_design', SEAMBladeStructure())
    assembly.add('aep_calc', SEAM_PowerCurve())
    assembly.driver.workflow.add(['loads', 'tower_design', 'blade_design', 'aep_calc'])
    # Feed SEAM load outputs into the tower and blade structural models.
    assembly.connect('loads.tower_bottom_moment_max', 'tower_design.tower_bottom_moment_max')
    assembly.connect('loads.tower_bottom_moment_leq', 'tower_design.tower_bottom_moment_leq')
    assembly.connect('loads.blade_root_flap_max', 'blade_design.blade_root_flap_max')
    assembly.connect('loads.blade_root_edge_max', 'blade_design.blade_root_edge_max')
    assembly.connect('loads.blade_root_flap_leq', 'blade_design.blade_root_flap_leq')
    assembly.connect('loads.blade_root_edge_leq', 'blade_design.blade_root_edge_leq')
    # Auto-wire all remaining same-named variables between the assembly and
    # each SEAM component (best-effort; see connect_io).
    connect_io(assembly, assembly.aep_calc)
    connect_io(assembly, assembly.loads)
    connect_io(assembly, assembly.tower_design)
    connect_io(assembly, assembly.blade_design)
    # End SEAM add components and connections -------------
    if with_new_nacelle:
        assembly.add('hub',HubSE())
        assembly.add('hubSystem',Hub_System_Adder_drive())
        if with_3pt_drive:
            assembly.add('nacelle', Drive3pt())
        else:
            assembly.add('nacelle', Drive4pt())
    else:
        assembly.add('nacelle', DriveWPACT())
        assembly.add('hub', HubWPACT())
    assembly.driver.workflow.add(['hub', 'nacelle'])
    if with_new_nacelle:
        assembly.driver.workflow.add(['hubSystem'])
    # connections to hub and hub system
    assembly.connect('blade_design.blade_mass', 'hub.blade_mass')
    assembly.connect('loads.blade_root_flap_max', 'hub.rotor_bending_moment')
    assembly.connect('rotor_diameter', ['hub.rotor_diameter'])
    assembly.connect('blade_design.blade_root_diameter', 'hub.blade_root_diameter')
    assembly.add('blade_number',Int(3,iotype='in',desc='number of blades', group='Aero'))
    assembly.connect('blade_number', 'hub.blade_number')
    if with_new_nacelle:
        assembly.connect('rated_power','hub.machine_rating')
        assembly.connect('rotor_diameter', ['hubSystem.rotor_diameter'])
        assembly.connect('nacelle.MB1_location','hubSystem.MB1_location') # TODO: bearing locations
        assembly.connect('nacelle.L_rb','hubSystem.L_rb')
        assembly.add('rotor_tilt', Float(5.0, iotype='in', desc='rotor tilt', group='Rotor'))
        assembly.connect('rotor_tilt','hubSystem.shaft_angle')
        assembly.connect('hub.hub_diameter','hubSystem.hub_diameter')
        assembly.connect('hub.hub_thickness','hubSystem.hub_thickness')
        assembly.connect('hub.hub_mass','hubSystem.hub_mass')
        assembly.connect('hub.spinner_mass','hubSystem.spinner_mass')
        assembly.connect('hub.pitch_system_mass','hubSystem.pitch_system_mass')
    # connections to nacelle #TODO: fatigue option variables
    assembly.connect('rotor_diameter', 'nacelle.rotor_diameter')
    # 1.5 is a safety/load factor applied to the rated torque.
    assembly.connect('1.5 * aep_calc.rated_torque', 'nacelle.rotor_torque')
    assembly.connect('loads.max_thrust', 'nacelle.rotor_thrust')
    assembly.connect('aep_calc.rated_speed', 'nacelle.rotor_speed')
    assembly.connect('rated_power', 'nacelle.machine_rating')
    assembly.add('generator_speed',Float(1173.7,iotype='in',units='rpm',desc='speed of generator', group='Drivetrain')) # - should be in nacelle
    assembly.connect('generator_speed/aep_calc.rated_speed', 'nacelle.gear_ratio')
    assembly.connect('tower_top_diameter', 'nacelle.tower_top_diameter')
    assembly.connect('blade_number * blade_design.blade_mass + hub.hub_system_mass', 'nacelle.rotor_mass') # assuming not already in rotor force / moments
    # variable connections for new nacelle
    if with_new_nacelle:
        assembly.connect('blade_number','nacelle.blade_number')
        assembly.connect('rotor_tilt','nacelle.shaft_angle')
        assembly.connect('333.3 * rated_power / 1000.0','nacelle.shrink_disc_mass')
        assembly.connect('blade_design.blade_root_diameter','nacelle.blade_root_diameter')
        #moments - ignoring for now (nacelle will use internal defaults)
        #assembly.connect('rotor.Mxyz_0','moments.b1')
        #assembly.connect('rotor.Mxyz_120','moments.b2')
        #assembly.connect('rotor.Mxyz_240','moments.b3')
        #assembly.connect('rotor.Pitch','moments.pitch_angle')
        #assembly.connect('rotor.TotalCone','moments.cone_angle')
        assembly.connect('1.5 * aep_calc.rated_torque','nacelle.rotor_bending_moment_x') #accounted for in ratedConditions.Q
        #assembly.connect('moments.My','nacelle.rotor_bending_moment_y')
        #assembly.connect('moments.Mz','nacelle.rotor_bending_moment_z')
        #forces - ignoring for now (nacelle will use internal defaults)
        #assembly.connect('rotor.Fxyz_0','forces.b1')
        #assembly.connect('rotor.Fxyz_120','forces.b2')
        #assembly.connect('rotor.Fxyz_240','forces.b3')
        #assembly.connect('rotor.Pitch','forces.pitch_angle')
        #assembly.connect('rotor.TotalCone','forces.cone_angle')
        assembly.connect('loads.max_thrust','nacelle.rotor_force_x')
        #assembly.connect('forces.Fy','nacelle.rotor_force_y')
        #assembly.connect('forces.Fz','nacelle.rotor_force_z')
class Turbine_SE_SEAM(Assembly):
    """OpenMDAO assembly coupling the SEAM turbine models with hub/nacelle sizing."""
    def configure(self):
        # All component creation and wiring lives in configure_turbine so it
        # can also be reused by flatter assemblies.
        configure_turbine(self)
if __name__ == '__main__':
    # NOTE: this script uses Python 2 print statements below; run it under a
    # Python 2 interpreter.
    # Example: size a 5 MW, 126 m rotor turbine and report component masses.
    turbine = Turbine_SE_SEAM()
    #=========== SEAM inputs
    turbine.AddWeightFactorBlade = 1.2
    turbine.blade_material_density = 2100.0
    turbine.tower_bottom_diameter = 6.
    turbine.tower_top_diameter = 3.78
    turbine.blade_edge_dynload_factor_ext = 2.5
    turbine.blade_edge_dynload_factor_fat = 0.75
    turbine.F = 0.777
    turbine.MaxChordrR = 0.2
    turbine.project_lifetime = 20.0
    turbine.lifetime_cycles = 10000000.0
    turbine.blade_sections = 21
    turbine.PMtarget_tower = 1.0
    turbine.PMtarget_blades = 1.0
    turbine.safety_factor_blade = 1.1
    turbine.safety_factor_tower = 1.5
    turbine.stress_limit_extreme_tower = 235.0
    turbine.stress_limit_fatigue_tower = 14.885
    turbine.stress_limit_extreme_blade = 200.0
    turbine.stress_limit_fatigue_blade = 27.0
    turbine.tif_blade_root_flap_ext = 1.0
    turbine.tif_blade_root_flap_fat = 1.0
    turbine.tif_blade_root_edge_ext = 1.0
    turbine.weibull_C = 11.0
    turbine.weibull_k = 2.0
    turbine.wohler_exponent_blade_flap = 10.0
    turbine.wohler_exponent_tower = 4.0
    turbine.dLoad_dU_factor_flap = 0.9
    turbine.dLoad_dU_factor_tower = 0.8
    turbine.hub_height = 90.0
    turbine.max_tipspeed = 80.0
    turbine.n_wsp = 26
    turbine.min_wsp = 0.0
    turbine.max_wsp = 25.0
    turbine.nSigma4fatFlap = 1.2
    turbine.nSigma4fatTower = 0.8
    turbine.rated_power = 5000.0
    turbine.rho_steel = 7800.0
    turbine.rotor_diameter = 126.0
    turbine.sc_frac_edge = 0.8
    turbine.sc_frac_flap = 0.3
    turbine.tsr = 8.0
    turbine.air_density = 1.225
    turbine.turbulence_int = 0.16
    turbine.max_Cp = 0.49
    turbine.gearloss_const = 0.01 # Fraction
    turbine.gearloss_var = 0.014 # Fraction
    turbine.genloss = 0.03 # Fraction
    turbine.convloss = 0.03 # Fraction
    #==============
    # === nacelle ======
    turbine.blade_number = 3 # turbine level that must be added for SEAM
    turbine.rotor_tilt = 5.0 # turbine level that must be added for SEAM
    turbine.generator_speed = 1173.7
    turbine.nacelle.L_ms = 1.0 # (Float, m): main shaft length downwind of main bearing in low-speed shaft
    turbine.nacelle.L_mb = 2.5 # (Float, m): main shaft length in low-speed shaft
    turbine.nacelle.h0_front = 1.7 # (Float, m): height of Ibeam in bedplate front
    turbine.nacelle.h0_rear = 1.35 # (Float, m): height of Ibeam in bedplate rear
    turbine.nacelle.drivetrain_design = 'geared'
    turbine.nacelle.crane = True # (Bool): flag for presence of crane
    turbine.nacelle.bevel = 0 # (Int): Flag for the presence of a bevel stage - 1 if present, 0 if not
    turbine.nacelle.gear_configuration = 'eep' # (Str): string that represents the configuration of the gearbox (stage number and types)
    turbine.nacelle.Np = [3, 3, 1] # (Array): number of planets in each stage
    turbine.nacelle.ratio_type = 'optimal' # (Str): optimal or empirical stage ratios
    turbine.nacelle.shaft_type = 'normal' # (Str): normal or short shaft length
    #turbine.nacelle.shaft_angle = 5.0 # (Float, deg): Angle of the LSS inclination with respect to the horizontal
    turbine.nacelle.shaft_ratio = 0.10 # (Float): Ratio of inner diameter to outer diameter. Leave zero for solid LSS
    turbine.nacelle.carrier_mass = 8000.0 # estimated for 5 MW
    turbine.nacelle.mb1Type = 'CARB' # (Str): Main bearing type: CARB, TRB or SRB
    turbine.nacelle.mb2Type = 'SRB' # (Str): Second bearing type: CARB, TRB or SRB
    turbine.nacelle.yaw_motors_number = 8.0 # (Float): number of yaw motors
    turbine.nacelle.uptower_transformer = True
    turbine.nacelle.flange_length = 0.5 #m
    turbine.nacelle.gearbox_cm = 0.1
    turbine.nacelle.hss_length = 1.5
    turbine.nacelle.overhang = 5.0 #TODO - should come from turbine configuration level
    turbine.nacelle.check_fatigue = 0 #0 if no fatigue check, 1 if parameterized fatigue check, 2 if known loads inputs
    # =================
    # === run ===
    turbine.run()
    print 'mass rotor blades (kg) =', turbine.blade_number * turbine.blade_design.blade_mass
    print 'mass hub system (kg) =', turbine.hubSystem.hub_system_mass
    print 'mass nacelle (kg) =', turbine.nacelle.nacelle_mass
    print 'mass tower (kg) =', turbine.tower_design.tower_mass
    # =================
| 1.726563 | 2 |
src/triage/component/results_schema/alembic/versions/5dd2ba8222b1_add_run_type.py | josephbajor/triage_NN | 160 | 13406 | """add run_type
Revision ID: 5dd2ba8222b1
Revises: 079a74c15e8b
Create Date: 2021-07-22 23:53:04.043651
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '5dd2ba8222b1'
down_revision = '079a74c15e8b'
branch_labels = None
depends_on = None
def upgrade():
    """Generalize experiment_runs into triage_runs and add retrain tables."""
    # Add a run_type discriminator; all existing rows are experiment runs.
    op.add_column('experiment_runs', sa.Column('run_type', sa.Text(), nullable=True), schema='triage_metadata')
    op.execute("UPDATE triage_metadata.experiment_runs SET run_type='experiment' WHERE run_type IS NULL")
    # Rename experiment_hash -> run_hash and drop its FK, since a run may now
    # reference either an experiment or a retrain.
    op.alter_column('experiment_runs', 'experiment_hash', nullable=True, new_column_name='run_hash', schema='triage_metadata')
    op.drop_constraint('experiment_runs_experiment_hash_fkey', 'experiment_runs', type_='foreignkey', schema='triage_metadata')
    op.execute("ALTER TABLE triage_metadata.experiment_runs RENAME TO triage_runs")
    # New table holding retrain configurations, keyed by hash.
    op.create_table('retrain',
        sa.Column('retrain_hash', sa.Text(), nullable=False),
        sa.Column('config', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
        sa.Column('prediction_date', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('retrain_hash'),
        schema='triage_metadata',
    )
    op.alter_column('models', 'built_in_experiment_run', nullable=False, new_column_name='built_in_triage_run', schema='triage_metadata')
    # Preserve the dropped built_by_experiment data in a side table before
    # removing the column from models.
    op.execute("CREATE TABLE triage_metadata.deprecated_models_built_by_experiment AS SELECT model_id, model_hash, built_by_experiment FROM triage_metadata.models")
    op.drop_column('models', 'built_by_experiment', schema='triage_metadata')
    # Join table mapping retrains to the models they produced.
    op.create_table('retrain_models',
        sa.Column('retrain_hash', sa.String(), nullable=False),
        sa.Column('model_hash', sa.String(), nullable=False),
        sa.ForeignKeyConstraint(['retrain_hash'], ['triage_metadata.retrain.retrain_hash'], ),
        sa.PrimaryKeyConstraint('retrain_hash', 'model_hash'),
        schema='triage_metadata'
    )
def downgrade():
    """Reverse the triage_runs / retrain schema changes."""
    op.execute("ALTER TABLE triage_metadata.triage_runs RENAME TO experiment_runs")
    op.drop_column('experiment_runs', 'run_type', schema='triage_metadata')
    op.alter_column('experiment_runs', 'run_hash', nullable=True, new_column_name='experiment_hash', schema='triage_metadata')
    op.create_foreign_key('experiment_runs_experiment_hash_fkey', 'experiment_runs', 'experiments', ['experiment_hash'], ['experiment_hash'], source_schema='triage_metadata', referent_schema='triage_metadata')
    op.drop_table('retrain_models', schema='triage_metadata')
    op.drop_table('retrain', schema='triage_metadata')
    # NOTE(review): built_by_experiment is re-created empty; the values saved
    # in deprecated_models_built_by_experiment during upgrade() are not
    # restored here — confirm whether that data loss is acceptable.
    op.add_column('models', sa.Column('built_by_experiment', sa.Text(), nullable=True), schema='triage_metadata')
    op.alter_column('models', 'built_in_triage_run', nullable=False, new_column_name='built_in_experiment_run', schema='triage_metadata')
| 1.421875 | 1 |
projects/PanopticFCN_cityscapes/panopticfcn/__init__.py | fatihyildiz-cs/detectron2 | 166 | 13407 | from .config import add_panopticfcn_config
from .panoptic_seg import PanopticFCN
from .build_solver import build_lr_scheduler
| 1.109375 | 1 |
03_lecture_Django/lecture3/hello/views.py | MoStgt/CS50 | 0 | 13408 | <reponame>MoStgt/CS50
from http.client import HTTPResponse
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
# def index(request):
# return HttpResponse("Hello World!")
def index(request):
    """Render the hello app's landing page."""
    template_name = "hello/index.html"
    return render(request, template_name)
def brian(request):
    """Return a plain-text greeting for Brian."""
    greeting = "Hello Brian"
    return HttpResponse(greeting)
def david(request):
    """Return a plain-text greeting for David."""
    greeting = "Hello David"
    return HttpResponse(greeting)
# def greet(request, name):
# return HttpResponse(f"Hello, {name.capitalize()}!")
def greet(request, name):
    # Render a personalized greeting; `name` is captured from the URL path
    # and capitalized before being handed to the template context.
    return render(request, "hello/greet.html", {
        "name": name.capitalize()
    }) | 2.34375 | 2 |
txdav/common/datastore/upgrade/test/test_migrate.py | backwardn/ccs-calendarserver | 462 | 13409 | <filename>txdav/common/datastore/upgrade/test/test_migrate.py
##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for L{txdav.common.datastore.upgrade.migrate}.
"""
from twext.enterprise.adbapi2 import Pickle
from twext.enterprise.dal.syntax import Delete
from twext.python.filepath import CachingFilePath
from txweb2.http_headers import MimeType
from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
from twisted.internet.protocol import Protocol
from twisted.protocols.amp import AMP, Command, String
from twisted.python.modules import getModule
from twisted.python.reflect import qual, namedAny
from twisted.trial.unittest import TestCase
from twistedcaldav import customxml, caldavxml
from twistedcaldav.config import config
from twistedcaldav.ical import Component
from txdav.base.propertystore.base import PropertyName
from txdav.caldav.datastore.test.common import CommonTests
from txdav.carddav.datastore.test.common import CommonTests as ABCommonTests
from txdav.common.datastore.file import CommonDataStore
from txdav.common.datastore.sql_tables import schema
from txdav.common.datastore.test.util import SQLStoreBuilder
from txdav.common.datastore.test.util import (
populateCalendarsFrom, StubNotifierFactory, resetCalendarMD5s,
populateAddressBooksFrom, resetAddressBookMD5s, deriveValue,
withSpecialValue, CommonCommonTests
)
from txdav.common.datastore.upgrade.migrate import UpgradeToDatabaseStep, \
StoreSpawnerService, swapAMP
from txdav.xml import element
import copy
class CreateStore(Command):
    """
    Create a store in a subprocess.
    """
    # AMP command argument: the fully-qualified name of the delegate class
    # that will receive the newly created store in the child process.
    arguments = [('delegateTo', String())]
class PickleConfig(Command):
    """
    Unpickle some configuration in a subprocess.
    """
    # AMP command arguments: the delegate class name plus the pickled
    # configuration object that will be handed to it in the child process.
    arguments = [('delegateTo', String()),
                 ('config', Pickle())]
class StoreCreator(AMP):
    """
    Helper protocol.

    Runs in the child process; responds to L{CreateStore} and
    L{PickleConfig} by instantiating the requested delegate class and
    swapping it in as the active AMP protocol via C{swapAMP}.
    """
    @CreateStore.responder
    def createStore(self, delegateTo):
        """
        Create a store and pass it to the named delegate class.
        """
        # The delegate is constructed with a fresh child store and becomes
        # the protocol handling all subsequent traffic on this connection.
        swapAMP(self, namedAny(delegateTo)(SQLStoreBuilder.childStore()))
        return {}
    @PickleConfig.responder
    def pickleConfig(self, config, delegateTo):
        """
        Instantiate the named delegate class with the unpickled
        configuration object and swap it in as the active AMP protocol.
        """
        # from twistedcaldav.config import config as globalConfig
        # globalConfig._data = config._data
        swapAMP(self, namedAny(delegateTo)(config))
        return {}
class StubSpawner(StoreSpawnerService):
    """
    Stub spawner service which populates the store forcibly.
    """
    def __init__(self, config=None):
        super(StubSpawner, self).__init__()
        # Optional configuration; when set, spawnWithConfig() sends a deep
        # copy of it (minus post-update hooks) to the child process.
        self.config = config
    @inlineCallbacks
    def spawnWithStore(self, here, there):
        """
        'here' and 'there' are the helper protocols 'there' will expect to be
        created with an instance of a store.

        @return: a L{Deferred} firing with the local protocol C{here},
            swapped in as the active protocol on the spawned connection.
        """
        master = yield self.spawn(AMP(), StoreCreator)
        # Ask the child to build a store and hand it to the remote delegate.
        yield master.callRemote(CreateStore, delegateTo=qual(there))
        returnValue(swapAMP(master, here))
    @inlineCallbacks
    def spawnWithConfig(self, config, here, there):
        """
        Similar to spawnWithStore except the child process gets a configuration
        object instead.
        """
        master = yield self.spawn(AMP(), StoreCreator)
        subcfg = copy.deepcopy(self.config)
        # Hooks are not picklable / not wanted in the child; strip them
        # from the copy before sending it across the process boundary.
        del subcfg._postUpdateHooks[:]
        yield master.callRemote(PickleConfig, config=subcfg,
                                delegateTo=qual(there))
        returnValue(swapAMP(master, here))
class HomeMigrationTests(CommonCommonTests, TestCase):
    """
    Tests for L{UpgradeToDatabaseStep}.

    Each test populates a file-based L{CommonDataStore}, runs the upgrade
    step to migrate it into the SQL store, and then verifies the migrated
    data (homes, metadata, properties, attachments) in the SQL store.
    """
    # Sample VAVAILABILITY component; installed on a home's inbox in setUp()
    # and checked by test_upgradeProperties() to verify that availability
    # data survives migration.
    av1 = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//calendarserver.org//Zonal//EN
BEGIN:VAVAILABILITY
ORGANIZER:mailto:<EMAIL>
UID:<EMAIL>
DTSTAMP:20061005T133225Z
DTEND:20140101T000000Z
BEGIN:AVAILABLE
UID:<EMAIL>
DTSTAMP:20061005T133225Z
SUMMARY:Monday to Friday from 9:00 to 17:00
DTSTART:20130101T090000Z
DTEND:20130101T170000Z
RRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
END:AVAILABLE
END:VAVAILABILITY
END:VCALENDAR
""")
    @inlineCallbacks
    def setUp(self):
        """
        Set up two stores to migrate between.
        """
        yield super(HomeMigrationTests, self).setUp()
        yield self.buildStoreAndDirectory(
            extraUids=(
                u"home1",
                u"home2",
                u"home3",
                u"home_defaults",
                u"home_no_splits",
                u"home_splits",
                u"home_splits_shared",
            )
        )
        # The SQL store is the migration *target*.
        self.sqlStore = self.store
        # Add some files to the file store.
        self.filesPath = CachingFilePath(self.mktemp())
        self.filesPath.createDirectory()
        # The file store is the migration *source*.
        fileStore = self.fileStore = CommonDataStore(
            self.filesPath, {"push": StubNotifierFactory()}, self.directory, True, True
        )
        self.upgrader = UpgradeToDatabaseStep(self.fileStore, self.sqlStore)
        requirements = CommonTests.requirements
        # Tests may declare per-test extra fixture data via the
        # "extraRequirements" special value (see @withSpecialValue below).
        extras = deriveValue(self, "extraRequirements", lambda t: {})
        requirements = self.mergeRequirements(requirements, extras)
        yield populateCalendarsFrom(requirements, fileStore)
        md5s = CommonTests.md5s
        yield resetCalendarMD5s(md5s, fileStore)
        # Stray dot-file in a home directory; migration should cope with it.
        self.filesPath.child("calendars").child(
            "__uids__").child("ho").child("me").child("home1").child(
            ".some-extra-data").setContent("some extra data")
        requirements = ABCommonTests.requirements
        yield populateAddressBooksFrom(requirements, fileStore)
        md5s = ABCommonTests.md5s
        yield resetAddressBookMD5s(md5s, fileStore)
        self.filesPath.child("addressbooks").child(
            "__uids__").child("ho").child("me").child("home1").child(
            ".some-extra-data").setContent("some extra data")
        # Add some properties we want to check get migrated over
        txn = self.fileStore.newTransaction()
        home = yield txn.calendarHomeWithUID("home_defaults")
        cal = yield home.calendarWithName("calendar_1")
        props = cal.properties()
        props[PropertyName.fromElement(caldavxml.SupportedCalendarComponentSet)] = caldavxml.SupportedCalendarComponentSet(
            caldavxml.CalendarComponent(name="VEVENT"),
            caldavxml.CalendarComponent(name="VTODO"),
        )
        props[PropertyName.fromElement(element.ResourceType)] = element.ResourceType(
            element.Collection(),
            caldavxml.Calendar(),
        )
        props[PropertyName.fromElement(customxml.GETCTag)] = customxml.GETCTag.fromString("foobar")
        inbox = yield home.calendarWithName("inbox")
        props = inbox.properties()
        props[PropertyName.fromElement(customxml.CalendarAvailability)] = customxml.CalendarAvailability.fromString(str(self.av1))
        props[PropertyName.fromElement(caldavxml.ScheduleDefaultCalendarURL)] = caldavxml.ScheduleDefaultCalendarURL(
            element.HRef.fromString("/calendars/__uids__/home_defaults/calendar_1"),
        )
        yield txn.commit()
    def mergeRequirements(self, a, b):
        """
        Merge two requirements dictionaries together, modifying C{a} and
        returning it.
        @param a: Some requirements, in the format of
            L{CommonTests.requirements}.
        @type a: C{dict}
        @param b: Some additional requirements, to be merged into C{a}.
        @type b: C{dict}
        @return: C{a}
        @rtype: C{dict}
        """
        for homeUID in b:
            homereq = a.setdefault(homeUID, {})
            homeExtras = b[homeUID]
            for calendarUID in homeExtras:
                calreq = homereq.setdefault(calendarUID, {})
                calendarExtras = homeExtras[calendarUID]
                calreq.update(calendarExtras)
        return a
    # Inject a non-VEVENT/VTODO resource (a bare VTIMEZONE) into home1 so
    # we can verify it is *not* migrated.
    @withSpecialValue(
        "extraRequirements",
        {
            "home1": {
                "calendar_1": {
                    "bogus.ics": (
                        getModule("twistedcaldav").filePath.sibling("zoneinfo")
                        .child("EST.ics").getContent(),
                        CommonTests.metadata1
                    )
                }
            }
        }
    )
    @inlineCallbacks
    def test_unknownTypeNotMigrated(self):
        """
        The only types of calendar objects that should get migrated are VEVENTs
        and VTODOs.  Other component types, such as free-standing VTIMEZONEs,
        don't have a UID and can't be stored properly in the database, so they
        should not be migrated.
        """
        yield self.upgrader.stepWithResult(None)
        txn = self.sqlStore.newTransaction()
        self.addCleanup(txn.commit)
        # Nested yields: home -> calendar -> calendar object lookup.
        self.assertIdentical(
            None,
            (yield (yield (yield (
                yield txn.calendarHomeWithUID("home1")
            ).calendarWithName("calendar_1"))
            ).calendarObjectWithName("bogus.ics"))
        )
    @inlineCallbacks
    def test_upgradeCalendarHomes(self):
        """
        L{UpgradeToDatabaseService.startService} will do the upgrade, then
        start its dependent service by adding it to its service hierarchy.
        """
        # Create a fake directory in the same place as a home, but with a non-existent uid
        fake_dir = self.filesPath.child("calendars").child("__uids__").child("ho").child("me").child("foobar")
        fake_dir.makedirs()
        # Create a fake file in the same place as a home,with a name that matches the hash uid prefix
        fake_file = self.filesPath.child("calendars").child("__uids__").child("ho").child("me").child("home_file")
        fake_file.setContent("")
        yield self.upgrader.stepWithResult(None)
        txn = self.sqlStore.newTransaction()
        self.addCleanup(txn.commit)
        # Every populated home from the fixture should now exist in SQL.
        for uid in CommonTests.requirements:
            if CommonTests.requirements[uid] is not None:
                self.assertNotIdentical(
                    None, (yield txn.calendarHomeWithUID(uid))
                )
        # Successfully migrated calendar homes are deleted
        self.assertFalse(self.filesPath.child("calendars").child(
            "__uids__").child("ho").child("me").child("home1").exists())
        # Want metadata preserved
        home = (yield txn.calendarHomeWithUID("home1"))
        calendar = (yield home.calendarWithName("calendar_1"))
        for name, metadata, md5 in (
            ("1.ics", CommonTests.metadata1, CommonTests.md5Values[0]),
            ("2.ics", CommonTests.metadata2, CommonTests.md5Values[1]),
            ("3.ics", CommonTests.metadata3, CommonTests.md5Values[2]),
        ):
            object = (yield calendar.calendarObjectWithName(name))
            self.assertEquals(object.getMetadata(), metadata)
            self.assertEquals(object.md5(), md5)
    # Declare a home whose UID has no directory record; migration should
    # skip it and leave the on-disk data alone.
    @withSpecialValue(
        "extraRequirements",
        {
            "nonexistent": {
                "calendar_1": {
                }
            }
        }
    )
    @inlineCallbacks
    def test_upgradeCalendarHomesMissingDirectoryRecord(self):
        """
        Test an upgrade where a directory record is missing for a home;
        the original home directory will remain on disk.
        """
        yield self.upgrader.stepWithResult(None)
        txn = self.sqlStore.newTransaction()
        self.addCleanup(txn.commit)
        for uid in CommonTests.requirements:
            if CommonTests.requirements[uid] is not None:
                self.assertNotIdentical(
                    None, (yield txn.calendarHomeWithUID(uid))
                )
        self.assertIdentical(None, (yield txn.calendarHomeWithUID(u"nonexistent")))
        # Skipped calendar homes are not deleted
        self.assertTrue(self.filesPath.child("calendars").child(
            "__uids__").child("no").child("ne").child("nonexistent").exists())
    @inlineCallbacks
    def test_upgradeExistingHome(self):
        """
        L{UpgradeToDatabaseService.startService} will skip migrating existing
        homes.
        """
        # Pre-create "home1" in the SQL store before running the upgrade.
        startTxn = self.sqlStore.newTransaction("populate empty sample")
        yield startTxn.calendarHomeWithUID("home1", create=True)
        yield startTxn.commit()
        yield self.upgrader.stepWithResult(None)
        vrfyTxn = self.sqlStore.newTransaction("verify sample still empty")
        self.addCleanup(vrfyTxn.commit)
        home = yield vrfyTxn.calendarHomeWithUID("home1")
        # The default calendar is still there.
        self.assertNotIdentical(None, (yield home.calendarWithName("calendar")))
        # The migrated calendar isn't.
        self.assertIdentical(None, (yield home.calendarWithName("calendar_1")))
    @inlineCallbacks
    def test_upgradeAttachments(self):
        """
        L{UpgradeToDatabaseService.startService} upgrades calendar attachments
        as well.
        """
        # Need to tweak config and settings to setup dropbox to work
        self.patch(config, "EnableDropBox", True)
        self.patch(config, "EnableManagedAttachments", False)
        self.sqlStore.enableManagedAttachments = False
        # Remove the managed-attachments marker row so the SQL store behaves
        # as a pre-managed-attachments database.
        txn = self.sqlStore.newTransaction()
        cs = schema.CALENDARSERVER
        yield Delete(
            From=cs,
            Where=cs.NAME == "MANAGED-ATTACHMENTS"
        ).on(txn)
        yield txn.commit()
        txn = self.fileStore.newTransaction()
        committed = []
        # Commit the current txn at most once (closure reads the enclosing
        # `committed` and `txn` names at call time).
        def maybeCommit():
            if not committed:
                committed.append(True)
                return txn.commit()
        self.addCleanup(maybeCommit)
        @inlineCallbacks
        def getSampleObj():
            home = (yield txn.calendarHomeWithUID("home1"))
            calendar = (yield home.calendarWithName("calendar_1"))
            object = (yield calendar.calendarObjectWithName("1.ics"))
            returnValue(object)
        inObject = yield getSampleObj()
        someAttachmentName = "some-attachment"
        someAttachmentType = MimeType.fromString("application/x-custom-type")
        attachment = yield inObject.createAttachmentWithName(
            someAttachmentName,
        )
        transport = attachment.store(someAttachmentType)
        someAttachmentData = "Here is some data for your attachment, enjoy."
        transport.write(someAttachmentData)
        yield transport.loseConnection()
        yield maybeCommit()
        yield self.upgrader.stepWithResult(None)
        # Rebind `committed`/`txn` so the cleanup's maybeCommit() commits
        # the *new* SQL transaction as well.
        committed = []
        txn = self.sqlStore.newTransaction()
        outObject = yield getSampleObj()
        outAttachment = yield outObject.attachmentWithName(someAttachmentName)
        allDone = Deferred()
        # Minimal protocol that accumulates the attachment body and fires
        # allDone with it when the connection closes.
        class SimpleProto(Protocol):
            data = ''
            def dataReceived(self, data):
                self.data += data
            def connectionLost(self, reason):
                allDone.callback(self.data)
        self.assertEquals(outAttachment.contentType(), someAttachmentType)
        outAttachment.retrieve(SimpleProto())
        allData = yield allDone
        self.assertEquals(allData, someAttachmentData)
    @inlineCallbacks
    def test_upgradeAddressBookHomes(self):
        """
        L{UpgradeToDatabaseService.startService} will do the upgrade, then
        start its dependent service by adding it to its service hierarchy.
        """
        yield self.upgrader.stepWithResult(None)
        txn = self.sqlStore.newTransaction()
        self.addCleanup(txn.commit)
        for uid in ABCommonTests.requirements:
            if ABCommonTests.requirements[uid] is not None:
                self.assertNotIdentical(
                    None, (yield txn.addressbookHomeWithUID(uid))
                )
        # Successfully migrated addressbook homes are deleted
        self.assertFalse(self.filesPath.child("addressbooks").child(
            "__uids__").child("ho").child("me").child("home1").exists())
        # Want metadata preserved
        home = (yield txn.addressbookHomeWithUID("home1"))
        adbk = (yield home.addressbookWithName("addressbook"))
        for name, md5 in (
            ("1.vcf", ABCommonTests.md5Values[0]),
            ("2.vcf", ABCommonTests.md5Values[1]),
            ("3.vcf", ABCommonTests.md5Values[2]),
        ):
            object = (yield adbk.addressbookObjectWithName(name))
            self.assertEquals(object.md5(), md5)
    @inlineCallbacks
    def test_upgradeProperties(self):
        """
        L{UpgradeToDatabaseService.startService} will do the upgrade, then
        start its dependent service by adding it to its service hierarchy.
        """
        yield self.upgrader.stepWithResult(None)
        txn = self.sqlStore.newTransaction()
        self.addCleanup(txn.commit)
        # Want metadata preserved
        home = (yield txn.calendarHomeWithUID("home_defaults"))
        cal = (yield home.calendarWithName("calendar_1"))
        inbox = (yield home.calendarWithName("inbox"))
        # Supported components: migrated into a store attribute; the old
        # dead property should be gone.
        self.assertEqual(cal.getSupportedComponents(), "VEVENT")
        self.assertTrue(cal.properties().get(PropertyName.fromElement(caldavxml.SupportedCalendarComponentSet)) is None)
        # Resource type removed
        self.assertTrue(cal.properties().get(PropertyName.fromElement(element.ResourceType)) is None)
        # Ctag removed
        self.assertTrue(cal.properties().get(PropertyName.fromElement(customxml.GETCTag)) is None)
        # Availability
        self.assertEquals(str(home.getAvailability()), str(self.av1))
        self.assertTrue(inbox.properties().get(PropertyName.fromElement(customxml.CalendarAvailability)) is None)
        # Default calendar
        self.assertTrue(home.isDefaultCalendar(cal))
        self.assertTrue(inbox.properties().get(PropertyName.fromElement(caldavxml.ScheduleDefaultCalendarURL)) is None)
    def test_fileStoreFromPath(self):
        """
        Verify that fileStoreFromPath() will return a CommonDataStore if
        the given path contains either "calendars" or "addressbooks"
        sub-directories. Otherwise it returns None
        """
        # No child directories
        docRootPath = CachingFilePath(self.mktemp())
        docRootPath.createDirectory()
        step = UpgradeToDatabaseStep.fileStoreFromPath(docRootPath)
        self.assertEquals(step, None)
        # "calendars" child directory exists
        childPath = docRootPath.child("calendars")
        childPath.createDirectory()
        step = UpgradeToDatabaseStep.fileStoreFromPath(docRootPath)
        self.assertTrue(isinstance(step, CommonDataStore))
        childPath.remove()
        # "addressbooks" child directory exists
        childPath = docRootPath.child("addressbooks")
        childPath.createDirectory()
        step = UpgradeToDatabaseStep.fileStoreFromPath(docRootPath)
        self.assertTrue(isinstance(step, CommonDataStore))
        childPath.remove()
| 1.65625 | 2 |
generated-libraries/python/netapp/fcp/aliases_info.py | radekg/netapp-ontap-lib-get | 2 | 13410 | <reponame>radekg/netapp-ontap-lib-get<filename>generated-libraries/python/netapp/fcp/aliases_info.py
from netapp.netapp_object import NetAppObject
class AliasesInfo(NetAppObject):
    """
    A list of WWPNs and their aliases generated according
    to the input - alias, WWPN or nothing.
    """
    # Backing storage for the properties below; None means "not set".
    _vserver = None

    @property
    def vserver(self):
        """
        Vserver containing the alias
        """
        return self._vserver

    @vserver.setter
    def vserver(self, val):
        # Validate before storing; a None value is ignored (field stays
        # unset).  Idiom fix: compare with ``is not None`` (PEP 8) rather
        # than the original ``!= None``.
        if val is not None:
            self.validate('vserver', val)
            self._vserver = val

    _aliases_wwpn = None

    @property
    def aliases_wwpn(self):
        """
        The FCP WWPN for which the alias is given
        """
        return self._aliases_wwpn

    @aliases_wwpn.setter
    def aliases_wwpn(self, val):
        if val is not None:
            self.validate('aliases_wwpn', val)
            self._aliases_wwpn = val

    _aliases_alias = None

    @property
    def aliases_alias(self):
        """
        The 32-character alias for a given FCP WWPN
        """
        return self._aliases_alias

    @aliases_alias.setter
    def aliases_alias(self, val):
        if val is not None:
            self.validate('aliases_alias', val)
            self._aliases_alias = val

    @staticmethod
    def get_api_name():
        """Return the ONTAP API element name for this object."""
        return "aliases-info"

    @staticmethod
    def get_desired_attrs():
        """Return the attribute names requested from the API by default."""
        return [
            'vserver',
            'aliases-wwpn',
            'aliases-alias',
        ]

    def describe_properties(self):
        """Describe each property's expected type, cardinality and requiredness."""
        return {
            'vserver': {'class': basestring, 'is_list': False, 'required': 'optional'},
            'aliases_wwpn': {'class': basestring, 'is_list': False, 'required': 'required'},
            'aliases_alias': {'class': basestring, 'is_list': False, 'required': 'required'},
        }
| 2.15625 | 2 |
imagernn/generic_batch_generator.py | OnlyBelter/learn_neuralTalk | 7 | 13411 | <filename>imagernn/generic_batch_generator.py
import numpy as np
import code
from imagernn.utils import merge_init_structs, initw, accumNpDicts
from imagernn.lstm_generator import LSTMGenerator
from imagernn.rnn_generator import RNNGenerator
def decodeGenerator(generator):
    """Map a generator name to its implementation class.

    Args:
      generator: either 'lstm' or 'rnn'.

    Returns:
      LSTMGenerator or RNNGenerator accordingly.

    Raises:
      Exception: if the name is not a supported generator.
    """
    if generator == 'lstm':
        return LSTMGenerator
    if generator == 'rnn':
        return RNNGenerator
    # Bug fix: the original raised with the undefined name
    # `base_generator_str`, which produced a NameError instead of the
    # intended error message.  Use the actual argument.
    raise Exception('generator %s is not yet supported' % (generator,))
class GenericBatchGenerator:
    """
    Base batch generator class.
    This class is aware of the fact that we are generating
    sentences from images.
    """
    @staticmethod
    def init(params, misc):
        """Initialize the encoder weights and the underlying RNN/LSTM generator.

        Returns a struct with 'model' (parameter dict), 'update' and
        'regularize' (lists of parameter names).
        """
        # inputs
        image_encoding_size = params.get('image_encoding_size', 128)
        word_encoding_size = params.get('word_encoding_size', 128)
        hidden_size = params.get('hidden_size', 128)
        generator = params.get('generator', 'lstm')
        vocabulary_size = len(misc['wordtoix'])
        output_size = len(misc['ixtoword']) # these should match though
        image_size = 4096 # size of CNN vectors hardcoded here
        if generator == 'lstm':
            assert image_encoding_size == word_encoding_size, 'this implementation does not support different sizes for these parameters'
        # initialize the encoder models
        model = {}
        model['We'] = initw(image_size, image_encoding_size) # image encoder
        model['be'] = np.zeros((1,image_encoding_size))
        model['Ws'] = initw(vocabulary_size, word_encoding_size) # word encoder
        update = ['We', 'be', 'Ws']
        regularize = ['We', 'Ws']
        init_struct = { 'model' : model, 'update' : update, 'regularize' : regularize}
        # descend into the specific Generator and initialize it
        # why generate again?? Belter, 20170510
        Generator = decodeGenerator(generator)
        generator_init_struct = Generator.init(word_encoding_size, hidden_size, output_size)
        merge_init_structs(init_struct, generator_init_struct)
        return init_struct
    @staticmethod
    def forward(batch, model, params, misc, predict_mode = False):
        """ iterates over items in the batch and calls generators on them """
        # we do the encoding here across all images/words in batch in single matrix
        # multiplies to gain efficiency. The RNNs are then called individually
        # in for loop on per-image-sentence pair and all they are concerned about is
        # taking single matrix of vectors and doing the forward/backward pass without
        # knowing anything about images, sentences or anything of that sort.
        # encode all images
        # concatenate as rows. If N is number of image-sentence pairs,
        # F will be N x image_size
        F = np.row_stack(x['image']['feat'] for x in batch)
        We = model['We']
        be = model['be']
        Xe = F.dot(We) + be # Xe becomes N x image_encoding_size
        # decode the generator we wish to use
        generator_str = params.get('generator', 'lstm')
        Generator = decodeGenerator(generator_str)
        # encode all words in all sentences (which exist in our vocab)
        wordtoix = misc['wordtoix']
        Ws = model['Ws']
        gen_caches = []
        Ys = [] # outputs
        for i,x in enumerate(batch):
            # take all words in this sentence and pluck out their word vectors
            # from Ws. Then arrange them in a single matrix Xs
            # Note that we are setting the start token as first vector
            # and then all the words afterwards. And start token is the first row of Ws
            ix = [0] + [ wordtoix[w] for w in x['sentence']['tokens'] if w in wordtoix ]
            Xs = np.row_stack( [Ws[j, :] for j in ix] )
            Xi = Xe[i,:]
            # forward prop through the RNN
            gen_Y, gen_cache = Generator.forward(Xi, Xs, model, params, predict_mode = predict_mode)
            gen_caches.append((ix, gen_cache))
            Ys.append(gen_Y)
        # back up information we need for efficient backprop
        cache = {}
        if not predict_mode:
            # ok we need cache as well because we'll do backward pass
            cache['gen_caches'] = gen_caches
            cache['Xe'] = Xe
            cache['Ws_shape'] = Ws.shape
            cache['F'] = F
            cache['generator_str'] = generator_str
        return Ys, cache
    @staticmethod
    def backward(dY, cache):
        """Backprop through the per-item generators, the word encoder and the image encoder.

        Takes the gradients dY (one per batch item) and the cache produced by
        forward(); returns a dict of gradients w.r.t. all model parameters.
        NOTE: uses `xrange`, so this module targets Python 2.
        """
        Xe = cache['Xe']
        generator_str = cache['generator_str']
        dWs = np.zeros(cache['Ws_shape'])
        gen_caches = cache['gen_caches']
        F = cache['F']
        dXe = np.zeros(Xe.shape)
        Generator = decodeGenerator(generator_str)
        # backprop each item in the batch
        grads = {}
        for i in xrange(len(gen_caches)):
            ix, gen_cache = gen_caches[i] # unpack
            local_grads = Generator.backward(dY[i], gen_cache)
            dXs = local_grads['dXs'] # intercept the gradients wrt Xi and Xs
            del local_grads['dXs']
            dXi = local_grads['dXi']
            del local_grads['dXi']
            accumNpDicts(grads, local_grads) # add up the gradients wrt model parameters
            # now backprop from dXs to the image vector and word vectors
            dXe[i,:] += dXi # image vector
            for n,j in enumerate(ix): # and now all the other words
                dWs[j,:] += dXs[n,:]
        # finally backprop into the image encoder
        dWe = F.transpose().dot(dXe)
        dbe = np.sum(dXe, axis=0, keepdims = True)
        accumNpDicts(grads, { 'We':dWe, 'be':dbe, 'Ws':dWs })
        return grads
    @staticmethod
    def predict(batch, model, params, **kwparams):
        """ some code duplication here with forward pass, but I think we want the freedom in future """
        F = np.row_stack(x['image']['feat'] for x in batch)
        We = model['We']
        be = model['be']
        Xe = F.dot(We) + be # Xe becomes N x image_encoding_size
        generator_str = params['generator']
        Generator = decodeGenerator(generator_str)
        Ys = []
        for i,x in enumerate(batch):
            gen_Y = Generator.predict(Xe[i, :], model, model['Ws'], params, **kwparams)
            Ys.append(gen_Y)
        return Ys
| 2.65625 | 3 |
ConfigUpdater.py | godfatherlmh/LoLAnalyzer | 0 | 13412 | <reponame>godfatherlmh/LoLAnalyzer
# Update the working patch and champions list
from __future__ import print_function
import configparser
import json
import os
import urllib.request
from datetime import datetime
from slugify import slugify
from collections import OrderedDict
from InterfaceAPI import InterfaceAPI
def run():
    """
    Create or load ``config.ini``, then refresh the patch and champion lists.

    On first run the user is prompted interactively for the API key, the
    database location, and which leagues/regions to download games from.
    On every run the current patch list and the champion list (sorted by
    champion release date, which matters for network compatibility across
    patches) are fetched and written back to ``config.ini``.
    """
    config = configparser.ConfigParser()
    if os.path.isfile('config.ini'):
        # Existing configuration: only the API key is needed from it here.
        config.read('config.ini')
        API_KEY = config['PARAMS']['api-key']
    else:
        def validationInput(msg, validAns):
            """Prompt until the (case-insensitive) answer is in validAns."""
            while True:
                ans = input(msg)
                if ans.lower() in validAns:
                    # Bug fix: return the lower-cased answer.  The original
                    # returned `ans` unchanged, so an upper-case 'Y' passed
                    # validation but failed every `== 'y'` comparison below
                    # and was silently treated as 'no'.
                    return ans.lower()
                print('Incorrect value. Only', validAns, 'are accepted')
        config.add_section('PARAMS')
        config.add_section('LEAGUES')
        config.add_section('REGIONS')
        config.add_section('PATCHES')
        config.add_section('CHAMPIONS')
        config.add_section('ROLES')
        config.add_section('TOP')
        config.add_section('JUNGLE')
        config.add_section('MID')
        config.add_section('CARRY')
        config.add_section('SUPPORT')
        print("No config file found. Let's set up a few parameters (you may change them anytime by manually editing config.ini).")
        API_KEY = input('- API-KEY (https://developer.riotgames.com/): ')
        config['PARAMS']['api-key'] = API_KEY
        config['PARAMS']['database'] = input('- Database location (eg. C:\LoLAnalyzerDB): ')
        # Leagues: challenger is always on; each lower league is only asked
        # about if the league above it was enabled.
        print('Leagues you want to download games from (y/n): ')
        print('challenger league enabled by default')
        config['LEAGUES']['challenger'] = 'yes'
        config['LEAGUES']['master'] = 'yes' if validationInput('- master: ', ['y', 'n']) == 'y' else 'no'
        if config['LEAGUES']['master'] == 'yes':
            print('Lower leagues are not recommended unless you have a high rate API-KEY (not given by default)')
            config['LEAGUES']['diamond'] = 'yes' if validationInput('- diamond: ', ['y', 'n']) == 'y' else 'no'
            if config['LEAGUES']['diamond'] == 'yes':
                config['LEAGUES']['platinum'] = 'yes' if validationInput('- platinum: ', ['y', 'n']) == 'y' else 'no'
                if config['LEAGUES']['platinum'] == 'yes':
                    config['LEAGUES']['gold'] = 'yes' if validationInput('- gold: ', ['y', 'n']) == 'y' else 'no'
                    if config['LEAGUES']['gold'] == 'yes':
                        config['LEAGUES']['silver'] = 'yes' if validationInput('- silver: ', ['y', 'n']) == 'y' else 'no'
                        if config['LEAGUES']['silver'] == 'yes':
                            config['LEAGUES']['bronze'] = 'yes' if validationInput('- bronze: ', ['y', 'n']) == 'y' else 'no'
        print('Regions you want to download games from (y/n):')
        print('API-KEY limitations are server-bounded, so you will download way more games enabling everything')
        for region in ('ru', 'kr', 'br1', 'oc1', 'jp1', 'na1', 'eun1',
                       'euw1', 'tr1', 'la1', 'la2'):
            config['REGIONS'][region] = 'yes' if validationInput('- %s: ' % region, ['y', 'n']) == 'y' else 'no'
    # Update to current patch & champions list
    # euw1 is used as reference
    api = InterfaceAPI(API_KEY)
    PATCHES = api.getData('https://euw1.api.riotgames.com/lol/static-data/v3/versions')
    # Keep only major.minor, oldest first.
    PATCHES = ['.'.join(s.split('.')[:2]) for s in reversed(PATCHES)]
    config['PARAMS']['download_patches'] = PATCHES[-1]
    print('Current patch set to:', config['PARAMS']['download_patches'])
    # De-duplicate while preserving order.
    PATCHES = OrderedDict((x, True) for x in PATCHES).keys()
    config['PARAMS']['patches'] = ','.join(PATCHES)
    print('Patch list updated')
    json_data = api.getData('https://euw1.api.riotgames.com/lol/static-data/v3/champions', data={'locale': 'en_US', 'dataById': 'true'})
    CHAMPIONS = json_data['data']
    sortedChamps = []
    for champ_id, champ_info in CHAMPIONS.items():
        slugname = slugify(champ_info['name'], separator='')
        config['CHAMPIONS'][slugname] = champ_id
        sortedChamps.append(slugname)
    # We need to sort champions by release for the neural network
    # This is really important for the compatibility of the system over the patches
    # Unfortunately the API doesn't give this information, so we use: http://universe-meeps.leagueoflegends.com/v1/en_us/champion-browse/index.json
    response = urllib.request.urlopen('http://universe-meeps.leagueoflegends.com/v1/en_us/champion-browse/index.json')
    data = json.loads(response.read().decode())
    champ_date = {}
    for champ in data['champions']:
        date = champ['release-date']
        date = date[1:] if date[0] == ' ' else date  # solve a problem on annie
        date = date[:10]  # solve a problem on aatrox
        champ_date[slugify(champ['name'], separator='')] = datetime.strptime(date, '%Y-%m-%d')
    sortedChamps.sort(key=lambda x: (champ_date[x], x))  # sorted by date and then abc order (eg. annie/yi or xhaya/rakan)
    config['PARAMS']['sortedChamps'] = ','.join(sortedChamps)
    print('Champions list updated')
    with open('config.ini', 'w') as configfile:
        config.write(configfile)
    print('-- Update complete --')
# Allow the updater to be executed directly as a script.
if __name__ == '__main__':
    run()
| 2.40625 | 2 |
app/migrations/0010_auto_20200709_1512.py | RuijiaX/w3hacks | 1 | 13413 | # Generated by Django 3.0.7 on 2020-07-09 22:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: alters the ``lat``/``lng`` integer
    # fields on the ``Location`` model to allow blank and NULL values.
    dependencies = [
        ('app', '0009_auto_20200709_1430'),
    ]
    operations = [
        migrations.AlterField(
            model_name='location',
            name='lat',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='location',
            name='lng',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
| 1.5 | 2 |
tests/test_fitting.py | adrdrew/viroconcom | 0 | 13414 | import unittest
import csv
import numpy as np
from viroconcom.fitting import Fit
def read_benchmark_dataset(path='tests/testfiles/1year_dataset_A.txt'):
    """
    Reads a dataset provided for the environmental contour benchmark.

    The file is a ';'-separated CSV whose first row holds the column
    labels and whose subsequent rows hold one observation each in
    columns 1 and 2 (column 0 is an index).

    Parameters
    ----------
    path : string
        Path to dataset including the file name.  Defaults to
        'tests/testfiles/1year_dataset_A.txt' (the original docstring
        incorrectly documented 'examples/datasets/A.txt').

    Returns
    -------
    x : ndarray of doubles
        Observations of the environmental variable 1.
    y : ndarray of doubles
        Observations of the environmental variable 2.
    x_label : str
        Label of the environmantal variable 1.
    y_label : str
        Label of the environmental variable 2.
    """
    x = list()
    y = list()
    x_label = None
    y_label = None
    with open(path, newline='') as csv_file:
        reader = csv.reader(csv_file, delimiter=';')
        for idx, row in enumerate(reader):
            if idx == 0:
                # Header row: strip the leading white space from each label.
                x_label = row[1][1:]
                y_label = row[2][1:]
            else:
                x.append(float(row[1]))
                y.append(float(row[2]))
    x = np.asarray(x)
    y = np.asarray(y)
    return (x, y, x_label, y_label)
class FittingTest(unittest.TestCase):
def test_2d_fit(self):
"""
2-d Fit with Weibull and Lognormal distribution.
"""
prng = np.random.RandomState(42)
# Draw 1000 samples from a Weibull distribution with shape=1.5 and scale=3,
# which represents significant wave height.
sample_1 = prng.weibull(1.5, 1000)*3
# Let the second sample, which represents spectral peak period increase
# with significant wave height and follow a Lognormal distribution with
# mean=2 and sigma=0.2
sample_2 = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_1]
# Describe the distribution that should be fitted to the sample.
dist_description_0 = {'name': 'Weibull_3p',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
# Compute the fit.
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1))
dist0 = my_fit.mul_var_dist.distributions[0]
dist1 = my_fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist0.shape(0), 1.4165147571863412, places=5)
self.assertAlmostEqual(dist0.scale(0), 2.833833521811032, places=5)
self.assertAlmostEqual(dist0.loc(0), 0.07055663251419833, places=5)
self.assertAlmostEqual(dist1.shape(0), 0.17742685807554776 , places=5)
#self.assertAlmostEqual(dist1.scale, 7.1536437634240135+2.075539206642004e^{0.1515051024957754x}, places=5)
self.assertAlmostEqual(dist1.loc, None, places=5)
# Now use a 2-parameter Weibull distribution instead of 3-p distr.
dist_description_0 = {'name': 'Weibull_2p',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1))
self.assertEqual(str(my_fit)[0:5], 'Fit()')
def test_2d_benchmark_case(self):
"""
Reproduces the baseline results presented in doi: 10.1115/OMAE2019-96523 .
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset(
path='tests/testfiles/allyears_dataset_A.txt')
# Describe the distribution that should be fitted to the sample.
dist_description_0 = {'name': 'Weibull_3p',
'dependency': (None, None, None),
'width_of_intervals': 0.5}
dist_description_1 = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
'functions': ('exp3', None, 'power3')} # Shape, location, scale.
# Compute the fit.
my_fit = Fit((sample_hs, sample_tz),
(dist_description_0, dist_description_1))
# Evaluate the fitted parameters.
dist0 = my_fit.mul_var_dist.distributions[0]
dist1 = my_fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist0.shape(0), 1.48, delta=0.02)
self.assertAlmostEqual(dist0.scale(0), 0.944, delta=0.01)
self.assertAlmostEqual(dist0.loc(0), 0.0981, delta=0.001)
self.assertAlmostEqual(dist1.shape.a, 0, delta=0.001)
self.assertAlmostEqual(dist1.shape.b, 0.308, delta=0.002)
self.assertAlmostEqual(dist1.shape.c, -0.250, delta=0.002)
self.assertAlmostEqual(dist1.scale.a, 1.47 , delta=0.02)
self.assertAlmostEqual(dist1.scale.b, 0.214, delta=0.002)
self.assertAlmostEqual(dist1.scale.c, 0.641, delta=0.002)
self.assertAlmostEqual(dist1.scale(0), 4.3 , delta=0.1)
self.assertAlmostEqual(dist1.scale(2), 6, delta=0.1)
self.assertAlmostEqual(dist1.scale(5), 8, delta=0.1)
def test_2d_exponentiated_wbl_fit(self):
"""
Tests if a 2D fit that includes an exp. Weibull distribution works.
"""
prng = np.random.RandomState(42)
# Draw 1000 samples from a Weibull distribution with shape=1.5 and scale=3,
# which represents significant wave height.
sample_hs = prng.weibull(1.5, 1000)*3
# Let the second sample, which represents zero-upcrossing period increase
# with significant wave height and follow a Lognormal distribution with
# mean=2 and sigma=0.2
sample_tz = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_hs]
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'power3')
# Shape, Location, Scale
}
# Fit the model to the data, first test a 1D fit.
fit = Fit(sample_hs, dist_description_hs)
# Now perform the 2D fit.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
dist0 = fit.mul_var_dist.distributions[0]
self.assertGreater(dist0.shape(0), 1) # Should be about 1.5.
self.assertLess(dist0.shape(0), 2)
self.assertIsNone(dist0.loc(0)) # Has no location parameter, should be None.
self.assertGreater(dist0.scale(0), 2) # Should be about 3.
self.assertLess(dist0.scale(0), 4)
self.assertGreater(dist0.shape2(0), 0.5) # Should be about 1.
self.assertLess(dist0.shape2(0), 2)
    def test_fit_lnsquare2(self):
        """
        Tests a 2D fit that includes a logarithmic square ('lnsquare2')
        dependence function for the conditional scale parameter.
        """
        sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
        # Define the structure of the probabilistic model that will be fitted to the
        # dataset.
        dist_description_hs = {'name': 'Weibull_Exp',
                               'dependency': (None, None, None, None),
                               # Shape, Location, Scale, Shape2
                               'width_of_intervals': 0.5}
        dist_description_tz = {'name': 'Lognormal_SigmaMu',
                               'dependency': (0, None, 0),
                               # Shape, Location, Scale
                               'functions': ('exp3', None, 'lnsquare2')
                               # Shape, Location, Scale
                               }
        # Fit the model to the data.
        fit = Fit((sample_hs, sample_tz),
                  (dist_description_hs, dist_description_tz))
        # Check whether the logarithmic square fit worked correctly by
        # inspecting the fitted coefficients of the scale dependence function.
        dist1 = fit.mul_var_dist.distributions[1]
        self.assertGreater(dist1.scale.a, 1)  # Should be about 1-5
        self.assertLess(dist1.scale.a, 5)  # Should be about 1-5
        self.assertGreater(dist1.scale.b, 2)  # Should be about 2-10
        self.assertLess(dist1.scale.b, 10)  # Should be about 2-10
        self.assertGreater(dist1.scale(0), 0.1)
        self.assertLess(dist1.scale(0), 10)
        self.assertEqual(dist1.scale.func_name, 'lnsquare2')
    def test_fit_powerdecrease3(self):
        """
        Tests a 2D fit that includes a 'powerdecrease3' dependence function
        for the conditional shape parameter.
        """
        sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
        # Define the structure of the probabilistic model that will be fitted to the
        # dataset.
        dist_description_hs = {'name': 'Weibull_Exp',
                               'dependency': (None, None, None, None),
                               # Shape, Location, Scale, Shape2
                               'width_of_intervals': 0.5}
        dist_description_tz = {'name': 'Lognormal_SigmaMu',
                               'dependency': (0, None, 0),
                               # Shape, Location, Scale
                               'functions': ('powerdecrease3', None, 'lnsquare2')
                               # Shape, Location, Scale
                               }
        # Fit the model to the data.
        fit = Fit((sample_hs, sample_tz),
                  (dist_description_hs, dist_description_tz))
        # Check whether the powerdecrease3 fit worked correctly by
        # inspecting the fitted coefficients of the shape dependence function.
        dist1 = fit.mul_var_dist.distributions[1]
        self.assertGreater(dist1.shape.a, -0.1)  # Should be about 0
        self.assertLess(dist1.shape.a, 0.1)  # Should be about 0
        self.assertGreater(dist1.shape.b, 1.5)  # Should be about 2-5
        self.assertLess(dist1.shape.b, 6)  # Should be about 2-10
        self.assertGreater(dist1.shape.c, 0.8)  # Should be about 1.1
        self.assertLess(dist1.shape.c, 2)  # Should be about 1.1
        self.assertGreater(dist1.shape(0), 0.25)  # Should be about 0.35
        self.assertLess(dist1.shape(0), 0.4)  # Should be about 0.35
        self.assertEqual(dist1.shape.func_name, 'powerdecrease3')
def test_fit_asymdecrease3(self):
"""
Tests a 2D fit that includes an asymdecrease3 dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('asymdecrease3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist1.shape.a, 0, delta=0.1) # Should be about 0
self.assertAlmostEqual(dist1.shape.b, 0.35, delta=0.4) # Should be about 0.35
self.assertAlmostEqual(np.abs(dist1.shape.c), 0.45, delta=0.2) # Should be about 0.45
self.assertAlmostEquals(dist1.shape(0), 0.35, delta=0.2) # Should be about 0.35
def test_min_number_datapoints_for_fit(self):
"""
Tests if the minimum number of datapoints required for a fit works.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2'),
# Shape, Location, Scale
'min_datapoints_for_fit': 10
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
a_min_10 = dist1.scale.a
# Now require more datapoints for a fit.
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2'),
# Shape, Location, Scale
'min_datapoints_for_fit': 500
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
a_min_500 = dist1.scale.a
# Because in case 2 fewer bins have been used we should get different
# coefficients for the dependence function.
self.assertNotEqual(a_min_10, a_min_500)
def test_multi_processing(selfs):
"""
2-d Fit with multiprocessing (specified by setting a value for timeout)
"""
# Define a sample and a fit.
prng = np.random.RandomState(42)
sample_1 = prng.weibull(1.5, 1000)*3
sample_2 = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_1]
dist_description_0 = {'name': 'Weibull',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
# Compute the fit.
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1),
timeout=10)
    def test_wbl_fit_with_negative_location(self):
        """
        Tests fitting a translated Weibull distribution which would result
        in a negative location parameter.
        """
        sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
        # Define the structure of the probabilistic model that will be fitted to the
        # dataset.
        dist_description_hs = {'name': 'Weibull_3p',
                               'dependency': (None, None, None)}
        # Fit the model to the data.
        fit = Fit((sample_hs, ),
                  (dist_description_hs, ))
        # Correct values for 10 years of data can be found in
        # 10.1115/OMAE2019-96523 . Here we used 1 year of data.
        dist0 = fit.mul_var_dist.distributions[0]
        self.assertAlmostEqual(dist0.shape(0) / 10, 1.48 / 10, places=1)
        self.assertGreater(dist0.loc(0), 0.0)  # Should be 0.0981
        self.assertLess(dist0.loc(0), 0.3)  # Should be 0.0981
        self.assertAlmostEqual(dist0.scale(0), 0.944, places=1)
        # Shift the wave data by -2 m and fit again.
        sample_hs = sample_hs - 2
        # Negative location values will be set to zero instead and a
        # warning will be raised.
        with self.assertWarns(RuntimeWarning):
            fit = Fit((sample_hs, ),
                      (dist_description_hs, ))
        dist0 = fit.mul_var_dist.distributions[0]
        self.assertAlmostEqual(dist0.shape(0) / 10, 1.48 / 10, places=1)
        # Should be estimated to be 0.0981 - 2 and corrected to be 0.
        self.assertEqual(dist0.loc(0), 0)
        self.assertAlmostEqual(dist0.scale(0), 0.944, places=1)
    def test_omae2020_wind_wave_model(self):
        """
        Tests fitting the wind-wave model that was used in the publication
        'Global hierarchical models for wind and wave contours' on dataset D.
        """
        sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
        # Define the structure of the probabilistic model that will be fitted to the
        # dataset. Hs is modeled conditional on wind speed; shape2 is fixed at 5.
        dist_description_v = {'name': 'Weibull_Exp',
                              'dependency': (None, None, None, None),
                              'width_of_intervals': 2}
        dist_description_hs = {'name': 'Weibull_Exp',
                               'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
                               'dependency': (0, None, 0, None), # shape, location, scale, shape2
                               'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
                               'min_datapoints_for_fit': 20}
        # Fit the model to the data.
        fit = Fit((sample_v, sample_hs),
                  (dist_description_v, dist_description_hs))
        # Marginal wind speed distribution (expected values from the publication).
        dist0 = fit.mul_var_dist.distributions[0]
        self.assertAlmostEqual(dist0.shape(0), 2.42, delta=1)
        self.assertAlmostEqual(dist0.scale(0), 10.0, delta=2)
        self.assertAlmostEqual(dist0.shape2(0), 0.761, delta=0.5)
        dist1 = fit.mul_var_dist.distributions[1]
        # shape2 was fixed, so it must be exactly 5.
        self.assertEqual(dist1.shape2(0), 5)
        inspection_data1 = fit.multiple_fit_inspection_data[1]
        self.assertEqual(inspection_data1.shape2_value[0], 5)
        # Per-interval shape estimates (before the dependence-function fit).
        self.assertAlmostEqual(inspection_data1.shape_value[0], 0.8, delta=0.5) # interval centered at 1
        self.assertAlmostEqual(inspection_data1.shape_value[4], 1.5, delta=0.5) # interval centered at 9
        self.assertAlmostEqual(inspection_data1.shape_value[9], 2.5, delta=1) # interval centered at 19
        # Fitted logistics4 dependence function for the shape parameter.
        self.assertAlmostEqual(dist1.shape(0), 0.8, delta=0.3)
        self.assertAlmostEqual(dist1.shape(10), 1.6, delta=0.5)
        self.assertAlmostEqual(dist1.shape(20), 2.3, delta=0.7)
        self.assertAlmostEqual(dist1.shape.a, 0.582, delta=0.5)
        self.assertAlmostEqual(dist1.shape.b, 1.90, delta=1)
        self.assertAlmostEqual(dist1.shape.c, 0.248, delta=0.5)
        self.assertAlmostEqual(dist1.shape.d, 8.49, delta=5)
        # Per-interval scale estimates (before the dependence-function fit).
        self.assertAlmostEqual(inspection_data1.scale_value[0], 0.15, delta=0.2) # interval centered at 1
        self.assertAlmostEqual(inspection_data1.scale_value[4], 1, delta=0.5) # interval centered at 9
        self.assertAlmostEqual(inspection_data1.scale_value[9], 4, delta=1) # interval centered at 19
        # Fitted alpha3 dependence function for the scale parameter.
        self.assertAlmostEqual(dist1.scale(0), 0.15, delta=0.5)
        self.assertAlmostEqual(dist1.scale(10), 1, delta=0.5)
        self.assertAlmostEqual(dist1.scale(20), 4, delta=1)
        self.assertAlmostEqual(dist1.scale.a, 0.394, delta=0.5)
        self.assertAlmostEqual(dist1.scale.b, 0.0178, delta=0.1)
        self.assertAlmostEqual(dist1.scale.c, 1.88, delta=0.8)
    def test_wrong_model(self):
        """
        Tests whether errors are raised when incorrect fitting models are
        specified.
        """
        sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
        # This structure is incorrect as there is no distribution called 'something'.
        dist_description_v = {'name': 'something',
                              'dependency': (None, None, None, None),
                              'fixed_parameters': (None, None, None, None), # shape, location, scale, shape2
                              'width_of_intervals': 2}
        with self.assertRaises(ValueError):
            # Fit the model to the data.
            fit = Fit((sample_v, ),
                      (dist_description_v, ))
        # This structure is incorrect as there is no dependence function called 'something'.
        dist_description_v = {'name': 'Weibull_Exp',
                              'dependency': (None, None, None, None),
                              'width_of_intervals': 2}
        dist_description_hs = {'name': 'Weibull_Exp',
                               'dependency': (0, None, 0, None), # shape, location, scale, shape2
                               'functions': ('something', None, 'alpha3', None), # shape, location, scale, shape2
                               'min_datapoints_for_fit': 20}
        with self.assertRaises(ValueError):
            # Fit the model to the data.
            fit = Fit((sample_v, sample_hs),
                      (dist_description_v, dist_description_hs))
        # This structure is incorrect as there will be only 1 or 2 intervals
        # that fit 2000 datapoints.
        dist_description_v = {'name': 'Weibull_Exp',
                              'dependency': (None, None, None, None),
                              'width_of_intervals': 2}
        dist_description_hs = {'name': 'Weibull_Exp',
                               'dependency': (0, None, 0, None), # shape, location, scale, shape2
                               'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
                               'min_datapoints_for_fit': 2000}
        with self.assertRaises(RuntimeError):
            # Fit the model to the data.
            fit = Fit((sample_v, sample_hs),
                      (dist_description_v, dist_description_hs))
        # This structure is incorrect as alpha3 is only compatible with
        # logistics4 .
        dist_description_v = {'name': 'Weibull_Exp',
                              'dependency': (None, None, None, None),
                              'width_of_intervals': 2}
        dist_description_hs = {'name': 'Weibull_Exp',
                               'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
                               'dependency': (0, None, 0, None), # shape, location, scale, shape2
                               'functions': ('power3', None, 'alpha3', None), # shape, location, scale, shape2
                               'min_datapoints_for_fit': 20}
        with self.assertRaises(TypeError):
            # Fit the model to the data.
            fit = Fit((sample_v, sample_hs),
                      (dist_description_v, dist_description_hs))
        # This structure is incorrect as only shape2 of an exponentiated Weibull
        # distribution can be fixed at the moment (here: scale of a Lognormal).
        dist_description_v = {'name': 'Lognormal',
                              'dependency': (None, None, None, None),
                              'fixed_parameters': (None, None, 5, None), # shape, location, scale, shape2
                              'width_of_intervals': 2}
        with self.assertRaises(NotImplementedError):
            # Fit the model to the data.
            fit = Fit((sample_v, ),
                      (dist_description_v, ))
        # This structure is incorrect as only shape2 of an exponentiated Weibull
        # distribution can be fixed at the moment (here: scale is fixed).
        dist_description_v = {'name': 'Weibull_Exp',
                              'dependency': (None, None, None, None),
                              'width_of_intervals': 2}
        dist_description_hs = {'name': 'Weibull_Exp',
                               'fixed_parameters' : (None, None, 5, None), # shape, location, scale, shape2
                               'dependency': (0, None, 0, None), # shape, location, scale, shape2
                               'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
                               'min_datapoints_for_fit': 20}
        with self.assertRaises(NotImplementedError):
            # Fit the model to the data.
            fit = Fit((sample_v, sample_hs),
                      (dist_description_v, dist_description_hs))
    def test_weighting_of_dependence_function(self):
        """
        Tests if using weights when the dependence function is fitted works
        correctly.
        """
        sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
        # Define the structure of the probabilistic model that will be fitted to the
        # dataset. First fit without weights for the dependence function.
        dist_description_v = {'name': 'Weibull_Exp',
                              'dependency': (None, None, None, None),
                              'width_of_intervals': 2}
        dist_description_hs = {'name': 'Weibull_Exp',
                               'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
                               'dependency': (0, None, 0, None), # shape, location, scale, shape2
                               'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
                               'min_datapoints_for_fit': 20,
                               'do_use_weights_for_dependence_function': False}
        # Fit the model to the data.
        fit = Fit((sample_v, sample_hs),
                  (dist_description_v, dist_description_hs))
        dist1_no_weights = fit.mul_var_dist.distributions[1]
        # Now perform a fit with weights.
        dist_description_hs = {'name': 'Weibull_Exp',
                               'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
                               'dependency': (0, None, 0, None), # shape, location, scale, shape2
                               'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
                               'min_datapoints_for_fit': 20,
                               'do_use_weights_for_dependence_function': True}
        # Fit the model to the data.
        fit = Fit((sample_v, sample_hs),
                  (dist_description_v, dist_description_hs))
        dist1_with_weights = fit.mul_var_dist.distributions[1]
        # Make sure the two fitted dependence functions are different
        # (relative difference of the scale at v=0 exceeds 1 %).
        d = np.abs(dist1_with_weights.scale(0) - dist1_no_weights.scale(0)) / \
            np.abs(dist1_no_weights.scale(0))
        self.assertGreater(d, 0.01)
        # Make sure they are not too different (relative difference of the
        # scale at v=20 stays below 50 %).
        d = np.abs(dist1_with_weights.scale(20) - dist1_no_weights.scale(20)) / \
            np.abs(dist1_no_weights.scale(20))
        self.assertLess(d, 0.5)
| 3.234375 | 3 |
python/scripts/wavsep/wavsep.py | rugheid/OSS-ZAP | 4 | 13415 | <filename>python/scripts/wavsep/wavsep.py<gh_stars>1-10
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2012 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script tests ZAP against wavsep: http://code.google.com/p/wavsep/
# Note wavsep has to be installed somewhere - the above link is to the
# project not the test suite!
#
# To use this script:
# * Install the ZAP Python API:
# Use 'pip install python-owasp-zap-v2' or
# download from https://github.com/zaproxy/zaproxy/wiki/Downloads
# * Start ZAP (as this is for testing purposes you might not want the
# 'standard' ZAP to be started)
# * Access wavsep via your browser, proxying through ZAP
# * Visit all of the wavsep top-level URLs, e.g.
# http://localhost:8080/wavsep/index-active.jsp
# http://localhost:8080/wavsep/index-passive.jsp
# * Run the Spider against http://localhost:8080
# * Run the Active Scanner against http://localhost:8080/wavsep
# * Run this script
# * Open the report.html file generated in your browser
#
# Notes:
# This has been tested against wavsep 1.5
from zapv2 import ZAPv2
import datetime, sys, getopt
def main(argv):
# -------------------------------------------------------------------------
# Default Configurations - use -h and -p for different host and port
# -------------------------------------------------------------------------
zapHost = '127.0.0.1'
zapPort = '8090'
try:
opts, args = getopt.getopt(argv,"h:p:")
except getopt.GetoptError:
print 'wavsep.py -h <ZAPhost> -p <ZAPport>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
zapHost = arg
elif opt == '-p':
zapPort = arg
zapUrl = 'http://' + zapHost + ':' + zapPort
# Dictionary of abbreviation to keep the output a bit shorter
abbrev = {
'Active Vulnerability title' : 'Ex',\
'Cross Site Scripting (DOM Based)' : 'DXSS',\
'Cross Site Scripting (Reflected)' : 'RXSS',\
'Absence of Anti-CSRF Tokens' : 'NoCSRF',\
'Application Error Disclosure' : 'AppError',\
'Anti CSRF Tokens Scanner' : 'ACSRF',\
'Buffer Overflow' : 'Buffer',\
'Cookie set without HttpOnly flag' : 'HttpOnly',\
'Cookie Slack Detector' : 'CookieSlack',\
'Cross Site Request Forgery' : 'CSRF',\
'External Redirect' : 'ExtRedir',\
'Format String Error' : 'Format',\
'HTTP Parameter Override' : 'ParamOver',\
'Information disclosure - database error messages' : 'InfoDb',\
'Information disclosure - debug error messages' : 'InfoDebug',\
'Information Disclosure - Sensitive Informations in URL' : 'InfoUrl',\
'LDAP Injection' : 'LDAP',\
'Loosely Scoped Cookie' : 'CookieLoose',\
'None. Warning only.' : 'NoCSRF2',\
'Password Autocomplete in browser' : 'Auto',\
'Path Traversal' : 'PathTrav',\
'Private IP Disclosure' : 'PrivIP',\
'Remote File Inclusion' : 'RFI',\
'Session ID in URL Rewrite' : 'SessRewrite',\
'Source Code Disclosure - File Inclusion' : 'SrcInc',\
'SQL Injection' : 'SQLi',\
'SQL Injection - MySQL' : 'SqlMySql',\
'SQL Injection - Generic SQL RDBMS' : 'SqlGen',\
'SQL Injection - Boolean Based' : 'SqlBool',\
'SQL Injection - Error Based - Generic SQL RDBMS' : 'SqlGenE',\
'SQL Injection - Error Based - MySQL' : 'SqlMySqlE',\
'SQL Injection - Error Based - Java' : 'SqlJavaE',\
'SQL Injection - Hypersonic SQL - Time Based' : 'SqlHyperT',\
'SQL Injection - MySQL - Time Based' : 'SqlMySqlT',\
'SQL Injection - Oracle - Time Based' : 'SqlOracleT',\
'SQL Injection - PostgreSQL - Time Based' : 'SqlPostgreT',\
'URL Redirector Abuse' : 'UrlRedir',\
'Viewstate without MAC signature (Unsure)' : 'ViewstateNoMac',\
'Weak Authentication Method' : 'WeakAuth',\
'Web Browser XSS Protection Not Enabled' : 'XSSoff',\
'X-Content-Type-Options Header Missing' : 'XContent',\
'X-Frame-Options Header Not Set' : 'XFrame'}
# The rules to apply:
# Column 1: String to match against an alert URL
# Column 2: Alert abbreviation to match
# Column 3: pass, fail, ignore
#
rules = [ \
# All these appear to be valid ;)
['-', 'InfoDebug', 'ignore'], \
['-', 'InfoUrl', 'ignore'], \
['-', 'ACSRF', 'ignore'], \
['-', 'ACSRF', 'ignore'], \
['-', 'Ex', 'ignore'], \
['-', 'CookieLoose', 'ignore'], \
['-', 'CookieSlack', 'ignore'], \
['-', 'NoCSRF2', 'ignore'], \
['-', 'ParamOver', 'ignore'], \
['-', 'PrivIP', 'ignore'], \
['-', 'SrcInc', 'ignore'], \
['-', 'XFrame', 'ignore'], \
['-', 'XContent', 'ignore'], \
['-', 'XSSoff', 'ignore'], \
['LFI-', 'AppError', 'ignore'], \
['LFI-', 'Buffer', 'ignore'], \
['LFI-', 'Format', 'ignore'], \
['LFI-', 'NoCSRF', 'ignore'], \
['LFI-', 'RFI', 'ignore'], \
['LFI-', 'DXSS', 'ignore'], \
['LFI-', 'RXSS', 'ignore'], \
['LFI-', 'SqlHyperT', 'ignore'], \
['LFI-', 'SqlMySql', 'ignore'], \
['LFI-', 'SqlOracleT', 'ignore'], \
['LFI-', 'SqlPostgreT', 'ignore'], \
['Redirect-', 'LDAP', 'ignore'], \
['Redirect-', 'NoCSRF', 'ignore'], \
['Redirect-', 'RFI', 'ignore'], \
['Redirect-', 'DXSS', 'ignore'], \
['Redirect-', 'RXSS', 'ignore'], \
['Redirect-', 'SqlHyperT', 'ignore'], \
['Redirect-', 'SqlMySql', 'ignore'], \
['Redirect-', 'SqlOracleT', 'ignore'], \
['Redirect-', 'SqlPostgreT', 'ignore'], \
['RFI-', 'AppError', 'ignore'], \
['RFI-', 'Buffer', 'ignore'], \
['RFI-', 'Format', 'ignore'], \
['RFI-', 'NoCSRF', 'ignore'], \
['RFI-', 'DXSS', 'ignore'], \
['RFI-', 'RXSS', 'ignore'], \
['RFI-', 'SqlHyperT', 'ignore'], \
['RFI-', 'SqlMySql', 'ignore'], \
['RFI-', 'SqlOracleT', 'ignore'], \
['RFI-', 'SqlPostgreT', 'ignore'], \
['RXSS-', 'Auto', 'ignore'], \
['RXSS-', 'Buffer', 'ignore'], \
['RXSS-', 'Format', 'ignore'], \
['RXSS-', 'HttpOnly', 'ignore'], \
['RXSS-', 'NoCSRF', 'ignore'], \
['RXSS-', 'SqlOracleT', 'ignore'], \
['RXSS-', 'SqlPostgreT', 'ignore'], \
['RXSS-', 'SqlMySql', 'ignore'], \
['RXSS-', 'SqlOracleT', 'ignore'], \
['RXSS-', 'ViewstateNoMac', 'ignore'], \
['SInjection-', 'AppError', 'ignore'], \
['SInjection-', 'Auto', 'ignore'], \
['SInjection-', 'Buffer', 'ignore'], \
['SInjection-', 'NoCSRF', 'ignore'], \
['SInjection-', 'Format', 'ignore'], \
['SInjection-', 'LDAP', 'ignore'], \
['SInjection-', 'RXSS', 'ignore'], \
['SInjection-', 'SqlHyperT', 'ignore'], \
['LoginBypass', 'Auto', 'ignore'], \
['CrlfRemovalInHttpHeader', 'HttpOnly', 'ignore'], \
['Tag2HtmlPageScopeValidViewstateRequired', 'ViewstateNoMac', 'ignore'], \
['session-password-autocomplete', 'NoCSRF', 'ignore'], \
#
['LFI-Detection-Evaluation', 'PathTrav', 'pass'], \
['LFI-FalsePositives', 'PathTrav', 'fail'], \
['Redirect-', 'ExtRedir', 'pass'], \
['RFI-Detection-Evaluation', 'RFI', 'pass'], \
['RFI-FalsePositives', 'RFI', 'fail'], \
['RXSS-Detection-Evaluation', 'DXSS', 'pass'], \
['RXSS-Detection-Evaluation', 'RXSS', 'pass'], \
['RXSS-FalsePositives-GET', 'DXSS', 'fail'], \
['RXSS-FalsePositives-GET', 'RXSS', 'fail'], \
['SInjection-Detection-Evaluation', 'SQLfp', 'pass'], \
['SInjection-Detection-Evaluation', 'SQLi', 'pass'], \
#['SInjection-Detection-Evaluation', 'SqlHyper', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlBool', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlGen', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlGenE', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlMySql', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlMySqlE', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlMySqlT', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlOracleT', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlPostgreT', 'pass'], \
['SInjection-FalsePositives', 'SQLfp', 'fail'], \
['SInjection-FalsePositives', 'SQLi', 'fail'], \
['SInjection-FalsePositives', 'SqlBool', 'fail'], \
['SInjection-FalsePositives', 'SqlGen', 'fail'], \
['SInjection-FalsePositives', 'SqlGenE', 'fail'], \
['SInjection-FalsePositives', 'SqlMySql', 'fail'], \
['SInjection-FalsePositives', 'SqlMySqlE', 'fail'], \
['SInjection-FalsePositives', 'SqlMySqlT', 'fail'], \
['SInjection-FalsePositives', 'SqlHyperT', 'fail'], \
['SInjection-FalsePositives', 'SqlMySqlT', 'fail'], \
['SInjection-FalsePositives', 'SqlOracleT', 'fail'], \
['SInjection-FalsePositives', 'SqlPostgreT', 'fail'], \
['info-cookie-no-httponly', 'HttpOnly', 'pass'], \
['info-server-stack-trace', 'AppError', 'pass'], \
['session-password-autocomplete', 'Auto', 'pass'], \
['weak-authentication-basic', 'WeakAuth', 'pass'], \
]
zap = ZAPv2(proxies={'http': zapUrl, 'https': zapUrl})
uniqueUrls = set([])
# alertsPerUrl is a disctionary of urlsummary to a dictionary of type to set of alertshortnames ;)
alertsPerUrl = {}
plugins = set([])
alertPassCount = {}
alertFailCount = {}
alertIgnoreCount = {}
alertOtherCount = {}
zapVersion = zap.core.version
totalAlerts = 0
offset = 0
page = 100
# Page through the alerts as otherwise ZAP can hang...
alerts = zap.core.alerts('', offset, page)
while len(alerts) > 0:
totalAlerts += len(alerts)
for alert in alerts:
url = alert.get('url')
# Grab the url before any '?'
url = url.split('?')[0]
#print 'URL: ' + url
urlEl = url.split('/')
if (len(urlEl) > 6):
#print 'URL 4:' + urlEl[4] + ' 6:' + urlEl[6].split('-')[0]
if (urlEl[3] != 'wavsep'):
print 'Ignoring non wavsep URL 4:' + urlEl[4] + ' URL 5:' + urlEl[5] + ' URL 6:' + urlEl[6]
continue
if (urlEl[6].split('-')[0][:9] == 'index.jsp'):
#print 'Ignoring index URL 4:' + urlEl[4] + ' URL 5:' + urlEl[5] + ' URL 6:' + urlEl[6]
continue
if (len(urlEl) > 7 and urlEl[4] == 'active'):
if (urlEl[7].split('-')[0][:4] != 'Case'):
#print 'Ignoring index URL 4:' + urlEl[4] + ' URL 5:' + urlEl[5] + ' URL 6:' + urlEl[6] + ' URL 7:' + urlEl[7]
continue
urlSummary = urlEl[4] + ' : ' + urlEl[5] + ' : ' + urlEl[6] + ' : ' + urlEl[7].split('-')[0]
else:
# Passive URLs have different format
urlSummary = urlEl[4] + ' : ' + urlEl[5] + ' : ' + urlEl[6]
#print 'URL summary:' + urlSummary
short = abbrev.get(alert.get('alert'))
if (short is None):
short = 'UNKNOWN'
print 'Unknown alert: ' + alert.get('alert')
aDict = alertsPerUrl.get(urlSummary, {'pass' : set([]), 'fail' : set([]), 'ignore' : set([]), 'other' : set([])})
added = False
for rule in rules:
if (rule[0] in urlSummary and rule[1] == short):
aDict[rule[2]].add(short)
# Counts per alert
if (rule[2] == 'pass'):
alertPassCount[short] = alertPassCount.get(short, 0) + 1
elif (rule[2] == 'fail'):
alertFailCount[short] = alertFailCount.get(short, 0) + 1
elif (rule[2] == 'ignore'):
alertIgnoreCount[short] = alertIgnoreCount.get(short, 0) + 1
added = True
break
if (not added):
aDict['other'].add(short)
alertOtherCount[short] = alertOtherCount.get(short, 0) + 1
alertsPerUrl[urlSummary] = aDict
plugins.add(alert.get('alert'))
uniqueUrls.add(url)
offset += page
alerts = zap.core.alerts('', offset, page)
#for key, value in alertsPerUrl.iteritems():
# print key, value
# Generate report file
reportFile = open('report.html', 'w')
reportFile.write("<html>\n")
reportFile.write(" <head>\n")
reportFile.write(" <title>ZAP Wavsep Report</title>\n")
reportFile.write(" <!--Load the AJAX API-->\n")
reportFile.write(" <script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>\n")
reportFile.write(" </head>\n")
reportFile.write("<body>\n")
reportFile.write("<h1><img src=\"https://raw.githubusercontent.com/zaproxy/zaproxy/develop/src/resource/zap64x64.png\" align=\"middle\">OWASP ZAP wavsep results</h1>\n")
reportFile.write("Generated: " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M") + "\n")
topResults = []
thisTop = ['', 0, 0]
groupResults = []
thisGroup = ['', 0, 0]
totalPass = 0
totalFail = 0
# Calculate the top level scores
for key, value in sorted(alertsPerUrl.iteritems()):
top = key.split(' : ')[1]
if ('-' in top):
top = top.split('-')[0] + '-' + top.split('-')[1]
if (top != thisTop[0]):
thisTop = [top, 0, 0] # top, pass, fail
topResults.append(thisTop)
if (len(value.get('pass')) > 0):
thisTop[1] += 1
elif (len(value.get('fail')) > 0):
thisTop[2] += 1
elif ('FalsePositive' in key):
thisTop[1] += 1
else:
thisTop[2] += 1
# Calculate the group scores
for key, value in sorted(alertsPerUrl.iteritems()):
group = key.split(' : ')[1]
if (group != thisGroup[0]):
thisGroup = [group, 0, 0] # group, pass, fail
groupResults.append(thisGroup)
if (len(value.get('pass')) > 0):
totalPass += 1
thisGroup[1] += 1
elif (len(value.get('fail')) > 0):
totalFail += 1
thisGroup[2] += 1
elif ('FalsePositive' in key):
totalPass += 1
thisGroup[1] += 1
else:
totalFail += 1
thisGroup[2] += 1
# Output the summary
scale=8
reportFile.write("<h3>Total Score</h3>\n")
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\">")
for i in range (totalPass/scale):
reportFile.write(" ")
reportFile.write("</font>")
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\">")
for i in range (totalFail/scale):
reportFile.write(" ")
reportFile.write("</font>")
total = 100 * totalPass / (totalPass + totalFail)
reportFile.write(str(total) + "%<br/><br/>\n")
reportFile.write('ZAP Version: ' + zapVersion + '<br/>\n')
reportFile.write('URLs found: ' + str(len(uniqueUrls)))
# Output the top level table
reportFile.write("<h3>Top Level Scores</h3>\n")
reportFile.write("<table border=\"1\">\n")
reportFile.write("<tr><th>Top Level</th><th>Pass</th><th>Fail</th><th>Score</th><th>Chart</th></tr>\n")
scale=6
for topResult in topResults:
#print "%s Pass: %i Fail: %i Score: %i\%" % (topResult[0], topResult[1], topResult[2], (100*topResult[1]/topResult[1]+topResult[2]))
reportFile.write("<tr>")
reportFile.write("<td>" + topResult[0] + "</td>")
reportFile.write("<td align=\"right\">" + str(topResult[1]) + "</td>")
reportFile.write("<td align=\"right\">" + str(topResult[2]) + "</td>")
score = 100 * topResult[1] / (topResult[1] + topResult[2])
reportFile.write("<td align=\"right\">" + str(score) + "%</td>")
reportFile.write("<td>")
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\">")
for i in range (topResult[1]/scale):
reportFile.write(" ")
reportFile.write("</font>")
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\">")
for i in range (topResult[2]/scale):
reportFile.write(" ")
reportFile.write("</font>")
reportFile.write("</td>")
reportFile.write("</tr>\n")
reportFile.write("</table><br/>\n")
reportFile.write("<h3>Alerts</h3>\n")
reportFile.write("<table border=\"1\">\n")
reportFile.write("<tr><th>Alert</th><th>Description</th><th>Pass</th><th>Fail</th><th>Ignore</th><th>Other</th></tr>\n")
#for key, value in abbrev.items():
for (k, v) in sorted(abbrev.items(), key=lambda (k,v): v):
reportFile.write("<tr>")
reportFile.write("<td>" + v + "</td>")
reportFile.write("<td>" + k + "</td>")
reportFile.write("<td>" + str(alertPassCount.get(v, 0)) +" </td>")
reportFile.write("<td>" + str(alertFailCount.get(v, 0)) +" </td>")
reportFile.write("<td>" + str(alertIgnoreCount.get(v, 0)) +" </td>")
reportFile.write("<td>" + str(alertOtherCount.get(v, 0)) +" </td>")
reportFile.write("</tr>\n")
reportFile.write("</table><br/>\n")
# Output the group table
reportFile.write("<h3>Group Scores</h3>\n")
reportFile.write("<table border=\"1\">\n")
reportFile.write("<tr><th>Group</th><th>Pass</th><th>Fail</th><th>Score</th><th>Chart</th></tr>\n")
scale=4
for groupResult in groupResults:
#print "%s Pass: %i Fail: %i Score: %i\%" % (groupResult[0], groupResult[1], groupResult[2], (100*groupResult[1]/groupResult[1]+groupResult[2]))
reportFile.write("<tr>")
reportFile.write("<td>" + groupResult[0] + "</td>")
reportFile.write("<td align=\"right\">" + str(groupResult[1]) + "</td>")
reportFile.write("<td align=\"right\">" + str(groupResult[2]) + "</td>")
score = 100 * groupResult[1] / (groupResult[1] + groupResult[2])
reportFile.write("<td align=\"right\">" + str(score) + "%</td>")
reportFile.write("<td>")
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\">")
for i in range (groupResult[1]/scale):
reportFile.write(" ")
reportFile.write("</font>")
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\">")
for i in range (groupResult[2]/scale):
reportFile.write(" ")
reportFile.write("</font>")
reportFile.write("</td>")
reportFile.write("</tr>\n")
reportFile.write("</table><br/>\n")
# Output the detail table
reportFile.write("<h3>Detailed Results</h3>\n")
reportFile.write("<table border=\"1\">\n")
reportFile.write("<tr><th>Page</th><th>Result</th><th>Pass</th><th>Fail</th><th>Ignore</th><th>Other</th></tr>\n")
for key, value in sorted(alertsPerUrl.iteritems()):
reportFile.write("<tr>")
keyArray = key.split(':')
if (len(keyArray) == 4):
reportFile.write("<td>" + keyArray[0] + keyArray[2] + keyArray[3] + "</td>")
else:
reportFile.write("<td>" + keyArray[0] + keyArray[2] + "</td>")
reportFile.write("<td>")
if (len(value.get('pass')) > 0):
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\"> PASS </font>")
elif (len(value.get('fail')) > 0):
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\"> FAIL </font>")
elif ('FalsePositive' in key):
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\"> PASS </font>")
else:
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\"> FAIL </font>")
reportFile.write("</td>")
reportFile.write("<td>")
if (value.get('pass') is not None):
reportFile.write(" ".join(value.get('pass')))
reportFile.write(" </td>")
reportFile.write("<td>")
if (value.get('fail') is not None):
reportFile.write(" ".join(value.get('fail')))
reportFile.write(" </td>")
reportFile.write("<td>")
if (value.get('ignore') is not None):
reportFile.write(" ".join(value.get('ignore')))
reportFile.write(" </td>")
reportFile.write("<td>")
if (value.get('other') is not None):
reportFile.write(" ".join(value.get('other')))
reportFile.write(" </td>")
reportFile.write("</tr>\n")
reportFile.write("</table><br/>\n")
reportFile.write("<h3>Plugin Times</h3>\n")
# The start of the chart script
reportFile.write("<script type=\"text/javascript\">\n")
reportFile.write(" // Load the Visualization API and the piechart package.\n")
reportFile.write(" google.load('visualization', '1.0', {'packages':['corechart']});\n")
reportFile.write(" // Set a callback to run when the Google Visualization API is loaded.\n")
reportFile.write(" google.setOnLoadCallback(drawChart);\n")
reportFile.write(" function drawChart() {\n")
reportFile.write(" // Create the data table.\n")
reportFile.write(" var data = new google.visualization.DataTable();\n")
reportFile.write(" data.addColumn('string', 'Plugin');\n")
reportFile.write(" data.addColumn('number', 'Time in ms');\n")
reportFile.write(" data.addRows([\n")
progress = zap.ascan.scan_progress()
# Loop through first time for the chart
for plugin in progress[1]['HostProcess']:
reportFile.write(" ['" + plugin['Plugin'][0] + "', " + plugin['Plugin'][3] + "],\n")
# The end of the chart script
reportFile.write(" ]);\n")
reportFile.write(" // Set chart options\n")
reportFile.write(" var options = {'title':'Plugin times',\n")
reportFile.write(" 'width':600,\n")
reportFile.write(" 'height':500};\n")
reportFile.write(" // Instantiate and draw our chart, passing in some options.\n")
reportFile.write(" var chart = new google.visualization.PieChart(document.getElementById('chart_div'));\n")
reportFile.write(" chart.draw(data, options);\n")
reportFile.write(" }\n")
reportFile.write("</script>\n")
reportFile.write("<div id=\"chart_div\"></div>\n")
reportFile.write("<table border=\"1\">\n")
reportFile.write("<tr><th>Plugin</th><th>ms</th></tr>\n")
# Loop through second time for the table
totalTime = 0
for plugin in progress[1]['HostProcess']:
reportFile.write("<tr>")
reportFile.write("<td>" + plugin['Plugin'][0] + "</td>")
# Convert ms into something more readable
t = int(plugin['Plugin'][3])
totalTime += t
s, ms = divmod(t, 1000)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
time = "%d:%02d:%02d.%03d" % (h, m, s, ms)
reportFile.write("<td>" + time + "</td>")
reportFile.write("</tr>\n")
reportFile.write("<tr><td></td><td></td></tr>")
reportFile.write("<tr>")
reportFile.write("<td>Total</td>")
# Convert ms into something more readable
s, ms = divmod(totalTime, 1000)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
time = "%d:%02d:%02d.%03d" % (h, m, s, ms)
reportFile.write("<td>" + time + "</td>")
reportFile.write("</tr>\n")
reportFile.write("</table><br/>\n")
reportFile.write("</body></html>\n")
reportFile.close()
#for key, value in sorted(alertsPerUrl.iteritems()):
# print "%s: %s" % (key, value)
#print ''
print ''
print 'Got ' + str(totalAlerts) + ' alerts'
print 'Got ' + str(len(uniqueUrls)) + ' unique urls'
print 'Took ' + time
print 'Score ' + str(total)
# Script entry point: forward CLI arguments (minus the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| 2.390625 | 2 |
def LeiaInt(msg1):
    """Prompt with *msg1* until the user types a valid non-negative integer.

    Non-numeric input prints a red error message and re-prompts.
    Returns the value as an int (the original returned the raw input
    string despite the function's name).
    """
    while True:
        valor1 = input(msg1)
        if valor1.isnumeric():
            # str.isnumeric() guarantees int() cannot fail here.
            return int(valor1)
        print('\033[1;31mERRO! FAVOR DIGITAR UM NÚMERO INTEIRO VÁLIDO\033[m')
def linha(tamanho=42):
    """Return a horizontal rule made of *tamanho* dashes (default 42)."""
    return tamanho * '-'
def cabeçalho(txt):
    """Print *txt* centered to width 42, framed by two dashed rules."""
    for pedaço in (linha(), txt.center(42), linha()):
        print(pedaço)
def menu(lista):
    """Render the main menu from *lista* and return the user's choice.

    Each item is printed as a numbered entry starting at 1; the selection
    is read via LeiaInt, so the caller receives whatever LeiaInt returns.
    """
    cabeçalho('MENU PRINCIPAL')
    # enumerate replaces the hand-rolled x counter from the original.
    for x, item in enumerate(lista, start=1):
        print(f'\033[33m{x}\033[m - \033[34m{item}\033[m')
    print(linha())
    opç = LeiaInt('\033[32mSua opção: \033[m')
    return opç
pocketsmith/models/attachment.py | brett-comber/python-pocketsmith-api | 0 | 13417 | <filename>pocketsmith/models/attachment.py
# coding: utf-8
"""
PocketSmith
The public PocketSmith API # noqa: E501
The version of the OpenAPI document: 2.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pocketsmith.configuration import Configuration
class Attachment(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    # NOTE(review): generated model for the PocketSmith "Attachment" schema;
    # change the OpenAPI spec and regenerate rather than editing by hand.
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'content_type': 'str',
        'content_type_meta': 'AttachmentContentTypeMeta',
        'created_at': 'datetime',
        'file_name': 'str',
        'id': 'int',
        'original_url': 'str',
        'title': 'str',
        'type': 'str',
        'updated_at': 'datetime',
        'variants': 'AttachmentVariants'
    }

    attribute_map = {
        'content_type': 'content_type',
        'content_type_meta': 'content_type_meta',
        'created_at': 'created_at',
        'file_name': 'file_name',
        'id': 'id',
        'original_url': 'original_url',
        'title': 'title',
        'type': 'type',
        'updated_at': 'updated_at',
        'variants': 'variants'
    }

    def __init__(self, content_type=None, content_type_meta=None, created_at=None, file_name=None, id=None, original_url=None, title=None, type=None, updated_at=None, variants=None, local_vars_configuration=None):  # noqa: E501
        """Attachment - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the properties below.
        self._content_type = None
        self._content_type_meta = None
        self._created_at = None
        self._file_name = None
        self._id = None
        self._original_url = None
        self._title = None
        self._type = None
        self._updated_at = None
        self._variants = None
        self.discriminator = None

        # Assign through the property setters so any setter logic runs;
        # None arguments leave the backing field unset.
        if content_type is not None:
            self.content_type = content_type
        if content_type_meta is not None:
            self.content_type_meta = content_type_meta
        if created_at is not None:
            self.created_at = created_at
        if file_name is not None:
            self.file_name = file_name
        if id is not None:
            self.id = id
        if original_url is not None:
            self.original_url = original_url
        if title is not None:
            self.title = title
        if type is not None:
            self.type = type
        if updated_at is not None:
            self.updated_at = updated_at
        if variants is not None:
            self.variants = variants

    @property
    def content_type(self):
        """Gets the content_type of this Attachment.  # noqa: E501

        The content type of the attachment.  # noqa: E501

        :return: The content_type of this Attachment.  # noqa: E501
        :rtype: str
        """
        return self._content_type

    @content_type.setter
    def content_type(self, content_type):
        """Sets the content_type of this Attachment.

        The content type of the attachment.  # noqa: E501

        :param content_type: The content_type of this Attachment.  # noqa: E501
        :type: str
        """
        self._content_type = content_type

    @property
    def content_type_meta(self):
        """Gets the content_type_meta of this Attachment.  # noqa: E501

        :return: The content_type_meta of this Attachment.  # noqa: E501
        :rtype: AttachmentContentTypeMeta
        """
        return self._content_type_meta

    @content_type_meta.setter
    def content_type_meta(self, content_type_meta):
        """Sets the content_type_meta of this Attachment.

        :param content_type_meta: The content_type_meta of this Attachment.  # noqa: E501
        :type: AttachmentContentTypeMeta
        """
        self._content_type_meta = content_type_meta

    @property
    def created_at(self):
        """Gets the created_at of this Attachment.  # noqa: E501

        When the attachment was created  # noqa: E501

        :return: The created_at of this Attachment.  # noqa: E501
        :rtype: datetime
        """
        return self._created_at

    @created_at.setter
    def created_at(self, created_at):
        """Sets the created_at of this Attachment.

        When the attachment was created  # noqa: E501

        :param created_at: The created_at of this Attachment.  # noqa: E501
        :type: datetime
        """
        self._created_at = created_at

    @property
    def file_name(self):
        """Gets the file_name of this Attachment.  # noqa: E501

        The file name of the attachment  # noqa: E501

        :return: The file_name of this Attachment.  # noqa: E501
        :rtype: str
        """
        return self._file_name

    @file_name.setter
    def file_name(self, file_name):
        """Sets the file_name of this Attachment.

        The file name of the attachment  # noqa: E501

        :param file_name: The file_name of this Attachment.  # noqa: E501
        :type: str
        """
        self._file_name = file_name

    @property
    def id(self):
        """Gets the id of this Attachment.  # noqa: E501

        The unique identifier of the attachment  # noqa: E501

        :return: The id of this Attachment.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this Attachment.

        The unique identifier of the attachment  # noqa: E501

        :param id: The id of this Attachment.  # noqa: E501
        :type: int
        """
        self._id = id

    @property
    def original_url(self):
        """Gets the original_url of this Attachment.  # noqa: E501

        The url of the attachment  # noqa: E501

        :return: The original_url of this Attachment.  # noqa: E501
        :rtype: str
        """
        return self._original_url

    @original_url.setter
    def original_url(self, original_url):
        """Sets the original_url of this Attachment.

        The url of the attachment  # noqa: E501

        :param original_url: The original_url of this Attachment.  # noqa: E501
        :type: str
        """
        self._original_url = original_url

    @property
    def title(self):
        """Gets the title of this Attachment.  # noqa: E501

        The title of the attachment. If blank or not provided, the title will be derived from the file name.  # noqa: E501

        :return: The title of this Attachment.  # noqa: E501
        :rtype: str
        """
        return self._title

    @title.setter
    def title(self, title):
        """Sets the title of this Attachment.

        The title of the attachment. If blank or not provided, the title will be derived from the file name.  # noqa: E501

        :param title: The title of this Attachment.  # noqa: E501
        :type: str
        """
        self._title = title

    @property
    def type(self):
        """Gets the type of this Attachment.  # noqa: E501

        The type of attachment  # noqa: E501

        :return: The type of this Attachment.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this Attachment.

        The type of attachment  # noqa: E501

        :param type: The type of this Attachment.  # noqa: E501
        :type: str
        """
        self._type = type

    @property
    def updated_at(self):
        """Gets the updated_at of this Attachment.  # noqa: E501

        When the attachment was last updated  # noqa: E501

        :return: The updated_at of this Attachment.  # noqa: E501
        :rtype: datetime
        """
        return self._updated_at

    @updated_at.setter
    def updated_at(self, updated_at):
        """Sets the updated_at of this Attachment.

        When the attachment was last updated  # noqa: E501

        :param updated_at: The updated_at of this Attachment.  # noqa: E501
        :type: datetime
        """
        self._updated_at = updated_at

    @property
    def variants(self):
        """Gets the variants of this Attachment.  # noqa: E501

        :return: The variants of this Attachment.  # noqa: E501
        :rtype: AttachmentVariants
        """
        return self._variants

    @variants.setter
    def variants(self, variants):
        """Sets the variants of this Attachment.

        :param variants: The variants of this Attachment.  # noqa: E501
        :type: AttachmentVariants
        """
        self._variants = variants

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models (anything with to_dict),
        # including those inside lists and dict values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is value-based: compares the serialized dicts.
        if not isinstance(other, Attachment):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, Attachment):
            return True

        return self.to_dict() != other.to_dict()
| 1.648438 | 2 |
pynsq/nsq/NSQReader.py | ghorges/nsq-2.0 | 0 | 13418 | """
high-level NSQ reader class built on top of a Tornado IOLoop supporting both sync and
async modes of operation.
supports various hooks to modify behavior when heartbeats are received, temporarily
disable the reader, and pre-process/validate messages.
when supplied a list of nsqlookupd addresses, a reader instance will periodically poll
the specified topic in order to discover new producers and reconnect to existing ones.
sync ex.
import nsq
def task1(message):
print message
return True
def task2(message):
print message
return True
all_tasks = {"task1": task1, "task2": task2}
r = nsq.Reader(all_tasks, lookupd_http_addresses=['http://127.0.0.1:4161'],
topic="nsq_reader", channel="asdf", lookupd_poll_interval=15)
nsq.run()
async ex.
import nsq
buf = []
def process_message(message, finisher):
global buf
# cache both the message and the finisher callable for later processing
buf.append((message, finisher))
if len(buf) >= 3:
print '****'
for msg, finish_fxn in buf:
print msg
finish_fxn(True) # use finish_fxn to tell NSQ of success
print '****'
buf = []
else:
print 'deferring processing'
all_tasks = {"task1": process_message}
r = nsq.Reader(all_tasks, lookupd_http_addresses=['http://127.0.0.1:4161'],
topic="nsq_reader", channel="async", async=True)
nsq.run()
"""
import logging
try:
import simplejson as json
except ImportError:
import json
import time
import signal
import socket
import functools
import urllib
import random
import tornado.ioloop
import tornado.httpclient
import BackoffTimer
import nsq
import async
class RequeueWithoutBackoff(Exception):
    """Raised by a message handler to requeue the current message
    without counting it as a failure for backoff purposes."""
class Reader(object):
    # NOTE(review): Python 2 code - it references `unicode` and names a
    # parameter `async`, which became a reserved keyword in Python 3.7.
    def __init__(self, all_tasks, topic, channel,
                 nsqd_tcp_addresses=None, lookupd_http_addresses=None, async=False,
                 max_tries=5, max_in_flight=1, requeue_delay=90, lookupd_poll_interval=120):
        """
        Reader receives messages over the specified ``topic/channel`` and provides an async loop
        that calls each task method provided by ``all_tasks`` up to ``max_tries``.

        It will handle sending FIN or REQ commands based on feedback from the task methods. When
        re-queueing, an increasing delay will be calculated automatically. Additionally, when
        message processing fails, it will backoff for increasing multiples of ``requeue_delay``
        between updating of RDY count.

        ``all_tasks`` defines the a mapping of tasks and callables that will be executed for each
        message received.

        ``topic`` specifies the desired NSQ topic

        ``channel`` specifies the desired NSQ channel

        ``nsqd_tcp_addresses`` a sequence of string addresses of the nsqd instances this reader
        should connect to

        ``lookupd_http_addresses`` a sequence of string addresses of the nsqlookupd instances this
        reader should query for producers of the specified topic

        ``async`` determines whether handlers will do asynchronous processing. If set to True,
        handlers must accept a keyword argument called ``finisher`` that will be a callable used
        to signal message completion, taking a boolean argument indicating success.

        ``max_tries`` the maximum number of attempts the reader will make to process a message after
        which messages will be automatically discarded

        ``max_in_flight`` the maximum number of messages this reader will pipeline for processing.
        this value will be divided evenly amongst the configured/discovered nsqd producers.

        ``requeue_delay`` the base multiple used when re-queueing (multiplied by # of attempts)

        ``lookupd_poll_interval`` the amount of time in between querying all of the supplied
        nsqlookupd instances. a random amount of time based on thie value will be initially
        introduced in order to add jitter when multiple readers are running.
        """
        assert isinstance(all_tasks, dict)
        for key, method in all_tasks.items():
            assert callable(method), "key %s must have a callable value" % key
        assert isinstance(topic, (str, unicode)) and len(topic) > 0
        assert isinstance(channel, (str, unicode)) and len(channel) > 0
        assert isinstance(max_in_flight, int) and 0 < max_in_flight < 2500

        # Normalize the address arguments: accept a single string or a sequence.
        if nsqd_tcp_addresses:
            if not isinstance(nsqd_tcp_addresses, (list, set, tuple)):
                assert isinstance(nsqd_tcp_addresses, (str, unicode))
                nsqd_tcp_addresses = [nsqd_tcp_addresses]
        else:
            nsqd_tcp_addresses = []

        if lookupd_http_addresses:
            if not isinstance(lookupd_http_addresses, (list, set, tuple)):
                assert isinstance(lookupd_http_addresses, (str, unicode))
                lookupd_http_addresses = [lookupd_http_addresses]
        else:
            lookupd_http_addresses = []

        # At least one way of finding nsqd producers must be supplied.
        assert nsqd_tcp_addresses or lookupd_http_addresses

        self.topic = topic
        self.channel = channel
        self.nsqd_tcp_addresses = nsqd_tcp_addresses
        self.lookupd_http_addresses = lookupd_http_addresses
        self.requeue_delay = int(requeue_delay * 1000)  # stored in milliseconds
        self.max_tries = max_tries
        self.max_in_flight = max_in_flight
        self.lookupd_poll_interval = lookupd_poll_interval
        self.async = async

        self.task_lookup = all_tasks
        # One independent backoff timer per task name.
        self.backoff_timer = dict((k, BackoffTimer.BackoffTimer(0, 120)) for k in self.task_lookup.keys())

        self.hostname = socket.gethostname()
        self.short_hostname = self.hostname.split('.')[0]
        self.conns = {}
        self.http_client = tornado.httpclient.AsyncHTTPClient()
        self.last_recv_timestamps = {}

        logging.info("starting reader for topic '%s'..." % self.topic)

        # Connect every task to every statically configured nsqd.
        for task in self.task_lookup:
            for addr in self.nsqd_tcp_addresses:
                address, port = addr.split(':')
                self.connect_to_nsqd(address, int(port), task)

        # trigger the first one manually
        self.query_lookupd()

        tornado.ioloop.PeriodicCallback(self.check_last_recv_timestamps, 60 * 1000).start()
        periodic = tornado.ioloop.PeriodicCallback(self.query_lookupd, self.lookupd_poll_interval * 1000)
        # randomize the time we start this poll loop so that all servers don't query at exactly the same time
        # randomize based on 10% of the interval
        delay = random.random() * self.lookupd_poll_interval * .1
        tornado.ioloop.IOLoop.instance().add_timeout(time.time() + delay, periodic.start)

    def _client_callback(self, success, message=None, task=None, conn=None):
        '''
        This is the method that an asynchronous nsqreader should call to indicate
        async completion of a message. This will most likely be exposed as the finisher
        callable created in `callback` above with some functools voodoo
        '''
        if success:
            self.backoff_timer[task].success()
            self.finish(conn, message.id)
        else:
            self.backoff_timer[task].failure()
            self.requeue(conn, message)

    def requeue(self, conn, message, delay=True):
        """Send a REQ for *message*, or FIN it once max_tries is exceeded."""
        if message.attempts > self.max_tries:
            self.giving_up(message)
            return self.finish(conn, message.id)

        try:
            # ms
            requeue_delay = self.requeue_delay * message.attempts if delay else 0
            conn.send(nsq.requeue(message.id, str(requeue_delay)))
        except Exception:
            conn.close()
            logging.exception('[%s] failed to send requeue %s @ %d' % (conn, message.id, requeue_delay))

    def finish(self, conn, message_id):
        '''
        This is an internal method for NSQReader
        '''
        try:
            conn.send(nsq.finish(message_id))
        except Exception:
            conn.close()
            logging.exception('[%s] failed to send finish %s' % (conn, message_id))

    def connection_max_in_flight(self):
        # Divide the global max_in_flight budget evenly across connections
        # (always at least 1 per connection).
        return max(1, self.max_in_flight / max(1, len(self.conns)))

    def handle_message(self, conn, task, message):
        """Dispatch *message* to the handler registered for *task* and FIN/REQ it."""
        conn.ready -= 1

        # update ready count if necessary...
        # if we're in a backoff state for this task
        # set a timer to actually send the ready update
        per_conn = self.connection_max_in_flight()
        if not conn.is_sending_ready and (conn.ready <= 1 or conn.ready < int(per_conn * 0.25)):
            backoff_interval = self.backoff_timer[task].get_interval()
            if self.disabled():
                backoff_interval = 15
            if backoff_interval > 0:
                conn.is_sending_ready = True
                logging.info('[%s] backing off for %0.2f seconds' % (conn, backoff_interval))
                send_ready_callback = functools.partial(self.send_ready, conn, per_conn)
                tornado.ioloop.IOLoop.instance().add_timeout(time.time() + backoff_interval, send_ready_callback)
            else:
                self.send_ready(conn, per_conn)

        try:
            processed_message = self.preprocess_message(message)
            if not self.validate_message(processed_message):
                # Invalid messages are finished (dropped), not requeued.
                return self.finish(conn, message.id)
        except Exception:
            logging.exception('[%s] caught exception while preprocessing' % conn)
            return self.requeue(conn, message)

        method_callback = self.task_lookup[task]
        try:
            if self.async:
                # this handler accepts the finisher callable as a keyword arg
                finisher = functools.partial(self._client_callback, message=message, task=task, conn=conn)
                return method_callback(processed_message, finisher=finisher)
            else:
                # this is an old-school sync handler, give it just the message
                if method_callback(processed_message):
                    self.backoff_timer[task].success()
                    return self.finish(conn, message.id)
                self.backoff_timer[task].failure()
        except RequeueWithoutBackoff:
            # Handler asked for a requeue that should not count as a failure.
            logging.info('RequeueWithoutBackoff')
        except Exception:
            logging.exception('[%s] caught exception while handling %s' % (conn, task))
            self.backoff_timer[task].failure()

        return self.requeue(conn, message)

    def send_ready(self, conn, value):
        """Send an RDY update; while disabled, retry every 15 seconds instead."""
        if self.disabled():
            logging.info('[%s] disabled, delaying ready state change', conn)
            send_ready_callback = functools.partial(self.send_ready, conn, value)
            tornado.ioloop.IOLoop.instance().add_timeout(time.time() + 15, send_ready_callback)
            return

        try:
            conn.send(nsq.ready(value))
            conn.ready = value
        except Exception:
            conn.close()
            logging.exception('[%s] failed to send ready' % conn)

        conn.is_sending_ready = False

    def _data_callback(self, conn, raw_data, task):
        """Low-level frame handler: route messages and answer heartbeats."""
        self.last_recv_timestamps[get_conn_id(conn, task)] = time.time()
        frame, data = nsq.unpack_response(raw_data)
        if frame == nsq.FRAME_TYPE_MESSAGE:
            message = nsq.decode_message(data)
            try:
                self.handle_message(conn, task, message)
            except Exception:
                logging.exception('[%s] failed to handle_message() %r' % (conn, message))
        elif frame == nsq.FRAME_TYPE_RESPONSE and data == "_heartbeat_":
            self.heartbeat(conn)
            conn.send(nsq.nop())

    def connect_to_nsqd(self, address, port, task):
        """Open (at most one) connection per (address, port, task) triple."""
        assert isinstance(address, (str, unicode))
        assert isinstance(port, int)

        conn_id = address + ':' + str(port) + ':' + task
        if conn_id in self.conns:
            return

        logging.info("[%s] connecting to nsqd for '%s'", address + ':' + str(port), task)
        connect_callback = functools.partial(self._connect_callback, task=task)
        data_callback = functools.partial(self._data_callback, task=task)
        close_callback = functools.partial(self._close_callback, task=task)
        conn = async.AsyncConn(address, port, connect_callback, data_callback, close_callback)
        conn.connect()
        self.conns[conn_id] = conn

    def _connect_callback(self, conn, task):
        # With multiple tasks, each task gets its own derived channel name.
        if len(self.task_lookup) > 1:
            channel = self.channel + '.' + task
        else:
            channel = self.channel
        initial_ready = self.connection_max_in_flight()
        try:
            conn.send(nsq.subscribe(self.topic, channel, self.short_hostname, self.hostname))
            conn.send(nsq.ready(initial_ready))
            conn.ready = initial_ready
            conn.is_sending_ready = False
        except Exception:
            conn.close()
            logging.exception('[%s] failed to bootstrap connection' % conn)

    def _close_callback(self, conn, task):
        conn_id = get_conn_id(conn, task)
        if conn_id in self.conns:
            del self.conns[conn_id]

        logging.warning("[%s] connection closed... %d left open", conn, len(self.conns))

        # Without lookupds there is no way to rediscover producers, so stop.
        if len(self.conns) == 0 and len(self.lookupd_http_addresses) == 0:
            logging.warning("all connections closed and no lookupds... exiting")
            tornado.ioloop.IOLoop.instance().stop()

    def query_lookupd(self):
        """Ask every configured nsqlookupd for producers of our topic."""
        for endpoint in self.lookupd_http_addresses:
            lookupd_url = endpoint + "/lookup?topic=" + urllib.quote(self.topic)
            req = tornado.httpclient.HTTPRequest(lookupd_url, method="GET",
                                                 connect_timeout=1, request_timeout=2)
            callback = functools.partial(self._finish_query_lookupd, endpoint=endpoint)
            self.http_client.fetch(req, callback=callback)

    def _finish_query_lookupd(self, response, endpoint):
        if response.error:
            logging.warning("[%s] lookupd error %s", endpoint, response.error)
            return

        try:
            lookup_data = json.loads(response.body)
        except json.JSONDecodeError:
            logging.warning("[%s] failed to parse JSON from lookupd: %r", endpoint, response.body)
            return

        if lookup_data['status_code'] != 200:
            logging.warning("[%s] lookupd responded with %d", endpoint, lookup_data['status_code'])
            return

        # connect_to_nsqd is a no-op for producers we already know about.
        for task in self.task_lookup:
            for producer in lookup_data['data']['producers']:
                self.connect_to_nsqd(producer['address'], producer['tcp_port'], task)

    def check_last_recv_timestamps(self):
        """Close any connection that has been silent for more than 60 seconds."""
        now = time.time()
        # Iterate over a copy: closing a connection mutates self.conns.
        for conn_id, conn in dict(self.conns).iteritems():
            timestamp = self.last_recv_timestamps.get(conn_id, 0)
            if (now - timestamp) > 60:
                # this connection hasnt received data beyond
                # the normal heartbeat interval, close it
                logging.warning("[%s] connection is stale, closing", conn)
                conn = self.conns[conn_id]
                conn.close()

    #
    # subclass overwriteable
    #

    def giving_up(self, message):
        logging.warning("giving up on message '%s' after max tries %d", message.id, self.max_tries)

    def disabled(self):
        # Return True to pause consumption (RDY updates are deferred).
        return False

    def heartbeat(self, conn):
        pass

    def validate_message(self, message):
        # Return False to drop (FIN) the message without handling it.
        return True

    def preprocess_message(self, message):
        return message
def get_conn_id(conn, task):
    """Build the registry key for *conn* handling *task* ("<conn>:<task>")."""
    return "%s:%s" % (conn, task)
def _handle_term_signal(sig_num, frame):
    """SIGTERM handler: log the signal and stop the global Tornado IOLoop."""
    # Lazy %-style args let logging skip formatting when INFO is disabled.
    logging.info('TERM Signal handler called with signal %r', sig_num)
    tornado.ioloop.IOLoop.instance().stop()
def run():
    """Install the SIGTERM handler and start the Tornado IOLoop (blocks until stopped)."""
    signal.signal(signal.SIGTERM, _handle_term_signal)
    tornado.ioloop.IOLoop.instance().start()
| 2.484375 | 2 |
main.py | ygidtu/mountainClimber | 0 | 13419 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
u"""
Created at 2020.09.04 by <NAME>
"""
import warnings
warnings.filterwarnings("ignore")
import click
from cli.climb import climb
from cli.diff import diff
@click.group()
def main():
    # Root click command group; the climb/diff subcommands are attached below.
    pass
# Register the subcommands on the root group.
main.add_command(climb)
main.add_command(diff)

if __name__ == '__main__':
    main()
| 1.820313 | 2 |
app/fednlp/data/raw_data_loader/CNN_Dailymail/data_loader.py | ray-ruisun/FedML | 0 | 13420 | import os
from data.raw_data_loader.base.base_raw_data_loader import Seq2SeqRawDataLoader
class RawDataLoader(Seq2SeqRawDataLoader):
    """Raw data loader for the CNN/DailyMail summarization corpus.

    Each ``.story`` file becomes one example: the article body is stored in
    ``self.X`` and the "@highlight" summary sentences (each wrapped in
    ``<s> ... </s>``) are joined into the target in ``self.Y``.
    """

    def __init__(self, data_path):
        super().__init__(data_path)
        # Story files live under these two sub-directories of data_path.
        self.cnn_path = "cnn/stories"
        self.dailymail_path = "dailymail/stories"

    def load_data(self):
        """Populate X/Y from both corpora; no-op if data is already loaded."""
        if len(self.X) == 0 or len(self.Y) == 0:
            total_size = 0
            # The two corpora share the same layout, so one helper handles both
            # (the original duplicated the os.walk loop).
            for sub_path in (self.cnn_path, self.dailymail_path):
                total_size += self._process_dir(os.path.join(self.data_path, sub_path))
            self.attributes["index_list"] = list(range(total_size))

    def _process_dir(self, dir_path):
        """Process every story file under *dir_path*; return the count processed."""
        processed = 0
        for root, dirs, files in os.walk(dir_path):
            for file_name in files:
                processed += self.process_data_file(os.path.join(root, file_name))
        return processed

    def process_data_file(self, file_path):
        """Parse one story file into an (article, abstract) pair.

        Lines before the first "@highlight" marker form the article; all
        remaining non-marker lines form the abstract. Returns the number of
        examples added (always 1).
        """
        article_lines = []
        abstract_lines = []
        next_is_highlight = False
        with open(file_path, "r") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                if line.startswith("@highlight"):
                    next_is_highlight = True
                elif next_is_highlight:
                    abstract_lines.append(line)
                else:
                    article_lines.append(line)

        assert len(self.X) == len(self.Y)
        idx = len(self.X)
        self.X[idx] = " ".join(article_lines)
        self.Y[idx] = " ".join(
            ["%s %s %s" % ("<s>", sent, "</s>") for sent in abstract_lines]
        )
        return 1
| 2.625 | 3 |
packages/utils/propagate_license.py | justi/m2g | 12 | 13421 | <filename>packages/utils/propagate_license.py
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# propagate_license.py
# Created by <NAME> on 2014-05-16.
# Email: <EMAIL>
# Apache 2.0 header template: each "{}" placeholder is filled with the
# target language's single-line comment leader.
__license_header__ = """
{} Copyright 2014 Open Connectome Project (http://openconnecto.me)
{}
{} Licensed under the Apache License, Version 2.0 (the "License");
{} you may not use this file except in compliance with the License.
{} You may obtain a copy of the License at
{}
{} http://www.apache.org/licenses/LICENSE-2.0
{}
{} Unless required by applicable law or agreed to in writing, software
{} distributed under the License is distributed on an "AS IS" BASIS,
{} WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
{} See the License for the specific language governing permissions and
{} limitations under the License.
{}
"""

# Number of "{}" placeholders in __license_header__ (one per header line).
COMM_COUNT = 14

# Map file extension -> single-line comment leader for that language
# (html uses no leader; the header is wrapped in an HTML comment instead).
comm = {".py":"#", ".pyx":"#", "": "#", ".html":"", ".sh":"#", ".r":"#", ".m":"%", ".c":"//",
        ".c++":"//", ".java":"//", ".js":"//"}
import argparse
import os
def add(files):
    """Insert (or refresh) the Apache license header in each file in *files*.

    Shebang files get the header after line 1 (first removing a stale
    "# Copyright" header when one is detected); .html files get the header
    wrapped in an HTML comment at the top; everything else gets it prepended.
    """
    global __license_header__
    for full_fn in files:
        license_header = __license_header__
        print("Processing file: %s ..." % full_fn)
        # `with` guarantees the handle is closed even on error.
        with open(full_fn, "rb") as script:
            lines = script.read().splitlines()
        # Exception for html
        comment_style = comm[os.path.splitext(full_fn)[1].lower()]
        if lines[0].startswith("#!/usr/bin"):
            if lines[5].startswith("# Copyright"):  # get rid of copyright year
                # Delete index 5 first, then index 1 (order matters).
                del lines[5], lines[1]
            lines.insert(1, license_header.format(*([comment_style] * COMM_COUNT)))
        else:
            if os.path.splitext(full_fn)[1].lower().strip() == ".html":
                license_header = "<!-- " + license_header + " -->"
            lines.insert(0, license_header.format(*([comment_style] * COMM_COUNT)))
        # The original never closed this write handle; `with` flushes and
        # closes it deterministically.
        with open(full_fn, "wb") as script:
            script.write("\n".join(lines))
def hidden(path):
    """Return True if any '/'-separated component of *path* starts with a dot."""
    return any(part.startswith(".") for part in path.split("/"))
def rm(dirname):
    """Remove license headers under *dirname*. Not implemented (stub for -r)."""
    pass
def main():
    """Parse the command line, then license either the explicitly listed
    files or every matching file under the chosen directory tree."""
    parser = argparse.ArgumentParser(description="Add or Update license headers to code")
    parser.add_argument("-r", "--remove", action="store_true", help="Remove the license")
    parser.add_argument("-d", "--dirname", action="store", default=".", help="Directory where to start walk")
    parser.add_argument("-f", "--files", action="store", nargs="*", help="Files you want license added to")
    parser.add_argument("-e", "--file_exts", nargs="*", action="store",
                        default=[".py", ".pyx", ".html", ".sh", ".R", ".m", ""],
                        help="File extensions to add to the files altered")
    parser.add_argument("-i", "--ignore", nargs="*", action="store",
                        default=["README", "__init__.py", "TODO", __file__],
                        help="Files to ignore")
    args = parser.parse_args()

    if args.files:
        print("Licensing individual files ...")
        add(args.files)
        exit(1)

    print("Licensing a directory of files ...")
    targets = []
    for root, dirnames, filenames in os.walk(os.path.abspath(args.dirname)):
        for filename in filenames:
            full_fn = os.path.join(root, filename)
            # Skip non-files, hidden paths, ignored names, and foreign extensions.
            if not os.path.isfile(full_fn) or hidden(full_fn):
                continue
            if os.path.basename(full_fn) in args.ignore:
                continue
            if os.path.splitext(full_fn)[-1].lower().strip() in args.file_exts:
                targets.append(full_fn)
    add(targets)
# Script entry point.
if __name__ == "__main__":
    main()
| 1.773438 | 2 |
core/gf/test.py | zorrock/accelerated-text | 1 | 13422 | <filename>core/gf/test.py
import pytest
import server
@pytest.fixture(scope="session")
def authorship_grammar():
    """Compile the Authorship test grammar once for the whole test session."""
    def _read(path):
        with open(path, "r") as handle:
            return handle.read()

    abstract = {"content": _read("test_grammars/Authorship.gf")}
    concrete = {"content": _read("test_grammars/AuthorshipEng.gf"), "key": "Eng"}
    return server.compile_grammar("Authorship", abstract, [concrete])
def test_compile_grammar(authorship_grammar):
    """The compiled grammar exposes exactly one concrete language."""
    grammar = authorship_grammar
    print(grammar)
    assert grammar
    languages = grammar.languages
    assert len(languages) == 1
    assert "AuthorshipEng" in languages
def test_generation_results(authorship_grammar):
    """Every expected English variant is generated for the grammar."""
    expressions = server.generate_expressions(authorship_grammar)
    results = [(key, server.generate_variants(expressions, concrete))
               for key, concrete in authorship_grammar.languages.items()]
    print(results)
    _, variants = results[0]
    assert set(variants) == {
        "good {{TITLE}} is authored by {{AUTHOR}}",
        "good {{TITLE}} is written by {{AUTHOR}}",
        "excellent {{TITLE}} is authored by {{AUTHOR}}",
        "excellent {{TITLE}} is written by {{AUTHOR}}",
        "{{AUTHOR}} is the author of excellent {{TITLE}}",
        "{{AUTHOR}} is the author of good {{TITLE}}",
        "{{AUTHOR}} was authored by good {{TITLE}}",
        "{{AUTHOR}} was authored by excellent {{TITLE}}",
    }
| 2.359375 | 2 |
troposphere/validators/dynamodb.py | compose-x/troposphere | 0 | 13423 | <filename>troposphere/validators/dynamodb.py
# Copyright (c) 2012-2022, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from .. import AWSHelperFn, If
def attribute_type_validator(x):
    """
    Property: AttributeDefinition.AttributeType
    """
    valid_types = ("S", "N", "B")
    if x in valid_types:
        return x
    raise ValueError("AttributeType must be one of: %s" % ", ".join(valid_types))
def key_type_validator(x):
    """
    Property: KeySchema.KeyType
    """
    valid_types = ("HASH", "RANGE")
    if x in valid_types:
        return x
    raise ValueError("KeyType must be one of: %s" % ", ".join(valid_types))
def projection_type_validator(x):
    """
    Property: Projection.ProjectionType
    """
    valid_types = ("KEYS_ONLY", "INCLUDE", "ALL")
    if x in valid_types:
        return x
    raise ValueError("ProjectionType must be one of: %s" % ", ".join(valid_types))
def billing_mode_validator(x):
    """
    Property: Table.BillingMode
    """
    valid_modes = ("PROVISIONED", "PAY_PER_REQUEST")
    if x in valid_modes:
        return x
    raise ValueError(
        "Table billing mode must be one of: %s" % ", ".join(valid_modes)
    )
def table_class_validator(x):
    """
    Property: Table.TableClass
    """
    valid_table_classes = ("STANDARD", "STANDARD_INFREQUENT_ACCESS")
    if x in valid_table_classes:
        return x
    raise ValueError(
        "Table class must be one of: %s" % ", ".join(valid_table_classes)
    )
def validate_table(self):
    """
    Class: Table

    Cross-checks BillingMode against ProvisionedThroughput on the table and
    on each non-helper GlobalSecondaryIndex.
    """
    billing_mode = self.properties.get("BillingMode", "PROVISIONED")
    indexes = self.properties.get("GlobalSecondaryIndexes", [])
    tput_props = [self.properties]
    tput_props.extend(
        index.properties for index in indexes if not isinstance(index, AWSHelperFn)
    )

    def _present_or_helper_in_all(name):
        # Present (or supplied via an intrinsic-function helper) everywhere.
        return all(
            name in prop or isinstance(prop.get(name), AWSHelperFn)
            for prop in tput_props
        )

    def _explicitly_set_anywhere(name):
        # Concretely set (not just a helper placeholder) in at least one spot.
        return any(
            name in prop and not isinstance(prop.get(name), AWSHelperFn)
            for prop in tput_props
        )

    if isinstance(billing_mode, If):
        # Billing mode is decided by a condition at deploy time; only reject
        # a concrete ProvisionedThroughput that would clash with PAY_PER_REQUEST.
        if _explicitly_set_anywhere("ProvisionedThroughput"):
            raise ValueError(
                "Table billing mode is per-request. "
                "ProvisionedThroughput property is mutually exclusive"
            )
        return

    if billing_mode == "PROVISIONED":
        if not _present_or_helper_in_all("ProvisionedThroughput"):
            raise ValueError(
                "Table billing mode is provisioned. "
                "ProvisionedThroughput required if available"
            )
    elif billing_mode == "PAY_PER_REQUEST":
        if _explicitly_set_anywhere("ProvisionedThroughput"):
            raise ValueError(
                "Table billing mode is per-request. "
                "ProvisionedThroughput property is mutually exclusive"
            )
| 2.125 | 2 |
videoclip_sources/e004.py | ChrisScarred/misty2py-skills | 0 | 13424 | <filename>videoclip_sources/e004.py<gh_stars>0
import time
from misty2py.robot import Misty
from misty2py.utils.env_loader import EnvLoader
from misty2py_skills.utils.utils import get_abs_path
# Load the robot's IP address from the .env file next to this script.
env_loader = EnvLoader(get_abs_path(".env"))
m = Misty(env_loader.get_ip())

# Subscribe to battery-charge events; the response carries the generated
# event name used by the follow-up calls.
d = m.event("subscribe", type="BatteryCharge")
e_name = d.get("event_name")

# Give the subscription a moment to start delivering before polling it.
time.sleep(1)
d = m.event("get_data", name=e_name)

# do something with the data here

d = m.event("unsubscribe", name=e_name)
| 2.1875 | 2 |
p2/Python Files/audit_street.py | priyankaswadi/Udacity-Data-Analyst-Nanodegree | 0 | 13425 | #Map incorrect and abbreviated street names with correct/better ones
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
OSMFILE = "albany.osm"
# Grabs the last whitespace-delimited token of a street name (e.g. "St." in
# "Main St."), optionally ending with a period.
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)

# Street types considered valid; anything else is flagged by the audit.
# Bug fix: audit_street_type() compares against this list, but it was never
# defined in the module, so running the audit crashed with a NameError.
expected = ["Street", "Avenue", "Boulevard", "Drive", "Court", "Place",
            "Square", "Lane", "Road", "Trail", "Parkway", "Commons",
            "Way", "Extension", "East", "Route"]

# UPDATE THIS VARIABLE
# Maps abbreviated/misspelled street types (and a few known-bad full names)
# to their canonical form.  The duplicate "way" key present in the original
# dict literal has been removed (later duplicates silently win in Python).
mapping = {"rd": "Road",
           "Rd": "Road",
           "road": "Road",
           "Ave": "Avenue",
           "Ave.": "Avenue",
           "AVE": "Avenue",
           "way": "Way",
           "street": "Street",
           "Dr.": "Drive",
           "Blvd": "Boulevard",
           "rt": "Route",
           "Ext": "Extension",
           "Jay": "Jay Street",
           "Nott St E": "Nott Street East",
           "Troy-Schenetady-Road": "Troy Schenectady Road",
           "Troy-Schenetady Rd": "Troy Schenectady Road",
           "Delatour": "Delatour Road",
           "Deltour": "Delatour Road",
           "Sparrowbush": "Sparrowbush Road"
           }
def audit_street_type(street_types, street_name):
    """Record street_name under its street-type suffix when that suffix is
    not an expected type.

    street_types: a defaultdict(set) mapping street type -> set of full names.
    NOTE(review): relies on a module-level ``expected`` list of valid street
    types being defined elsewhere in the module — confirm it exists, otherwise
    this raises NameError at runtime.
    """
    m = street_type_re.search(street_name)
    if m:
        street_type = m.group()
        if street_type not in expected:
            street_types[street_type].add(street_name)
def is_street_name(elem):
    """Return True when the OSM tag element describes a street name."""
    key = elem.attrib['k']
    return key == "addr:street"
def audit(osmfile):
    """Stream-parse an OSM XML file and collect street names whose type is
    unexpected, grouped by street type."""
    street_types = defaultdict(set)
    with open(osmfile, "r") as osm_file:
        for _, elem in ET.iterparse(osm_file, events=("start",)):
            if elem.tag in ("node", "way"):
                for tag in elem.iter("tag"):
                    if is_street_name(tag):
                        audit_street_type(street_types, tag.attrib['v'])
    return street_types
def update_name(name, mapping):
    """Return *name* with its trailing street type replaced by the canonical
    spelling from *mapping*; names with no mapped suffix pass through."""
    match = street_type_re.search(name)
    if match:
        suffix = match.group()
        replacement = mapping.get(suffix)
        if replacement is not None:
            name = name[:-len(suffix)] + replacement
    return name
def test():
    """Audit OSMFILE and print each problematic name next to its fixed form."""
    street_types = audit(OSMFILE)
    pprint.pprint(dict(street_types))

    for street_type, names in street_types.iteritems():
        for name in names:
            better_name = update_name(name, mapping)
            if name == better_name:
                continue
            print(name + " --> " + better_name)
# Run the audit/update demo when executed directly.
if __name__ == '__main__':
    test()
modules/week2/utils.py | tobias-z/4-sem-python | 0 | 13426 | from io import TextIOWrapper
import os
from typing import List
# Default output file and sample folder used by the exercises.
OUTPUT = "files/output.csv"
FOLDER = "modules/week2/folders"


def get_file_names(folderpath, out=OUTPUT):
    """takes a path to a folder and writes all filenames in the folder to a specified output file"""
    entries = os.listdir(folderpath)
    with open(out, "w") as handle:
        handle.writelines(entry + "\n" for entry in entries)
def get_all_file_names(folderpath, out=OUTPUT):
    """takes a path to a folder and write all filenames recursively (files of all sub folders to)"""
    def _write_tree(handle, entries, base):
        # Depth-first: a directory's contents are written before later siblings.
        for entry in entries:
            full = f"{base}/{entry}"
            if os.path.isdir(full):
                _write_tree(handle, os.listdir(full), full)
            else:
                handle.write(entry + "\n")

    with open(out, "w") as handle:
        _write_tree(handle, os.listdir(folderpath), folderpath)
def print_line_one(file_names: list):
    """takes a list of filenames and print the first line of each"""
    for file_name in file_names:
        with open(file_name) as handle:
            first_line = handle.readline()
        print(first_line)
def print_emails(file_names: list):
    """takes a list of filenames and print each line that contains an email (just look for @)"""
    for file_name in file_names:
        with open(file_name) as handle:
            for line in handle:
                if "@" in line:
                    print(line)
def write_headlines(md_files: list, out=OUTPUT):
    """takes a list of md files and writes all headlines (lines starting with #) to a file"""
    with open(out, "w") as output_file:
        for md_file in md_files:
            with open(md_file) as source:
                output_file.writelines(
                    line for line in source if line.startswith("#")
                )
| 3.5625 | 4 |
src/api/providers.py | ismetacar/ertis-auth | 17 | 13427 | <reponame>ismetacar/ertis-auth
import json
from sanic import response
from sanic_openapi import doc
from src.plugins.authorization import authorized
from src.plugins.validator import validated
from src.request_models.providers import Provider
from src.request_models.query_model import Query
from src.resources.generic import ensure_membership_is_exists, QUERY_BODY_SCHEMA
from src.resources.providers.resource import CREATE_PROVIDER_SCHEMA
from src.utils import query_helpers
from src.utils.json_helpers import bson_to_json
def init_providers_api(app, settings):
    """Register the provider CRUD and query endpoints on the Sanic app.

    Every handler first verifies that <membership_id> exists, then delegates
    to ``app.provider_service``.  Results are round-tripped through
    ``json.dumps(..., default=bson_to_json)`` so BSON values (ObjectIds,
    dates) serialize cleanly.

    NOTE(review): update/delete pass ``app.persist_event`` to the service
    while create does not — confirm whether create events are persisted
    inside the service instead.
    """
    # region Create Provider
    @app.route('/api/v1/memberships/<membership_id>/providers', methods=['POST'])
    @doc.tag("Providers")
    @doc.operation("Create Provider")
    @doc.consumes(Provider, location="body", content_type="application/json")
    @validated(CREATE_PROVIDER_SCHEMA)
    @authorized(app, settings, methods=['POST'], required_permission='providers.create')
    async def create_provider(request, membership_id, *args, **kwargs):
        await ensure_membership_is_exists(app.db, membership_id, request.ctx.utilizer)
        body = request.json
        resource = await app.provider_service.create_provider(body, request.ctx.utilizer)
        # 201 Created with the stored resource.
        return response.json(json.loads(json.dumps(resource, default=bson_to_json)), 201)

    # endregion

    # region Get Provider
    @app.route('/api/v1/memberships/<membership_id>/providers/<provider_id>', methods=['GET'])
    @doc.tag("Providers")
    @doc.operation("Get Provider")
    @authorized(app, settings, methods=['GET'], required_permission='providers.read')
    async def get_provider(request, membership_id, provider_id, *args, **kwargs):
        await ensure_membership_is_exists(app.db, membership_id, request.ctx.utilizer)
        resource = await app.provider_service.get_provider(provider_id, request.ctx.utilizer)
        return response.json(json.loads(json.dumps(resource, default=bson_to_json)))

    # endregion

    # region Update Provider
    @app.route('/api/v1/memberships/<membership_id>/providers/<provider_id>', methods=['PUT'])
    @doc.tag("Providers")
    @doc.operation("Update Provider")
    @doc.consumes(Provider, location="body", content_type="application/json")
    @authorized(app, settings, methods=['PUT'], required_permission='providers.update')
    async def update_provider(request, membership_id, provider_id, **kwargs):
        await ensure_membership_is_exists(app.db, membership_id, request.ctx.utilizer)
        body = request.json
        resource = await app.provider_service.update_provider(provider_id, body, request.ctx.utilizer,
                                                              app.persist_event)
        return response.json(json.loads(json.dumps(resource, default=bson_to_json)), 200)

    # endregion

    # region Delete Provider
    @app.route('/api/v1/memberships/<membership_id>/providers/<provider_id>', methods=['DELETE'])
    @doc.tag("Providers")
    @doc.operation("Delete Provider")
    @authorized(app, settings, methods=['DELETE'], required_permission='providers.delete')
    async def delete_provider(request, membership_id, provider_id, **kwargs):
        await ensure_membership_is_exists(app.db, membership_id, request.ctx.utilizer)
        await app.provider_service.delete_provider(provider_id, request.ctx.utilizer, app.persist_event)
        # 204 No Content on success.
        return response.json({}, 204)

    # endregion

    # region Query Applications
    # noinspection DuplicatedCode
    @app.route('/api/v1/memberships/<membership_id>/providers/_query', methods=['POST'])
    @doc.tag("Providers")
    @doc.operation("Query Providers")
    @doc.consumes(Query, location="body", content_type="application/json")
    @authorized(app, settings, methods=['POST'], required_permission='providers.read')
    @validated(QUERY_BODY_SCHEMA)
    async def query_providers(request, membership_id, **kwargs):
        await ensure_membership_is_exists(app.db, membership_id, request.ctx.utilizer)
        # where/select/limit/skip/sort come from the validated request body.
        where, select, limit, skip, sort = query_helpers.parse(request)
        providers, count = await app.provider_service.query_providers(
            membership_id,
            where,
            select,
            limit,
            skip,
            sort
        )

        response_json = json.loads(json.dumps({
            'data': {
                'items': providers,
                'count': count
            }
        }, default=bson_to_json))

        return response.json(response_json, 200)

    # endregion
| 2 | 2 |
lista08_pesquisa/questao02.py | mayararysia/ESTD | 0 | 13428 | <reponame>mayararysia/ESTD<filename>lista08_pesquisa/questao02.py
# -*- coding: utf-8 -*-
#Lista de Exercícios 08 (Pesquisa) - Questão 02
#<NAME>
from time import time
from time import sleep
from random import randint
"""
2. Use as duas funções de busca binária apresentadas (iterativa e recursiva). Gere
uma lista de números aleatórios, ordene-os e verifique o desempenho delas. Qual
os resultados?
"""
#Busca Binária - código recursivo
def busca_binaria(uma_lista, item_procurado):
    """Recursive binary search: return True when item_procurado is in the
    sorted list uma_lista, else False.

    Improvement: recurses on index bounds instead of list slices, so each
    level costs O(1) instead of O(n) copying (total O(log n) vs O(n)).
    """
    def _buscar(inicio, fim):
        if inicio > fim:
            return False
        meio = (inicio + fim) // 2
        if uma_lista[meio] == item_procurado:
            return True
        if item_procurado < uma_lista[meio]:
            return _buscar(inicio, meio - 1)
        return _buscar(meio + 1, fim)

    return _buscar(0, len(uma_lista) - 1)
#Busca Binária - código iterativo
def busca_binaria_it(uma_lista, item_pesquisado):
inicio = 0
fim = len(uma_lista)-1
encontrou = False
while inicio<=fim and not encontrou:
meio = (inicio + fim)//2
if uma_lista[meio] == item_pesquisado:
encontrou = True
else:
if item_pesquisado < uma_lista[meio]:
fim = meio-1
else:
inicio = meio+1
return encontrou
#ordena a lista
def ordena(lista):
quant = tam = len(lista)
continua = True
while quant>=1 and continua:
continua = False
for i in range(tam):
j=i+1
if j != tam and lista[i] > lista[j]:
continua = True
ant = lista[i]
lista[i] = lista[j]
lista[j] = ant
i=j
quant-=1
return lista
#cria a lista
def criaLista():
    """Return a new list of 9 random integers in the range [0, 42]."""
    return [randint(0, 42) for _ in range(9)]
def Teste(lista, num):
    """Time one recursive binary search for num in lista; print the result
    and return the elapsed wall-clock time in seconds."""
    print('Procurando ', num, 'na lista', lista)
    comeco = time()
    resultado = busca_binaria(lista, num)
    final = time()
    print('resultado', resultado)
    return final - comeco
def Teste_it(lista, num):
    """Time one iterative binary search for num in lista; print the result
    and return the elapsed wall-clock time in seconds."""
    print('Procurando ', num, 'na lista', lista)
    comeco = time()
    resultado = busca_binaria_it(lista, num)
    final = time()
    print('resultado', resultado)
    return final - comeco
if __name__ == '__main__':
    # Build and sort a random list, then race the two searches 5 times.
    l = criaLista()
    lista = ordena(l)
    qtd_br = qtd_bi = 0
    # Tests
    for i in range(5):
        num = randint(0, 42)
        print("<< Busca Recursiva >> \n")
        tempo_gasto_br = Teste(lista, num)
        print('\ttempo gasto: ', tempo_gasto_br)
        print('\n\n')
        sleep(2)
        print("<< Busca Iterativa >> \n")
        tempo_gasto_bi = Teste_it(lista, num)
        print('\ttempo gasto: ', tempo_gasto_bi)
        print('\n\n')
        # Tally which variant was faster on this round.
        if tempo_gasto_br < tempo_gasto_bi:
            qtd_br +=1
            print('\n-> Busca Recursiva levou o menor tempo\n')
        else:
            qtd_bi +=1
            print('\n-> Busca Iterativa levou o menor tempo\n')
    print("------- ------- ------- ------- -------")
    print("\nCONCLUSÃO\n\n ")
    # Declare the overall winner by round count.
    if qtd_br > qtd_bi:
        print("Busca Binária Recursiva teve o melhor desempenho!")
    else:
        print("Busca Binária Iterativa teve o melhor desempenho!")
    print("Quantidade Binária Recursiva: ", qtd_br)
    print("Quantidade Binária Iterativa: ", qtd_bi)
| 3.484375 | 3 |
ccvpn/views/__init__.py | CCrypto/ccvpn | 81 | 13429 | <filename>ccvpn/views/__init__.py
import codecs
import markdown
import os
import logging
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPOk, HTTPNotFound
from sqlalchemy import func
from mako.lookup import TemplateLookup
import mako.exceptions
logger = logging.getLogger(__name__)
from ccvpn.models import DBSession, User, IcingaError, IcingaQuery, Gateway, VPNSession
from ccvpn.views import account, admin, api, order # noqa
@view_config(context=Exception)
def error_view(exc, request):
    """Log any unhandled exception with its traceback, then re-raise so the
    framework's normal error handling still runs."""
    logger.exception('Exception', exc_info=exc)
    raise
@view_config(route_name='home', renderer='home.mako')
def home(request):
    """Render the landing page with monthly prices and an optional MOTD."""
    cfg = request.registry.settings
    eur = float(cfg.get('paypal.month_price', 2))
    btc = float(cfg.get('bitcoin.month_price', 0.02))
    return {'eur_price': eur, 'btc_price': btc, 'motd': cfg.get('motd')}
@view_config(route_name='ca_crt')
def ca_crt(request):
    # Serve the OpenVPN CA certificate as a plain 200 response body.
    return HTTPOk(body=account.openvpn_ca)
@view_config(route_name='page', renderer='page.mako')
def page(request):
    """Render a Markdown page from pages/, preferring a locale-specific file.

    Looks for ``<page>.<locale>.md`` first, then ``<page>.md``; returns 404
    when neither exists.  The literal ``${irc_username}`` placeholder in the
    page source is substituted before Markdown conversion.
    """
    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    pagesdir = os.path.join(root, 'pages/')
    basename = pagesdir + request.matchdict['page']

    # '?' for anonymous visitors.
    irc_username = request.user.username if request.user else '?'

    try:
        translated_file = basename + '.' + request.locale_name + '.md'
        fallback_file = basename + '.md'
        if os.path.isfile(translated_file):
            template = translated_file
        elif os.path.isfile(fallback_file):
            template = fallback_file
        else:
            raise FileNotFoundError()
        with open(template, encoding='utf8') as template_f:
            mdt = template_f.read()

        mdt = mdt.replace('${irc_username}', irc_username)

        md = markdown.Markdown(extensions=['toc', 'meta',
                                           'codehilite(noclasses=True)'])
        content = md.convert(mdt)
        # Markdown "meta" values are lists; the page title is optional.
        title = md.Meta['title'][0] if 'title' in md.Meta else None
        return {'content': content, 'title': title}
    except FileNotFoundError:
        return HTTPNotFound()
def format_bps(bits):
    """Render a bit rate with the largest fitting SI prefix, e.g. '1.5Kbps'."""
    for threshold, prefix in ((1e9, 'G'), (1e6, 'M'), (1e3, 'K'), (0, '')):
        if bits >= threshold:
            scaled = bits / (threshold or 1)
            return '{:2g}{}bps'.format(scaled, prefix)
@view_config(route_name='status', renderer='status.mako')
def status(request):
    """Public status page: per-gateway details plus aggregate counters."""
    settings = request.registry.settings
    domain = settings.get('net_domain', '')
    gateways = DBSession.query(Gateway) \
        .filter_by(enabled=True) \
        .order_by(Gateway.country, Gateway.name) \
        .all()
    l = list(gateways)
    for host in l:
        # Displayed DNS name, e.g. "fr-gw1.<domain>".
        host.host_name = '%s-%s.%s'%(host.country, host.name, domain)
        host.bps_formatted = format_bps(host.bps)
    return {
        'gateways': l,
        'n_users': DBSession.query(func.count(User.id))
                            .filter_by(is_paid=True).scalar(),
        'n_connected': DBSession.query(func.count(VPNSession.id)) \
                                .filter(VPNSession.is_online==True).scalar(),
        'n_countries': len(set(i.country for i in l)),
        'total_bw': format_bps(sum(i.bps for i in l)),
    }
| 2 | 2 |
rx/subjects/subject.py | MichaelSchneeberger/RxPY | 0 | 13430 | <filename>rx/subjects/subject.py
import threading
from typing import Any, List, Optional
from rx.disposable import Disposable
from rx.core.typing import Observer, Scheduler
from rx.core import Observable, typing
from rx.internal import DisposedException
from .anonymoussubject import AnonymousSubject
from .innersubscription import InnerSubscription
class Subject(Observable, Observer):
    """Represents an object that is both an observable sequence as well
    as an observer. Each notification is broadcasted to all subscribed
    observers.
    """

    def __init__(self) -> None:
        super().__init__()

        self.is_disposed = False
        self.is_stopped = False
        # Current subscribers; mutated only while holding self.lock.
        self.observers: List[Observer] = []
        self.exception: Optional[Exception] = None

        self.lock = threading.RLock()

    def check_disposed(self) -> None:
        """Raise DisposedException if dispose() has been called."""
        if self.is_disposed:
            raise DisposedException()

    def _subscribe_core(self, observer: Observer, scheduler: Scheduler = None) -> typing.Disposable:
        with self.lock:
            self.check_disposed()
            if not self.is_stopped:
                self.observers.append(observer)
                return InnerSubscription(self, observer)

            # Late subscribers to a terminated subject immediately receive
            # the terminal notification instead of being registered.
            if self.exception:
                observer.on_error(self.exception)
                return Disposable()

            observer.on_completed()
            return Disposable()

    def on_completed(self) -> None:
        """Notifies all subscribed observers of the end of the
        sequence."""
        observers = None
        with self.lock:
            self.check_disposed()
            if not self.is_stopped:
                # Snapshot under the lock; callbacks run after it is released.
                observers = self.observers[:]
                self.observers = []
                self.is_stopped = True

        if observers:
            for observer in observers:
                observer.on_completed()

    def on_error(self, error: Exception) -> None:
        """Notifies all subscribed observers with the exception.

        Args:
            error: The exception to send to all subscribed observers.
        """
        os = None
        with self.lock:
            self.check_disposed()
            if not self.is_stopped:
                # Snapshot under the lock; callbacks run after it is released.
                os = self.observers[:]
                self.observers = []
                self.is_stopped = True
                self.exception = error

        if os:
            for observer in os:
                observer.on_error(error)

    def on_next(self, value: Any) -> None:
        """Notifies all subscribed observers with the value.

        Args:
            value: The value to send to all subscribed observers.
        """
        os = None
        with self.lock:
            self.check_disposed()
            if not self.is_stopped:
                # Snapshot under the lock; callbacks run after it is released.
                os = self.observers[:]

        if os:
            for observer in os:
                observer.on_next(value)

    def dispose(self) -> None:
        """Unsubscribe all observers and release resources."""
        with self.lock:
            self.is_disposed = True
            self.observers = []

    @classmethod
    def create(cls, observer, observable):
        # Alternate constructor: pair an arbitrary observer with an
        # arbitrary observable.
        return AnonymousSubject(observer, observable)
| 2.5625 | 3 |
scripts/uda.py | nng555/fairseq | 2 | 13431 | <filename>scripts/uda.py
import os
import hydra
import subprocess
import logging
from omegaconf import DictConfig
from hydra import slurm_utils
log = logging.getLogger(__name__)
@hydra.main(config_path='/h/nng/conf/robust/config.yaml', strict=False)
def launch(cfg: DictConfig):
    """Pick the dataset root for the configured task and run the UDA
    back-translation shell script with the generation parameters.

    Raises:
        Exception: when cfg.data.task has no known dataset root.
    """
    os.environ['NCCL_DEBUG'] = 'INFO'
    if cfg.data.task in ['nli']:
        base_path = '/scratch/ssd001/datasets/'
    elif cfg.data.task in ['sentiment']:
        base_path = '/h/nng/data'
    else:
        # Bug fix: the original mixed a %-style "%s" placeholder with
        # str.format(), so the exception message contained a literal "%s"
        # instead of the task name.
        raise Exception('task {} data path not found'.format(cfg.data.task))

    data_dir = os.path.join(base_path, cfg.data.task, cfg.data.name, cfg.data.fdset)
    flags = [data_dir, str(cfg.gen.num_shards), str(cfg.gen.shard),
             str(cfg.gen.sampling_temp), cfg.gen.fname]
    command = ['bash', 'run.sh'] + flags

    # run.sh must be invoked from the back_translate checkout directory.
    os.chdir('/h/nng/programs/uda/back_translate')
    log.info(' '.join(command))
    subprocess.call(command)
launch()
| 1.859375 | 2 |
06_Business/application_iris/app.py | MaryMP11/The_Bridge_School_DataScience_PT | 0 | 13432 | <reponame>MaryMP11/The_Bridge_School_DataScience_PT
from flask import Flask, request, jsonify, session, url_for, redirect, render_template
import joblib
from flower_form import FlowerForm
# Pre-trained KNN classifier and the label encoder that maps class indices
# back to species names; both are loaded once at import time.
classifier_loaded = joblib.load("application_iris/saved_models/knn_iris_dataset.pkl")
encoder_loaded = joblib.load("application_iris/saved_models/iris_label_encoder.pkl")
# prediction function
# prediction function
def make_prediction(model, encoder, sample_json):
    """Predict the iris species name for one sample.

    sample_json must contain the four measurement keys (SepalLengthCm,
    SepalWidthCm, PetalLengthCm, PetalWidthCm); the encoder translates the
    model's class index back into a species name.
    """
    features = [[
        sample_json['SepalLengthCm'],
        sample_json['SepalWidthCm'],
        sample_json['PetalLengthCm'],
        sample_json['PetalWidthCm'],
    ]]
    encoded = model.predict(features)
    return encoder.inverse_transform(encoded)[0]
app = Flask(__name__)
# NOTE(review): hard-coded secret key; acceptable for a demo, but it should
# be loaded from the environment before any real deployment.
app.config['SECRET_KEY'] = 'mysecretkey'
@app.route("/", methods=['GET','POST'])
def index():
    """Show the measurement form; on a valid POST, stash the four values in
    the session and redirect to the prediction page."""
    form = FlowerForm()

    if form.validate_on_submit():
        session['SepalLengthCm'] = form.SepalLengthCm.data
        session['SepalWidthCm'] = form.SepalWidthCm.data
        session['PetalLengthCm'] = form.PetalLengthCm.data
        session['PetalWidthCm'] = form.PetalWidthCm.data

        return redirect(url_for("prediction"))
    return render_template("home.html", form=form)
# Read models
# classifier_loaded = joblib.load("saved_models/01.knn_with_iris_dataset.pkl")
# encoder_loaded = joblib.load("saved_models/02.iris_label_encoder.pkl")
@app.route('/prediction')
def prediction():
    """Read the measurements stored by index() from the session and render
    the predicted species."""
    # Session values are stored as strings; the model expects floats.
    content = {'SepalLengthCm': float(session['SepalLengthCm']), 'SepalWidthCm': float(session['SepalWidthCm']),
               'PetalLengthCm': float(session['PetalLengthCm']), 'PetalWidthCm': float(session['PetalWidthCm'])}

    results = make_prediction(classifier_loaded, encoder_loaded, content)

    return render_template('prediction.html', results=results)
if __name__ == '__main__':
    # Binds on all interfaces; intended for local/container use.
    app.run(host='0.0.0.0', port=8080)
test.py | EdwinChan/python-physical | 2 | 13433 | import math
import re
import unittest
import urllib.error
import urllib.request
from .core import Quantity
from .define import defined_systems
si = defined_systems['si']
esu = defined_systems['esu']
emu = defined_systems['emu']
gauss = defined_systems['gauss']
class PhysicalQuantitiesTest(unittest.TestCase):
    """Tests for Quantity arithmetic, unit handling, and the values of
    physical constants across the SI/ESU/EMU/Gaussian unit systems."""

    def assert_quantity_equal(self, first, second):
        # Helper: two quantities match in value, error, units, and system.
        self.assertAlmostEqual(first.value, second.value)
        self.assertAlmostEqual(first.error, second.error)
        self.assertEqual(first.units, second.units)
        self.assertEqual(first.system, second.system)

    def test_sign(self):
        a = Quantity(1, 0.2, {'Kilogram': 1}, si)
        b = Quantity(-1, 0.2, {'Kilogram': 1}, si)
        self.assert_quantity_equal(+a, a)
        self.assert_quantity_equal(+b, b)
        self.assert_quantity_equal(-a, b)
        self.assert_quantity_equal(-b, a)
        self.assert_quantity_equal(abs(a), a)
        self.assert_quantity_equal(abs(b), a)

    def test_add(self):
        # Errors combine in quadrature; mixed but compatible units expand.
        a = Quantity(1, 0.2, {'Newton': 1}, si)
        b = Quantity(3, 0.4, {'Kilogram': 1, 'Meter': 1, 'Second': -2}, si)
        c = Quantity(4, 1 / math.sqrt(5), {'Newton': 1}, si)
        d = Quantity(1, 0.2, {'Kilogram': 1}, si)
        self.assert_quantity_equal(a + b, c.expand())
        with self.assertRaises(TypeError): a + d
        with self.assertRaises(TypeError): a + 1

    def test_subtract(self):
        a = Quantity(1, 0.2, {'Newton': 1}, si)
        b = Quantity(3, 0.4, {'Kilogram': 1, 'Meter': 1, 'Second': -2}, si)
        c = Quantity(-2, 1 / math.sqrt(5), {'Newton': 1}, si)
        d = Quantity(1, 0.2, {'Kilogram': 1}, si)
        self.assert_quantity_equal(a - b, c.expand())
        with self.assertRaises(TypeError): a - d
        with self.assertRaises(TypeError): a - 1

    def test_multiply(self):
        a = Quantity(1, 0.2, {'Kilogram': 1}, si)
        b = Quantity(3, 0.4, {'Meter': -2}, si)
        c = Quantity(3, math.sqrt(13) / 5, {'Kilogram': 1, 'Meter': -2}, si)
        self.assert_quantity_equal(a * b, c)
        a = Quantity(1, 0.2, {'Kilogram': 1}, si) * 5
        b = Quantity(5, 1, {'Kilogram': 1}, si)
        self.assert_quantity_equal(a, b)
        a = Quantity(1, 0.2, {'Kilogram': 1}, si) * -5
        b = Quantity(-5, 1, {'Kilogram': 1}, si)
        self.assert_quantity_equal(a, b)
        a = 5 * Quantity(3, 0.4, {'Kilogram': 1}, si)
        b = Quantity(15, 2, {'Kilogram': 1}, si)
        self.assert_quantity_equal(a, b)
        a = -5 * Quantity(3, 0.4, {'Kilogram': 1}, si)
        b = Quantity(-15, 2, {'Kilogram': 1}, si)
        self.assert_quantity_equal(a, b)

    def test_divide(self):
        a = Quantity(2, 0.1, {'Kilogram': 1}, si)
        b = Quantity(4, 0.3, {'Meter': -2}, si)
        c = Quantity(0.5, math.sqrt(13) / 80,
                     {'Kilogram': 1, 'Meter': 2}, si)
        self.assert_quantity_equal(a / b, c)
        a = Quantity(1, 0.2, {'Kilogram': 1}, si) / 5
        b = Quantity(0.2, 0.04, {'Kilogram': 1}, si)
        self.assert_quantity_equal(a, b)
        a = Quantity(1, 0.2, {'Kilogram': 1}, si) / -5
        b = Quantity(-0.2, 0.04, {'Kilogram': 1}, si)
        self.assert_quantity_equal(a, b)
        a = 5 / Quantity(3, 0.4, {'Kilogram': 1}, si)
        b = Quantity(5/3, 2/9, {'Kilogram': -1}, si)
        self.assert_quantity_equal(a, b)
        a = -5 / Quantity(3, 0.4, {'Kilogram': 1}, si)
        b = Quantity(-5/3, 2/9, {'Kilogram': -1}, si)
        self.assert_quantity_equal(a, b)

    def test_power(self):
        a = Quantity(3, 0.4, {'Kilogram': 1, 'Meter': 1}, si) ** 5
        b = Quantity(243, 162, {'Kilogram': 5, 'Meter': 5}, si)
        self.assert_quantity_equal(a, b)

    def test_almost_equals(self):
        a = Quantity(1, 0.5, {'Kilogram': 1}, si)
        b = Quantity(2, 0.7, {'Kilogram': 1}, si)
        c = Quantity(3, 0.9, {'Kilogram': 1}, si)
        d = Quantity(1, 0.5, {'Meter': 1}, si)
        e = Quantity(1, 0.5, {}, si)
        f = Quantity(2, 0.7, {}, si)
        self.assertTrue(a.almost_equals(b))
        self.assertFalse(a.almost_equals(c))
        self.assertRaises(TypeError, a.almost_equals, d)
        for x in [a, b, c, d]:
            self.assertRaises(TypeError, x.almost_equals, 1)
        # Dimensionless quantities compare against plain numbers.
        self.assertTrue(e.almost_equals(1))
        self.assertTrue(f.almost_equals(2))
        self.assertFalse(e.almost_equals(2))
        self.assertFalse(f.almost_equals(1))
        self.assertTrue(e.almost_equals(f))

    def test_float(self):
        # Dimensionless combinations coerce to float for math functions.
        a = Quantity(1, 0, {'Second': 1, 'Hertz': 1}, si)
        b = Quantity(365.25 * 86400, 0, {'Second': 1, 'JulianYear': -1}, si)
        self.assertEqual(math.cos(a), math.cos(1))
        self.assertEqual(math.cos(b), math.cos(1))

    def test_expand(self):
        # Lorentz force
        a = Quantity(1, 0,
                     {'Coulomb': 1, 'Meter': 1, 'Second': -1, 'Tesla': 1}, si)
        b = Quantity(1, 0, {'Newton': 1}, si)
        self.assert_quantity_equal(a.expand(), b.expand())
        # Faraday's law
        a = Quantity(1, 0, {'Weber': 1, 'Second': -1}, si)
        b = Quantity(1, 0, {'Volt': 1}, si)
        self.assert_quantity_equal(a.expand(), b.expand())
        # torque of a motor
        a = Quantity(1, 0, {'Ampere': 1, 'Tesla': 1, 'Meter': 2}, si)
        b = Quantity(1, 0, {'Newton': 1, 'Meter': 1}, si)
        self.assert_quantity_equal(a.expand(), b.expand())
        # resonance frequency of an RLC circuit
        a = Quantity(1, 0, {'Henry': -1/2, 'Farad': -1/2}, si)
        b = Quantity(1, 0, {'Hertz': 1}, si)
        self.assert_quantity_equal(a.expand(), b.expand())

    def test_simple_constants(self):
        for system in defined_systems.values():
            a = Quantity(13.6, 0,
                         {'ElectronVolt': 1, 'RydbergEnergy': -1}, system).expand()
            self.assertAlmostEqual(a.value, 1, places=3)
            self.assertEqual(a.units, {})
            a = system.get_constant('FineStructureConstant').expand() * 137
            self.assertAlmostEqual(a.value, 1, places=3)
            self.assertEqual(a.units, {})

    def test_electromagnetic_constants(self):
        from . import si, esu, emu, gauss
        # Coulomb energy at the Bohr radius, expressed in ergs, must agree
        # across the four unit systems.
        a = (si.e**2 / si.a0 / (4*math.pi*si.epsilon0) / (1e-7*si.J)).expand()
        b = (esu.e**2 / esu.a0 / esu.erg).expand()
        c = (emu.e**2 / emu.a0 * emu.c**2 / emu.erg).expand()
        d = (gauss.e**2 / gauss.a0 / gauss.erg).expand()
        self.assertAlmostEqual(a.value * 1e11, b.value * 1e11)
        self.assertAlmostEqual(a.value * 1e11, c.value * 1e11)
        self.assertAlmostEqual(a.value * 1e11, d.value * 1e11)
        a = (si.muB**2 / si.a0**3 * si.mu0 / (1e-7*si.J)).expand()
        b = (esu.muB**2 / esu.a0**3 / esu.c**2 / esu.erg).expand()
        c = (emu.muB**2 / emu.a0**3 / emu.erg).expand()
        d = (gauss.muB**2 / gauss.a0**3 / gauss.erg).expand()
        self.assertAlmostEqual(a.value * 1e3, b.value * 1e3)
        self.assertAlmostEqual(a.value * 1e3, c.value * 1e3)
        self.assertAlmostEqual(a.value * 1e3, d.value * 1e3)

    def test_codata(self):
        # Compares local unit/constant definitions against the live CODATA
        # table from NIST; requires network access.
        url = 'http://physics.nist.gov/cuu/Constants/Table/allascii.txt'
        units = {
            'AtomicMassUnit': 'unified atomic mass unit'}
        constants = {
            'AvogadroConstant': 'Avogadro constant',
            'ElectronGFactor': 'electron g factor',
            'ProtonGFactor': 'proton g factor',
            'NeutronGFactor': 'neutron g factor',
            'MuonGFactor': 'muon g factor',
            'LightSpeed': 'speed of light in vacuum',
            'ElementaryCharge': 'atomic unit of charge',
            'PlanckConstant': 'Planck constant',
            'BoltzmannConstant': 'Boltzmann constant',
            'GravitationalConstant': 'Newtonian constant of gravitation',
            'VacuumPermeability': 'vacuum mag. permeability',
            'ElectronMass': 'electron mass',
            'ProtonMass': 'proton mass',
            'NeutronMass': 'neutron mass',
            'MuonMass': 'muon mass'}
        try:
            response = urllib.request.urlopen(url)
        except urllib.error.URLError:
            raise ValueError('Cannot download data.')
        data = iter(response.read().decode('ascii').rstrip('\n').split('\n'))
        # Skip the table header up to the "----" separator line.
        while not next(data).startswith('--'):
            pass
        data = (re.split(' {2,}', x) for x in data)
        def parse_value(x):
            return float(x.replace(' ', '').replace('...', ''))
        def parse_error(x):
            return 0 if x == '(exact)' else float(x.replace(' ', ''))
        data = {x: (parse_value(y), parse_error(z)) for x, y, z, *_ in data}
        for local_name, codata_name in units.items():
            quantity = Quantity(1, 0, {local_name: 1}, si).expand()
            x, y = data[codata_name]
            assert math.isclose(quantity.value, x)
            assert math.isclose(quantity.error, y)
        for local_name, codata_name in constants.items():
            quantity = si.get_constant(local_name).expand()
            x, y = data[codata_name]
            assert math.isclose(quantity.value, x)
            assert math.isclose(quantity.error, y)
if __name__ == '__main__':
unittest.main()
| 3.125 | 3 |
test/test_cirrus_ngs/test_cfnCluster/test_ConnectionManager.py | ucsd-ccbb/cirrus-ngs | 8 | 13434 | import unittest
import sys
import os
sys.path.append(os.getcwd().replace("test", "src"))
import cirrus_ngs.cfnCluster.ConnectionManager as ConnectionManager
import paramiko
import tempfile
import re
##THIS TEST WILL NOT WORK##
class test_ConnectionManager(unittest.TestCase):
    """Tests for cfnCluster.ConnectionManager.

    NOTE(review): as the banner above says, these tests cannot run as-is:
    the hostname/key-path variables are intentionally blank because the
    author's SSH key and cluster IP are not committed to the repo.
    """
    def test_paramiko(self):
        # A file that is not a valid private key must be rejected.
        key_file = tempfile.NamedTemporaryFile()
        key_file.write(b"notakey")
        # NOTE(review): no key_file.seek(0) here (test_connect_master has
        # one) — the written bytes may still be buffered; confirm intended.
        self.assertRaises(paramiko.SSHException, paramiko.RSAKey.from_private_key_file, key_file.name)
        key_file.close()
        # key path (left blank on purpose — see class note)
        new_key = ""
        # checks to make sure a real key file works. will not be portable
        # leaving my ssh key for users to download for tests seems not smart
        paramiko.RSAKey.from_private_key_file(new_key)
    def test_connect_master(self):
        # ip (left blank on purpose)
        hostname = ""
        username = "ec2-user"
        # An invalid key must make connect_master raise.
        key_file = tempfile.NamedTemporaryFile()
        key_file.write(b"not_a_key")
        key_file.seek(0)
        self.assertRaises(paramiko.SSHException, ConnectionManager.connect_master, hostname, username, key_file.name)
        key_file.close()
        # this won't even work elsewhere but I don't want to put my keyfile into the repo
        # key path
        new_key = ""
        ConnectionManager.connect_master(hostname, username, new_key)
        # checks if last line in the standard output is "connected"
        # (relies on unittest buffer=True replacing sys.stdout with StringIO)
        out = sys.stdout.getvalue().strip()
        last_line = out.split()[-1]
        self.assertEqual(last_line, "connected")
        # checks that "connected" and "connecting" are each printed exactly once
        num_connected = len(re.findall("connected", out))
        self.assertEqual(1, num_connected)
        num_connecting = len(re.findall("connecting", out))
        self.assertEqual(1, num_connecting)
    def test_execute_command(self):
        # ip (left blank on purpose)
        hostname = ""
        username = "ec2-user"
        # key path (left blank on purpose)
        key = ""
        ssh_client = ConnectionManager.execute_command if False else ConnectionManager.connect_master(hostname, username, key)
        command = "pwd"
        # checks that the pwd command worked
        self.assertEqual(ConnectionManager.execute_command(ssh_client, command), "/home/ec2-user\n")
        ssh_client = "not an ssh_client"
        # makes sure that an error is raised when a non sshclient is passed in
        self.assertRaises(AttributeError, ConnectionManager.execute_command, ssh_client, command)
    def test_copy_file(self):
        # ip (left blank on purpose)
        hostname = ""
        username = "ec2-user"
        # key path (left blank on purpose)
        key = ""
        ssh_client = ConnectionManager.connect_master(hostname, username, key)
        temp = tempfile.NamedTemporaryFile()
        localpath = temp.name
        remotepath = "/home/ec2-user"
        ConnectionManager.copy_file(ssh_client, localpath, remotepath)
        out = sys.stdout.getvalue().strip().split()[-2:]
        # checks that the copy file prints the local and remote paths
        self.assertEqual(out, [localpath, remotepath])
        ls_output = ConnectionManager.execute_command(ssh_client,
            "ls tmp* | wc -l")
        ConnectionManager.execute_command(ssh_client, "rm tmp*")
        # checks that there is exactly 1 tempfile in the home directory of the server
        self.assertEqual(ls_output.strip(), "1")
        # makes sure it doesn't work with a nonfile
        self.assertRaises(FileNotFoundError, ConnectionManager.copy_file,
            ssh_client, "fakefile", "/home/ec2-user")
#########################################################################
#copy_gatk, list_dir, and close_connection are considered trivial methods
#and are not tested
#########################################################################
if __name__ == "__main__":
unittest.main(module=__name__, buffer=True, exit=False)
| 2.453125 | 2 |
src/backend/opus/opusctl/cmds/process.py | DTG-FRESCO/opus | 0 | 13435 | <reponame>DTG-FRESCO/opus
# -*- coding: utf-8 -*-
'''
Commands for launching processes with or without OPUS interposition.
'''
from __future__ import absolute_import, division, print_function
import argparse
import os
import psutil
from .. import config, server_start, utils
def get_current_shell():
    """Return the shell that invoked this process, plus its arguments.

    Inspects the parent process (the shell that ran this tool) so that a
    later ``os.execvp`` can drop the user back into the same shell.

    :returns: tuple ``(shell_executable_path, shell_arguments)`` where the
        arguments exclude argv[0] (the shell executable itself).
    """
    parent = psutil.Process(os.getppid())
    cur_shell = parent.exe()
    # Drop argv[0]; callers re-add the executable when exec-ing.
    shell_args = parent.cmdline()[1:]
    return cur_shell, shell_args
@config.auto_read_config
def handle_launch(cfg, binary, arguments):
    """Launch *binary* (or the invoking shell) under OPUS interposition.

    Ensures the OPUS server is running, injects the interposition library
    via LD_PRELOAD, exports the OPUS_* environment expected by the library,
    then replaces the current process with the target binary.

    :param cfg: parsed OPUS configuration (injected by the decorator).
    :param binary: program to run, or falsy to re-exec the current shell.
    :param arguments: argument list passed to the program.
    """
    # Start the server first; abort the launch if it cannot be started.
    if not utils.is_server_active(cfg=cfg):
        if not server_start.start_opus_server(cfg):
            print("Aborting command launch.")
            return
    opus_preload_lib = utils.path_normalise(os.path.join(cfg['install_dir'],
                                                         'lib',
                                                         'libopusinterpose.so')
                                            )
    # Append to any existing LD_PRELOAD rather than clobbering it, and
    # avoid adding the library twice.
    if 'LD_PRELOAD' in os.environ:
        if opus_preload_lib not in os.environ['LD_PRELOAD']:
            os.environ['LD_PRELOAD'] = (os.environ['LD_PRELOAD'] + " " +
                                        opus_preload_lib)
    else:
        os.environ['LD_PRELOAD'] = opus_preload_lib
    # server_addr is either "unix://<path>" or "tcp://<host>:<port>";
    # the slicing below extracts the scheme and address parts.
    if cfg['server_addr'][:4] == "unix":
        os.environ['OPUS_UDS_PATH'] = utils.path_normalise(cfg['server_addr'][7:])
        os.environ['OPUS_PROV_COMM_MODE'] = cfg['server_addr'][:4]
    else:
        os.environ['OPUS_PROV_COMM_MODE'] = cfg['server_addr'][:3]
        addr = cfg['server_addr'][6:].split(":")
        os.environ['OPUS_TCP_ADDRESS'] = addr[0]
        os.environ['OPUS_TCP_PORT'] = addr[1]
    os.environ['OPUS_MSG_AGGR'] = "1"
    os.environ['OPUS_MAX_AGGR_MSG_SIZE'] = "65536"
    os.environ['OPUS_LOG_LEVEL'] = "3"  # Log critical
    os.environ['OPUS_INTERPOSE_MODE'] = "1"  # OPUS lite
    if not binary:
        binary, arguments = get_current_shell()
    # Replace the current process; does not return on success.
    os.execvp(binary, [binary] + arguments)
@config.auto_read_config
def handle_exclude(cfg, binary, arguments):
    """Launch *binary* (or the invoking shell) without OPUS interposition.

    Clears the OPUS environment when it is active, then replaces the
    current process with the requested program.

    :param cfg: parsed OPUS configuration (injected by the decorator).
    :param binary: program to run, or falsy to re-exec the current shell.
    :param arguments: argument list passed to the program.
    """
    if utils.is_opus_active():
        utils.reset_opus_env(cfg)
    else:
        print("OPUS is not active.")
    if binary:
        target, target_args = binary, arguments
    else:
        target, target_args = get_current_shell()
    # Replace the current process; does not return on success.
    os.execvp(target, [target] + target_args)
def handle(cmd, **params):
    """Dispatch a ``process`` sub-command to its handler.

    :param cmd: sub-command name, ``"launch"`` or ``"exclude"``; anything
        else is silently ignored (matching argparse's restricted choices).
    :param params: keyword arguments forwarded to the handler.
    """
    dispatch = {
        "launch": handle_launch,
        "exclude": handle_exclude,
    }
    handler = dispatch.get(cmd)
    if handler is not None:
        handler(**params)
def setup_parser(parser):
    """Attach the ``launch`` and ``exclude`` sub-commands to *parser*.

    Both sub-commands accept the same arguments (an optional binary plus
    pass-through arguments), so the duplicated construction is collapsed
    into a single loop.

    :param parser: argparse parser (or sub-parser) to populate.
    """
    cmds = parser.add_subparsers(dest="cmd")
    sub_commands = [
        ("launch", "Launch a process under OPUS."),
        ("exclude", "Launch a process excluded from OPUS interposition."),
    ]
    for name, help_text in sub_commands:
        sub = cmds.add_parser(name, help=help_text)
        sub.add_argument(
            "binary", nargs='?',
            help="The binary to be launched. Defaults to the current shell.")
        sub.add_argument(
            "arguments", nargs=argparse.REMAINDER,
            help="Any arguments to be passed.")
| 2.109375 | 2 |
tests/unit/l2_infrastructure/test_app_collection_config_parser.py | ansible-self-service/ansible-self-service | 0 | 13436 | <reponame>ansible-self-service/ansible-self-service
import pytest
from ansible_self_service.l2_infrastructure.app_collection_config_parser import AppCollectionConfigValidationException, \
YamlAppCollectionConfigParser
from ansible_self_service.l4_core.models import AppCategory, App
VALID_CATEGORY_NAME = 'Misc'
VALID_ITEM_NAME = 'Cowsay'
VALID_ITEM_DESCRIPTION = 'Let an ASCII cow say stuff in your terminal!'
VALID_CONFIG = f"""
categories:
{VALID_CATEGORY_NAME}: {{}}
items:
{VALID_ITEM_NAME}:
description: |
{VALID_ITEM_DESCRIPTION}
categories:
- {VALID_CATEGORY_NAME}
image_url: https://upload.wikimedia.org/wikipedia/commons/8/80/Cowsay_Typical_Output.png
playbook: playbooks/cowsay.yml
params:
ansible_become_password:
type: secret
mandatory: true
requirements: > # any expression that we could use for a tasks "when" clause; items are ANDed
- ansible_distribution == 'Ubuntu'
"""
INVALID_CONFIG = '''
this is not even YAML
'''
def test_parse_valid_file(tmpdir):
    """A well-formed config file yields the declared categories and apps."""
    config_file = tmpdir.join('self-service.yaml')
    config_file.write(VALID_CONFIG)
    repo_config_parser = YamlAppCollectionConfigParser()
    categories, apps = repo_config_parser.from_file(config_file)
    # Category and app metadata must round-trip from the YAML document.
    assert categories == [AppCategory(name=VALID_CATEGORY_NAME)]
    assert apps == [App(
        name=VALID_ITEM_NAME, description=VALID_ITEM_DESCRIPTION, categories=[AppCategory(name=VALID_CATEGORY_NAME)])
    ]
def test_parse_invalid_file(tmpdir):
    """A malformed config file raises the dedicated validation exception."""
    config_file = tmpdir.join('self-service.yaml')
    config_file.write(INVALID_CONFIG)
    repo_config_parser = YamlAppCollectionConfigParser()
    with pytest.raises(AppCollectionConfigValidationException):
        repo_config_parser.from_file(config_file)
| 2.3125 | 2 |
api-server.py | proatria/sftpplus-api-example | 0 | 13437 | <filename>api-server.py<gh_stars>0
"""
Run a simple HTTP server which provides API endpoint for SFTPPlus.
Usage:
server.py [options]
-h --help Show this help.
-p --port=8000 Listen to a specific port. [default: 8080]
-a --address=127.0.0.1 Listen on specific address. [default: 0.0.0.0]
-c --certificate=PATH Enable HTTPS by defining the path
to a file containing server key, certificate, and CA chain
all PEM format and stored in a single file.
-f --flaky Introduce random errors to test SFTPPlus API retry functionality.
The following API endpoints are provided:
* /auth-api - For the authentication API
* /event-api - For the event handler API
"""
from __future__ import absolute_import, unicode_literals
import base64
import json
import ssl
from random import randint
from aiohttp import web
from docopt import docopt
# Command line handling part.
arguments = docopt(__doc__)
# Convert arguments to usable types.
port = int(arguments["--port"])
# Need to escape the address for ipv6.
address = arguments["--address"].replace(":", r"\:")
is_flaky = arguments["--flaky"]
certificate = arguments["--certificate"]
# Set to lower values to increase the probability of a failure.
_FLAKY_DEGREE = 3
# DB with accepted accounts.
# Each key is the name of an user.
# Each value contains the accepted password and/or SSH-key.
ACCOUNTS = {
# An account with some custom configuration.
# Configuration that is not explicitly defined here is extracted based on
# the SFTPPlus group.
"test-user": {
"password": "<PASSWORD>",
# Just the public key value, in OpenSSH format.
# Without hte key type or comments.
"ssh-public-key": "<KEY>
"configuration": {
"home_folder_path": "/tmp",
# EXTRA_DATA is not yet supported.
# 'extra_data': {
# 'file_api_token': '<PASSWORD>',
# },
},
},
# An account with default configuration extracted from
# the default SFTPPlus group.
# SSH-Key authentication is disabled for this user.
"default-user": {
"password": "<PASSWORD>",
"ssh-public-key": "",
"configuration": {},
},
}
async def handle_root(request):
    """Serve a plain-text landing page for the API root URL."""
    banner = "Demo SFTPPlus API endpoints."
    return web.Response(text=banner)
async def handle_auth(request):
    """Handle an SFTPPlus authentication API call.

    Looks the user up in the in-memory ACCOUNTS table and validates either
    a password or an SSH public key. Responses:

    * 401 - user unknown here; SFTPPlus may try other auth methods.
    * 403 - credentials rejected or unsupported credential type.
    * 200 - JSON body with the account configuration on success.
    * 500 - emulated transient failure when running with --flaky.
    """
    request_json = await get_json(request)
    print("\n\n")
    print("-" * 80)
    print("New authentication request received")
    print(json.dumps(request_json, indent=2))
    # Optionally emulate a transient server error to exercise SFTPPlus
    # retry handling.
    if is_flaky and randint(0, _FLAKY_DEGREE) == 0:
        print("TRIGGERING AN EMULATED FAILURE")
        return web.Response(status=500, text="Failed to process the request")
    credentials = request_json["credentials"]
    account = ACCOUNTS.get(credentials["username"], None)
    if account is None:
        # This is not an account handled by this authentication API.
        # Inform SFTPPlus that it can try to authenticate the user via other
        # method (LDAP, or another HTTP authentication server).
        print("UNKNOWN USER")
        return web.Response(
            status=401, text="User not handled by our API. Try other method."
        )
    # On success SFTPPlus receives the per-account configuration overrides.
    response = {"account": account.get("configuration", {})}
    if credentials["type"] in ["password", "password-basic-auth"]:
        # We have password based authentication.
        if credentials["content"] != account["password"]:
            print("INVALID PASSWORD")
            return web.Response(status=403, text="Password rejected.")
        # Valid password.
        print("VALID PASSWORD")
        return web.json_response(response)
    if credentials["type"] == "ssh-key":
        # We have SSH-key based authentication.
        # The keys are encoded as BASE64, but we compare them as bytes so
        # that different-but-equivalent encodings still match.
        if base64.b64decode(credentials["content"]) != base64.b64decode(
            account["ssh-public-key"]
        ):
            print("INVALID SSH-KEY")
            return web.Response(status=403, text="SSH-Key rejected.")
        # Valid SSH key authentication.
        print("VALID SSH-KEY")
        return web.json_response(response)
    return web.Response(status=403, text="Credentials type not supported.")
async def handle_event(request):
    """Handle an SFTPPlus event handler API call.

    Logs the request headers and JSON payload, optionally emulates a
    transient failure (``--flaky`` mode), and otherwise acknowledges the
    event with an empty 204 response so SFTPPlus does not retry.
    """
    separator = "-" * 80
    print("\n\n")
    print(separator)
    print("New event handler call")
    print(separator)
    print("Headers:")
    for key, value in request.headers.items():
        print(f"  {key}: {value}")
    print(separator)
    print("Payload:")
    await get_json(request)
    emulate_failure = is_flaky and randint(0, _FLAKY_DEGREE) == 0
    if emulate_failure:
        print("TRIGGERING AN EMULATED FAILURE")
        return web.Response(status=500, text="Failed to process the request")
    # An empty 204 body confirms successful receipt and tells SFTPPlus
    # not to retry this event.
    return web.Response(status=204, text="")
async def get_json(request):
    """Return the decoded JSON body of *request*, or ``{}`` on failure.

    The decoded payload (or the raw text, when the body is not valid
    JSON) is also printed for demonstration purposes.
    """
    try:
        parsed = await request.json()
    except json.JSONDecodeError:
        print("INVALID JSON RECEIVED")
        raw_text = await request.text()
        print(raw_text)
        parsed = {}
    else:
        print(json.dumps(parsed, indent=2))
    return parsed
app = web.Application()
app.add_routes(
[
web.get("/", handle_root),
web.post("/auth-api", handle_auth),
web.post("/event-api", handle_event),
]
)
ssl_context = None
if certificate:
ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
ssl_context.load_cert_chain(certificate, certificate)
if __name__ == "__main__":
web.run_app(app, host=address, port=port, ssl_context=ssl_context)
| 3.28125 | 3 |
arkfbp/flow/__init__.py | arkfbp/arkfbp-py | 2 | 13438 | <reponame>arkfbp/arkfbp-py
from .base import Flow
from .view_flow import ViewFlow
| 0.984375 | 1 |
ethereumetl/mappers/event_mapper.py | thanhnv2303/ethereum-etl | 0 | 13439 | from config.constant import ExportItemConstant, ExportItemTypeConstant, EventConstant, TransactionConstant
from ethereumetl.service.eth_event_service import EthEvent
class EthEventMapper(object):
    """Maps EthEvent service objects to flat export dictionaries."""

    def eth_event_to_dict(self, eth_event: EthEvent):
        """Flatten *eth_event* into a single export dict.

        Event metadata keys are written first; the decoded event
        parameters are merged in afterwards (parameters win on any
        key collision, matching ``{**d1, **d2}`` semantics).
        """
        result = {
            ExportItemConstant.type: ExportItemTypeConstant.event,
            EventConstant.event_type: eth_event.event_type,
            EventConstant.contract_address: eth_event.contract_address,
            TransactionConstant.transaction_hash: eth_event.transaction_hash,
            EventConstant.log_index: eth_event.log_index,
            TransactionConstant.block_number: eth_event.block_number,
        }
        result.update(eth_event.params)
        return result
| 2.234375 | 2 |
openerp/addons/crm_partner_assign/wizard/crm_forward_to_partner.py | ntiufalara/openerp7 | 3 | 13440 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_lead_forward_to_partner(osv.TransientModel):
    """ Forward info history to partners.

    Transient wizard built on top of the mail composer that adds a
    'forward' composition mode and pre-fills the message from the
    related crm.lead record.
    """
    _name = 'crm.lead.forward.to.partner'
    _inherit = "mail.compose.message"

    def _get_composition_mode_selection(self, cr, uid, context=None):
        # Extend the base composer's selection with a 'forward' mode.
        composition_mode = super(crm_lead_forward_to_partner, self)._get_composition_mode_selection(cr, uid, context=context)
        composition_mode.append(('forward', 'Forward'))
        return composition_mode

    _columns = {
        'partner_ids': fields.many2many('res.partner',
            'lead_forward_to_partner_res_partner_rel',
            'wizard_id', 'partner_id', 'Additional contacts'),
        'attachment_ids': fields.many2many('ir.attachment',
            'lead_forward_to_partner_attachment_rel',
            'wizard_id', 'attachment_id', 'Attachments'),
        'history_mode': fields.selection([('info', 'Internal notes'),
            ('latest', 'Latest email'), ('whole', 'Whole Story')],
            'Send history', required=True),
    }

    _defaults = {
        'history_mode': 'info',
    }

    def default_get(self, cr, uid, fields, context=None):
        """Compute wizard defaults via the parent composer.

        Temporarily forces composition mode to 'comment' so the parent
        performs its document-based defaulting (which calls
        get_record_data), then restores the original mode.
        """
        if context is None:
            context = {}
        # set as comment, perform overrided document-like action that calls get_record_data
        old_mode = context.get('default_composition_mode', 'forward')
        context['default_composition_mode'] = 'comment'
        res = super(crm_lead_forward_to_partner, self).default_get(cr, uid, fields, context=context)
        # back to forward mode
        context['default_composition_mode'] = old_mode
        res['composition_mode'] = context['default_composition_mode']
        return res

    def get_record_data(self, cr, uid, model, res_id, context=None):
        """ Override of mail.compose.message, to add default values coming
        from the related lead (subject and body rendered from the
        crm_partner_assign email template).
        """
        if context is None:
            context = {}
        res = super(crm_lead_forward_to_partner, self).get_record_data(cr, uid, model, res_id, context=context)
        # NOTE(review): ('crm.lead') is a plain string, so `model not in`
        # tests substring membership; likely meant the tuple ('crm.lead',).
        if model not in ('crm.lead') or not res_id:
            return res
        template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'crm_partner_assign_email_template')[1]
        context['history_mode'] = context.get('history_mode','whole')
        mail_body_fields = ['partner_id', 'partner_name', 'title', 'function', 'street', 'street2', 'zip', 'city', 'country_id', 'state_id', 'email_from', 'phone', 'fax', 'mobile', 'description']
        lead = self.pool.get('crm.lead').browse(cr, uid, res_id, context=context)
        context['mail_body'] = self.pool.get('crm.lead')._mail_body(cr, uid, lead, mail_body_fields, context=context)
        template = self.generate_email_for_composer(cr, uid, template_id, res_id, context)
        res['subject'] = template['subject']
        res['body'] = template['body']
        return res

    def on_change_history_mode(self, cr, uid, ids, history_mode, model, res_id, context=None):
        """ Update body when changing history_mode """
        if context is None:
            context = {}
        if model and model == 'crm.lead' and res_id:
            lead = self.pool.get(model).browse(cr, uid, res_id, context=context)
            context['history_mode'] = history_mode
            body = self.get_record_data(cr, uid, 'crm.lead', res_id, context=context)['body']
            return {'value': {'body': body}}

    def create(self, cr, uid, values, context=None):
        """ TDE-HACK: remove 'type' from context, because when viewing an
        opportunity form view, a default_type is set and propagated
        to the wizard, that has a not matching type field. """
        default_type = context.pop('default_type', None)
        new_id = super(crm_lead_forward_to_partner, self).create(cr, uid, values, context=context)
        if default_type:
            context['default_type'] = default_type
        return new_id

    def action_forward(self, cr, uid, ids, context=None):
        """ Forward the lead to a partner """
        if context is None:
            context = {}
        res = {'type': 'ir.actions.act_window_close'}
        wizard = self.browse(cr, uid, ids[0], context=context)
        if wizard.model not in ('crm.lead'):
            return res
        lead = self.pool.get(wizard.model)
        lead_ids = wizard.res_id and [wizard.res_id] or []
        # In mass-mail mode, operate on every selected lead instead.
        if wizard.composition_mode == 'mass_mail':
            lead_ids = context and context.get('active_ids', []) or []
            value = self.default_get(cr, uid, ['body', 'email_to', 'email_cc', 'subject', 'history_mode'], context=context)
            self.write(cr, uid, ids, value, context=context)
        return self.send_mail(cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 1.773438 | 2 |
losses.py | DensenDavis/yolov5_tf2 | 0 | 13441 | import tensorflow as tf
from tensorflow.keras.losses import binary_crossentropy,sparse_categorical_crossentropy
from config import Configuration
cfg = Configuration()
class YOLOLoss(tf.losses.Loss):
    """Per-scale YOLO detection loss (box xy/wh + objectness + class).

    One instance is built per output scale with that scale's anchor set.
    ``reduction="none"`` so the caller controls batch reduction.
    """
    def __init__(self, anchors):
        # anchors: per-scale anchor sizes, stored as a constant tensor.
        super(YOLOLoss, self).__init__(reduction="none", name="YOLOLoss")
        self.anchors = tf.constant(anchors)

    def _meshgrid(self, n_a, n_b):
        # tile/repeat-based equivalent of tf.meshgrid(tf.range(n_a),
        # tf.range(n_b)); returns [x-grid, y-grid] of shape (n_b, n_a).
        return [
            tf.reshape(tf.tile(tf.range(n_a), [n_b]), (n_b, n_a)),
            tf.reshape(tf.repeat(tf.range(n_b), n_a), (n_b, n_a))
        ]

    def broadcast_iou(self, box_1, box_2):
        """Pairwise IoU between every box in box_1 and every box in box_2.

        box_1: (..., (x1, y1, x2, y2)); box_2: (N, (x1, y1, x2, y2)).
        Returns IoU of shape (..., N).
        """
        # broadcast boxes against each other
        box_1 = tf.expand_dims(box_1, -2)
        box_2 = tf.expand_dims(box_2, 0)
        # new_shape: (..., N, (x1, y1, x2, y2))
        new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))
        box_1 = tf.broadcast_to(box_1, new_shape)
        box_2 = tf.broadcast_to(box_2, new_shape)
        # Intersection extents are clamped at 0 for disjoint boxes.
        int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) -
                           tf.maximum(box_1[..., 0], box_2[..., 0]), 0)
        int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) -
                           tf.maximum(box_1[..., 1], box_2[..., 1]), 0)
        int_area = int_w * int_h
        box_1_area = (box_1[..., 2] - box_1[..., 0]) * \
            (box_1[..., 3] - box_1[..., 1])
        box_2_area = (box_2[..., 2] - box_2[..., 0]) * \
            (box_2[..., 3] - box_2[..., 1])
        return int_area / (box_1_area + box_2_area - int_area)

    def yolo_boxes(self, pred, classes):
        """Decode raw network output into boxes/objectness/class probs."""
        # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
        grid_size = tf.shape(pred)[1:3]
        box_xy, box_wh, objectness, class_probs = tf.split(pred, (2, 2, 1, classes), axis=-1)
        box_xy = tf.sigmoid(box_xy)
        objectness = tf.sigmoid(objectness)
        class_probs = tf.sigmoid(class_probs)
        pred_box = tf.concat((box_xy, box_wh), axis=-1)  # original xywh for loss
        # !!! grid[x][y] == (y, x)
        grid = self._meshgrid(grid_size[1],grid_size[0])
        grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)  # [gx, gy, 1, 2]
        # Cell-relative xy -> image-relative; wh decoded against anchors.
        box_xy = (box_xy + tf.cast(grid, tf.float32)) / tf.cast(grid_size, tf.float32)
        box_wh = tf.exp(box_wh) * self.anchors
        box_x1y1 = box_xy - box_wh / 2
        box_x2y2 = box_xy + box_wh / 2
        bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
        return bbox, objectness, class_probs, pred_box

    def call(self, y_true, y_pred):
        """Compute the per-sample loss; returns shape (batch,)."""
        # 1. transform all pred outputs
        # y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))
        pred_box, pred_obj, pred_class, pred_xywh = self.yolo_boxes(y_pred, cfg.num_classes)
        pred_xy = pred_xywh[..., 0:2]
        pred_wh = pred_xywh[..., 2:4]
        # 2. transform all true outputs
        # y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))
        true_box, true_obj, true_class_idx = tf.split(y_true, (4, 1, 1), axis=-1)
        true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2
        true_wh = true_box[..., 2:4] - true_box[..., 0:2]
        # give higher weights to small boxes
        box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]
        # 3. inverting the pred box equations (so targets live in the same
        # space the network predicts in)
        grid_size = tf.shape(y_true)[1]
        grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
        grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
        true_xy = true_xy * tf.cast(grid_size, tf.float32) - tf.cast(grid, tf.float32)
        true_wh = tf.math.log(true_wh / self.anchors)
        # log(0) -> -inf for empty cells; replace with 0 so the (masked)
        # loss stays finite.
        true_wh = tf.where(tf.math.is_inf(true_wh),tf.zeros_like(true_wh), true_wh)
        # 4. calculate all masks
        obj_mask = tf.squeeze(true_obj, -1)
        # ignore false positive when iou is over threshold
        best_iou = tf.map_fn(
            lambda x: tf.reduce_max(self.broadcast_iou(x[0], tf.boolean_mask(
                x[1], tf.cast(x[2], tf.bool))), axis=-1),
            (pred_box, true_box, obj_mask),
            tf.float32)
        ignore_mask = tf.cast(best_iou < cfg.train_iou_threshold, tf.float32)
        # 5. calculate all losses
        xy_loss = obj_mask * box_loss_scale * tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
        wh_loss = obj_mask * box_loss_scale * tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
        obj_loss = binary_crossentropy(true_obj, pred_obj)
        obj_loss = obj_mask * obj_loss + (1 - obj_mask) * ignore_mask * obj_loss
        class_loss = obj_mask * sparse_categorical_crossentropy(true_class_idx, pred_class)
        # 6. sum over (batch, gridx, gridy, anchors) => (batch, 1)
        xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
        wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
        obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))
        class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))
        return xy_loss + wh_loss + obj_loss + class_loss
| 2.3125 | 2 |
test/stress/mmlogic.py | dzlier-gcp/open-match | 0 | 13442 | <gh_stars>0
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import json
from locust import HttpLocust, TaskSequence, task, seq_task
from util import ticket_generator, pool_generator, ATTRIBUTE_LIST
NUM_QUERY_ATTR = 20
class ClientBehavior(TaskSequence):
    """Locust task set stressing the Open Match mmlogic query endpoint."""
    def on_start(self):
        """ on_start is called when a Locust starts, before any task is scheduled """
        self.init()

    def init(self):
        # Placeholder for initializing future TLS materials and request generators
        create_payload = {
            "method": "POST",
            "endpoint": "/v1/frontend/tickets",
            "params": None,
            "body": None
        }
        # Each spawned client first generates 10 tickets, then queries the
        # mmlogic (data) layer.
        # Total number of tickets in open-match would be 10 * # of spawned clients
        for i in range(10):
            self.client.request(create_payload["method"], create_payload["endpoint"], params=None, data=json.dumps(ticket_generator()))

    @task(1)
    def query_ticket(self):
        """Query tickets by a random pool of attributes; expect HTTP 200."""
        query_payload = {
            "method": "POST",
            "endpoint": "/v1/mmlogic/tickets:query",
            "params": None,
            "body": pool_generator(random.choices(ATTRIBUTE_LIST, k=NUM_QUERY_ATTR))
        }
        method, endpoint, params, data, name = query_payload["method"], query_payload["endpoint"], None, json.dumps(query_payload["body"]), "Query: {}".format(query_payload["endpoint"])
        # catch_response lets us mark non-200 answers as failures in the
        # locust statistics.
        with self.client.request(method, endpoint, name=name, params=params, data=data, catch_response=True) as response:
            if response.status_code != 200:
                response.failure("Got status code {}, was expected 200.".format(response.content))
class WebsiteUser(HttpLocust):
    """Locust user executing ClientBehavior with a 0.5-1.5 s think time."""
    task_set = ClientBehavior
    # Wait between 500 and 1500 ms between consecutive tasks.
    min_wait = 500
    max_wait = 1500
| 2.265625 | 2 |
benchmarks_sphere/report_konwihr_rexi_nl/compare_wt_dt_vs_accuracy_galewsky_new_rexi_cmlarge_elrexi/postprocessing_pickle.py | valentinaschueller/sweet | 6 | 13443 | #! /usr/bin/env python3
import sys
import math
import glob
from mule_local.postprocessing.pickle_SphereDataSpectralDiff import *
from mule.exec_program import *
# Ugly hack!
#output, retval = exec_program('ls *benchref*/*prog_h* | sort | tail -n 1 | sed "s/.*prog_h//"')
#if retval != 0:
# print(output)
# raise Exception("Something went wrong")
#output = output.replace("\n", '')
#output = output.replace("\r", '')
#p = pickle_SphereDataSpectralDiff(output)
p = pickle_SphereDataSpectralDiff()
| 2.046875 | 2 |
src/houdini_package_runner/items/base.py | captainhammy/houdini_package_runner | 3 | 13444 | <filename>src/houdini_package_runner/items/base.py
"""This module contains a base runnable item."""
# =============================================================================
# IMPORTS
# =============================================================================
# Future
from __future__ import annotations
# Standard Library
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List
# Imports for type checking.
if TYPE_CHECKING:
import pathlib
import houdini_package_runner.runners.base
# =============================================================================
# CLASSES
# =============================================================================
class BaseItem(ABC):
    """Base class for a runnable item.

    :param write_back: Whether the item should write itself back to disk.

    """

    def __init__(self, write_back: bool = False) -> None:
        # Set to True by processing when the item's contents were modified.
        self._contents_changed = False
        # Builtin names that import-checking tools should ignore.
        self._ignored_builtins: List[str] = []
        # Whether the item's code lives on a single line.
        self._is_single_line = False
        # Whether the item is test-related (affects which checks apply).
        self._is_test_item = False
        self._write_back = write_back

    def __repr__(self):
        return f"<{self.__class__.__name__}>"

    # -------------------------------------------------------------------------
    # PROPERTIES
    # -------------------------------------------------------------------------

    @property
    def contents_changed(self) -> bool:
        """Whether the contents of the item have changed."""
        return self._contents_changed

    @contents_changed.setter
    def contents_changed(self, contents_changed: bool):
        self._contents_changed = contents_changed

    # -------------------------------------------------------------------------

    @property
    def ignored_builtins(self) -> List[str]:
        """A list of known builtins to ignore for checks which look for imports."""
        return self._ignored_builtins

    # -------------------------------------------------------------------------

    @property
    def is_single_line(self) -> bool:
        """Whether the item code on a single line."""
        return self._is_single_line

    # -------------------------------------------------------------------------

    @property
    def is_test_item(self) -> bool:
        """Whether the item is a test related item."""
        return self._is_test_item

    @is_test_item.setter
    def is_test_item(self, is_test_item: bool):
        self._is_test_item = is_test_item

    # -------------------------------------------------------------------------

    @property
    def write_back(self) -> bool:
        """Whether the item should write changes back."""
        return self._write_back

    @write_back.setter
    def write_back(self, write_back):
        self._write_back = write_back

    # -------------------------------------------------------------------------
    # METHODS
    # -------------------------------------------------------------------------

    @abstractmethod
    def process(
        self, runner: houdini_package_runner.runners.base.HoudiniPackageRunner
    ) -> int:
        """Process an item.

        :param runner: The package runner processing the item.
        :return: The process return code.

        """
class BaseFileItem(BaseItem):
    """Base class for a runnable item backed by a file on disk.

    :param path: The path for the item.
    :param write_back: Whether the item should write itself back to disk.

    """

    def __init__(self, path: pathlib.Path, write_back: bool = False) -> None:
        super().__init__(write_back=write_back)

        # Filesystem location of the item.
        self._path = path

    def __repr__(self):
        return f"<{self.__class__.__name__} {self.path}>"

    # -------------------------------------------------------------------------
    # PROPERTIES
    # -------------------------------------------------------------------------

    @property
    def path(self) -> pathlib.Path:
        """The path on disk."""
        return self._path

    # -------------------------------------------------------------------------
    # METHODS
    # -------------------------------------------------------------------------

    @abstractmethod
    def process(
        self, runner: houdini_package_runner.runners.base.HoudiniPackageRunner
    ) -> int:
        """Process an item.

        :param runner: The package runner processing the item.
        :return: The process return code.

        """
| 2.671875 | 3 |
visualizer/__init__.py | AndreasMadsen/bachelor-code | 1 | 13445 |
from graph.graph_server import GraphServer
__all__ = ['GraphServer']
| 1.117188 | 1 |
djangoplicity/blog/migrations/0001_initial.py | djangoplicity/blog | 0 | 13446 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-08-15 16:23
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import djangoplicity.archives.base
import djangoplicity.archives.fields
class Migration(migrations.Migration):
    """Initial schema for the djangoplicity blog app.

    Creates Author, AuthorDescription (the Post<->Author through model),
    Category, Post and Tag, then wires up the Post->Tag M2M and the
    AuthorDescription->Post foreign key.
    """

    # First migration of this app.
    initial = True

    dependencies = [
        # Author.photo and Post.banner reference media.Image.
        ('media', '0021_auto_20170207_1749'),
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('biography', models.TextField(blank=True)),
                ('photo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='media.Image')),
            ],
        ),
        migrations.CreateModel(
            name='AuthorDescription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(blank=True, help_text='Optional description, e.g.: "Author: ", or "Interview with"', max_length=100)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Author')),
            ],
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('footer', models.TextField(blank=True, help_text='Optional footer added to the bottom of posts')),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                # Slug doubles as the primary key and the URL component.
                ('slug', models.SlugField(help_text='Used for the URL', primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=255)),
                ('subtitle', models.CharField(blank=True, help_text='Optional subtitle', max_length=255)),
                ('lede', models.TextField()),
                ('body', models.TextField()),
                ('discover_box', models.TextField(blank=True)),
                ('numbers_box', models.TextField(blank=True)),
                ('links', models.TextField(blank=True)),
                # Release/embargo scheduling fields from djangoplicity.archives.
                ('release_date', djangoplicity.archives.fields.ReleaseDateTimeField(blank=True, db_index=True, null=True)),
                ('embargo_date', djangoplicity.archives.fields.ReleaseDateTimeField(blank=True, db_index=True, null=True)),
                ('published', models.BooleanField(db_index=True, default=False, verbose_name='Published')),
                ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last modified')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
                # Celery task ids for the scheduled release/embargo jobs.
                ('release_task_id', models.CharField(blank=True, max_length=64, null=True)),
                ('embargo_task_id', models.CharField(blank=True, max_length=64, null=True)),
                ('checksums', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
                ('authors', models.ManyToManyField(through='blog.AuthorDescription', to='blog.Author')),
                ('banner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='media.Image', verbose_name='Banner Image')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
            ],
            options={
                # Newest posts first.
                'ordering': ('-release_date',),
            },
            bases=(djangoplicity.archives.base.ArchiveModel, models.Model),
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.AddField(
            model_name='post',
            name='tags',
            field=models.ManyToManyField(to='blog.Tag'),
        ),
        migrations.AddField(
            model_name='authordescription',
            name='post',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post'),
        ),
    ]
| 1.898438 | 2 |
picklesize/test_picklesize.py | pydron/picklesize | 0 | 13447 | '''
Created on 20.07.2015
@author: stefan
'''
import unittest
import pickle
import picklesize
import copy_reg
class TestEstimator(unittest.TestCase):
def setUp(self):
self.target = picklesize.PickleSize()
def compare(self, obj):
data = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
expected = len(data)
actual = self.target.picklesize(obj, pickle.HIGHEST_PROTOCOL)
self.assertEqual(expected, actual, "Wrong estimate (%s instead of %s) for %r." %
(actual, expected, obj))
def test_None(self):
self.compare(None)
def test_True(self):
self.compare(True)
def test_False(self):
self.compare(False)
def test_int(self):
self.compare(0)
self.compare(1)
self.compare(0xFF-1)
self.compare(0xFF)
self.compare(0xFF+1)
self.compare(0xFFFF-1)
self.compare(0xFFFF)
self.compare(0xFFFF+1)
self.compare(-0xFF-1)
self.compare(-0xFF)
self.compare(-0xFF+1)
self.compare(-0xFFFF-1)
self.compare(-0xFFFF)
self.compare(-0xFFFF+1)
def test_long(self):
self.compare(0L)
self.compare(1L)
self.compare(10L**100)
self.compare(10L**1000)
def test_float(self):
self.compare(0.0)
self.compare(-42.42)
def test_string(self):
self.compare("")
self.compare(255*"x")
self.compare(256*"x")
self.compare(257*"x")
def test_unicode(self):
self.compare(u"")
self.compare(255*u"x")
self.compare(256*u"x")
self.compare(257*u"x")
def test_tuple(self):
self.compare(tuple())
self.compare((1,))
self.compare((1,2))
self.compare((1,2,3))
self.compare((1,2,3,4))
def test_list(self):
self.compare([])
self.compare([1])
self.compare(999*[1])
self.compare(1000*[1])
self.compare(1001*[1])
self.compare(1002*[1])
self.compare(5412*[1])
def test_dict(self):
self.compare({})
self.compare({1:2})
self.compare({1:1, 2:2})
def test_instance(self):
self.compare(OldStyle_WithAttribs())
self.compare(OldStyle_WithInit())
def test_Type(self):
self.compare(long)
self.compare(OldStyle_WithAttribs)
self.compare(global_function)
self.compare(max)
def test_Ref(self):
x = "abc"
self.compare([x,x])
def test_Reducer(self):
self.compare(NewStyle_Reducer())
def test_NewStyleInstance(self):
self.compare(NewStyle_WithAttribs())
def test_numpy(self):
import numpy as np
self.compare(np.ones((10,10)))
self.compare(np.ones((10,10))[0:5,:])
self.compare(np.ones((10,10))[:,0:5])
def test_numpy_multiple_arrays(self):
import numpy as np
self.compare([np.ones((10,10)), np.ones((10,10))])
def test_numpy_large(self):
import numpy as np
self.compare(np.ones(1024*1024))
class TestFast(TestEstimator):
    """Runs the same suite against FastPickleSize with loose error bounds."""

    def setUp(self):
        self.target = picklesize.FastPickleSize()

    def compare(self, obj):
        pickled = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
        expected = len(pickled)
        actual = self.target.picklesize(obj, pickle.HIGHEST_PROTOCOL)
        # The fast estimator may be off by roughly a factor of two plus a
        # constant in either direction.
        upper = 2 * expected + 100
        lower = 0.5 * expected - 100
        self.assertLessEqual(actual, upper, "Over estimate (%s instead of %s) for %r." %
                             (actual, expected, obj))
        self.assertGreaterEqual(actual, lower, "Gross under estimate (%s instead of %s) for %r." %
                                (actual, expected, obj))
class OldStyle_WithAttribs():
    """Fixture: instance pickled through its attribute dict."""

    def __init__(self):
        self.a, self.b = 12, 42
class OldStyle_WithInit():
    """Fixture: instance reconstructed via __getinitargs__ on unpickling."""

    def __getinitargs__(self):
        return 1, 2, 3
class NewStyle_Reducer(object):
    # Fixture: pickled through the custom reducer registered with
    # copy_reg.pickle() further down in this module.
    pass
class NewStyle_WithAttribs(object):
    """Fixture: new-style instance pickled through its attribute dict."""

    def __init__(self):
        self.a, self.b = 12, 42
def tuple_reducer(obj):
    """Reduce any NewStyle_Reducer instance to a no-argument reconstruction."""
    return (NewStyle_Reducer, ())
copy_reg.pickle(NewStyle_Reducer, tuple_reducer)
def global_function():
    # Fixture: module-level function; pickle stores only a reference to it.
    pass
setup.py | extensive-nlp/ttc_nlp | 0 | 13448 | <reponame>extensive-nlp/ttc_nlp
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Setup process."""
from io import open
from os import path
from setuptools import find_packages, setup
# Read the long description from the README so PyPI renders the same text
# as the repository front page.
with open(
    path.join(path.abspath(path.dirname(__file__)), "README.md"), encoding="utf-8"
) as f:
    long_description = f.read()

setup(
    # Basic project information
    name="ttctext",
    version="0.0.1",
    # Authorship and online reference
    # NOTE(review): author/author_email look like anonymized placeholders.
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/extensive-nlp/ttc_nlp",
    # Detailed description
    description="TTC NLP Module",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # NOTE(review): keywords appear to be leftovers from a setuptools template.
    keywords="sample setuptools development",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    # Package configuration
    packages=find_packages(exclude=("tests",)),
    include_package_data=True,
    python_requires=">= 3.6",
    install_requires=[
        "torch>=1.9.0",
        "torchtext>=0.10.0",
        "torchmetrics>=0.4.1",
        "omegaconf>=2.1.0",
        "pytorch-lightning>=1.3.8",
        "gdown>=3.13.0",
        "spacy>=3.1.0",
        "pandas~=1.1.0",
        "seaborn>=0.11.1",
        "matplotlib>=3.1.3",
        "tqdm>=4.61.2",
        "scikit-learn~=0.24.2",
    ],
    # Licensing and copyright
    license="Apache 2.0",
)
| 1.570313 | 2 |
auxein/fitness/__init__.py | auxein/auxein | 1 | 13449 | # flake8: noqa
from .core import Fitness
from .kernel_based import GlobalMinimum
from .observation_based import ObservationBasedFitness, MultipleLinearRegression, SimplePolynomialRegression, MultipleLinearRegression | 0.988281 | 1 |
steelpy/codes/main.py | svortega/steelpy | 4 | 13450 | # Copyright (c) 2019-2020 steelpy
# Python stdlib imports
# package imports
#from steelpy.codes.aisc.aisc360 import AISC_360_16
#from steelpy.codes.aisc.aisc335 import AISC_335_89
#from steelpy.codes.iso.ISO19902 import ISOCodeCheck
from steelpy.codes.piping.pipeline import Pipeline_Assessment
#from steelpy.codes.api.wsd_22ed import APIwsd22ed
from steelpy.codes.dnv.pannel import CodeCheckPanel
#
#from steelpy.process.units.main import Units
#from steelpy.material.material import Material
#from steelpy.sections.tubular import Tubular
from steelpy.codes.api.main import API_design
class CodeCheck:
    """Facade exposing the individual structural code-check packages."""

    def __init__(self):
        """Set up the facade; currently stateless."""
        #self._units = Units()
        pass

    #@property
    #def units(self):
    #    """
    #    """
    #    return self._units
    #
    @property
    def API(self):
        """Return a fresh API design code-check object.

        Note: a new ``API_design`` instance is created on every access.
        """
        return API_design()

    #
    @property
    def pipe(self):
        """Return a fresh pipeline assessment object (new instance per access)."""
        return Pipeline_Assessment()

    #
    def DNV_pannel(self):
        """Return a fresh DNV panel code-check object."""
        return CodeCheckPanel()
| 2.203125 | 2 |
import os

# Install the transformers dependency at runtime — presumably the execution
# image does not ship it (TODO confirm).
os.system("pip install pytorch_transformers")
import nsml

print(nsml.DATASET_PATH)
# Launch semi-supervised MixText training against the NSML dataset mount.
# The command is a fixed string plus the NSML-provided path, so there is no
# user-controlled shell input here.
os.system('python ./code/train.py --n-labeled 10 --data-path '+ nsml.DATASET_PATH + '/train/ --batch-size 4 --batch-size-u 8 --epochs 20 --val-iteration 1000 --lambda-u 1 --T 0.5 --alpha 16 --mix-layers-set 7 9 12 --lrmain 0.000005 --lrlast 0.00005'
)
| 2.375 | 2 |
test.py | IldusTim/QAStudy | 0 | 13452 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import math
from selenium.webdriver.support.ui import Select
import os
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
link = "http://suninjuly.github.io/explicit_wait2.html"

# Chrome with the legacy (non-W3C) protocol enabled for older selenium APIs.
opt = webdriver.ChromeOptions()
opt.add_experimental_option('w3c', False)
browser = webdriver.Chrome(chrome_options=opt)
# NOTE(review): selenium's implicitly_wait conventionally takes a single
# time_to_wait argument — confirm this version accepts the second value.
browser.implicitly_wait(5, 0.5)
browser.get(link)

button = browser.find_element_by_id("book")
# Explicit wait: block up to 12s until the price element reads "10000 RUR".
price = WebDriverWait(browser, 12).until(EC.text_to_be_present_in_element((By.ID, "price"),"10000 RUR"))
button.click()
def calc(x):
    """Return str(log(|12 * sin(int(x))|)) — the answer formula for the quiz."""
    value = math.log(abs(12 * math.sin(int(x))))
    return str(value)
# Reveal the task, read x, compute the formula, and submit the answer.
browser.find_element_by_class_name("btn-primary").click()
# new_window = browser.window_handles[1]
# browser.switch_to.window(new_window)
x_element = browser.find_element_by_id("input_value")
x = x_element.text
y = calc(x)
# Focus the answer field before typing the computed value.
browser.find_element_by_id("answer").click()
browser.find_element_by_id("answer").send_keys(y)
browser.find_element_by_id("solve").click()
django_backend/product/migrations/0002_product.py | itsmahadi007/E-Commerce-VueJS-Django | 0 | 13453 | <reponame>itsmahadi007/E-Commerce-VueJS-Django
# Generated by Django 3.2.7 on 2021-09-01 17:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the Product model to the product app."""

    dependencies = [
        ('product', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('slug', models.SlugField()),
                ('description', models.TextField(blank=True, null=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('image', models.ImageField(blank=True, null=True, upload_to='uploads/')),
                ('thumbnail', models.ImageField(blank=True, null=True, upload_to='uploads/')),
                # NOTE(review): 'data_added' is likely a typo for 'date_added',
                # but it is baked into the applied migration and model.
                ('data_added', models.DateTimeField(auto_now_add=True)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product', to='product.category')),
            ],
            options={
                # Newest products first.
                'ordering': ('-data_added',),
            },
        ),
    ]
| 1.734375 | 2 |
kraken/ketos.py | zjsteyn/kraken | 0 | 13454 | <reponame>zjsteyn/kraken
# -*- coding: utf-8 -*-
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import json
import glob
import uuid
import click
import logging
import unicodedata
from click import open_file
from bidi.algorithm import get_display
from typing import cast, Set, List, IO, Any
from kraken.lib import log
from kraken.lib.exceptions import KrakenCairoSurfaceException
from kraken.lib.exceptions import KrakenEncodeException
from kraken.lib.exceptions import KrakenInputException
# Application name used by click for configuration lookups.
APP_NAME = 'kraken'

# Module-level logger shared by all ketos subcommands.
logger = logging.getLogger('kraken')
def message(msg, **styles):
    """Echo *msg* to the terminal unless logging is at WARNING or quieter.

    At verbose log levels (< WARNING) the loggers already print progress, so
    the extra terminal output is suppressed.
    """
    if logger.getEffectiveLevel() < 30:
        return
    click.secho(msg, **styles)
@click.group()
@click.version_option()
@click.option('-v', '--verbose', default=0, count=True)
@click.option('-s', '--seed', default=None, type=click.INT,
              help='Seed for numpy\'s and torch\'s RNG. Set to a fixed value to '
                   'ensure reproducible random splits of data')
def cli(verbose, seed):
    """Top-level ketos command group.

    Optionally seeds numpy's and torch's RNGs for reproducible data splits,
    and maps repeated ``-v`` flags to logger verbosity (each ``-v`` lowers
    the threshold by 10, floored at DEBUG).
    """
    # `is not None` so an explicit `--seed 0` is honored (a bare truthiness
    # test silently ignored seed 0).
    if seed is not None:
        import numpy.random
        numpy.random.seed(seed)
        from torch import manual_seed
        manual_seed(seed)
    log.set_logger(logger, level=30-min(10*verbose, 20))
def _validate_manifests(ctx, param, value):
images = []
for manifest in value:
for entry in manifest.readlines():
im_p = entry.rstrip('\r\n')
if os.path.isfile(im_p):
images.append(im_p)
else:
logger.warning('Invalid entry "{}" in {}'.format(im_p, manifest.name))
return images
def _expand_gt(ctx, param, value):
images = []
for expression in value:
images.extend([x for x in glob.iglob(expression, recursive=True) if os.path.isfile(x)])
return images
@cli.command('train')
@click.pass_context
@click.option('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right '
              'padding around lines')
@click.option('-o', '--output', show_default=True, type=click.Path(), default='model', help='Output model file')
@click.option('-s', '--spec', show_default=True,
              default='[1,48,0,1 Cr3,3,32 Do0.1,2 Mp2,2 Cr3,3,64 Do0.1,2 Mp2,2 S1(1x12)1,3 Lbx100 Do]',
              help='VGSL spec of the network to train. CTC layer will be added automatically.')
@click.option('-a', '--append', show_default=True, default=None, type=click.INT,
              help='Removes layers before argument and then appends spec. Only works when loading an existing model')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training')
@click.option('-F', '--freq', show_default=True, default=1.0, type=click.FLOAT,
              help='Model saving and report generation frequency in epochs during training')
@click.option('-q', '--quit', show_default=True, default='early', type=click.Choice(['early', 'dumb']),
              help='Stop condition for training. Set to `early` for early stopping or `dumb` for fixed number of epochs')
@click.option('-N', '--epochs', show_default=True, default=-1, help='Number of epochs to train for')
@click.option('--lag', show_default=True, default=5, help='Number of evaluations (--report frequency) to wait before stopping training without improvement')
@click.option('--min-delta', show_default=True, default=None, type=click.FLOAT, help='Minimum improvement between epochs to reset early stopping. Default scales the delta by the best loss')
@click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('--optimizer', show_default=True, default='Adam', type=click.Choice(['Adam', 'SGD', 'RMSprop']), help='Select optimizer')
@click.option('-r', '--lrate', show_default=True, default=2e-3, help='Learning rate')
@click.option('-m', '--momentum', show_default=True, default=0.9, help='Momentum')
@click.option('-w', '--weight-decay', show_default=True, default=0.0, help='Weight decay')
@click.option('--schedule', show_default=True, type=click.Choice(['constant', '1cycle']), default='constant',
              help='Set learning rate scheduler. For 1cycle, cycle length is determined by the `--epoch` option.')
# NOTE(review): `-p` is already used by `--pad` above; one of the two short
# flags is shadowed. Left unchanged to preserve the public CLI — confirm intent.
@click.option('-p', '--partition', show_default=True, default=0.9, help='Ground truth data partition ratio between train/validation set')
@click.option('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']),
              default=None, help='Ground truth normalization')
@click.option('-n', '--normalize-whitespace/--no-normalize-whitespace',
              show_default=True, default=True, help='Normalizes unicode whitespace')
@click.option('-c', '--codec', show_default=True, default=None, type=click.File(mode='r', lazy=True),
              help='Load a codec JSON definition (invalid if loading existing model)')
@click.option('--resize', show_default=True, default='fail', type=click.Choice(['add', 'both', 'fail']),
              help='Codec/output layer resizing option. If set to `add` code '
                   'points will be added, `both` will set the layer to match exactly '
                   'the training data, `fail` will abort if training data and model '
                   'codec do not match.')
@click.option('--reorder/--no-reorder', show_default=True, default=True, help='Reordering of code points to display order')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('--preload/--no-preload', show_default=True, default=None, help='Hard enable/disable for training data preloading')
@click.option('--threads', show_default=True, default=1, help='Number of OpenMP threads and workers when running on CPU.')
#@click.option('--load-hyper-parameters/--no-load-hyper-parameters', show_default=True, default=False,
#              help='When loading an existing model, retrieve hyperparameters from the model')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def train(ctx, pad, output, spec, append, load, freq, quit, epochs,
          lag, min_delta, device, optimizer, lrate, momentum, weight_decay,
          schedule, partition, normalization, normalize_whitespace, codec,
          resize, reorder, training_files, evaluation_files, preload, threads,
          ground_truth):
    """
    Trains a model from image-text pairs.

    Builds train/validation GroundTruthDatasets from line images and their
    transcriptions, creates or extends a VGSL network, and runs the training
    loop with the selected optimizer/scheduler/stopper.
    """
    # Option sanity checks that click cannot express declaratively.
    if not load and append:
        raise click.BadOptionUsage('append', 'append option requires loading an existing model')
    if resize != 'fail' and not load:
        raise click.BadOptionUsage('resize', 'resize option requires loading an existing model')
    # Heavy imports are deferred so that `ketos --help` stays fast.
    import re
    import torch
    import shutil
    import numpy as np
    from torch.utils.data import DataLoader
    from kraken.lib import models, vgsl, train
    from kraken.lib.util import make_printable
    from kraken.lib.train import EarlyStopping, EpochStopping, TrainStopper, TrainScheduler, add_1cycle
    from kraken.lib.codec import PytorchCodec
    from kraken.lib.dataset import GroundTruthDataset, generate_input_transforms

    logger.info('Building ground truth set from {} line images'.format(len(ground_truth) + len(training_files)))
    completed_epochs = 0
    # load model if given. if a new model has to be created we need to do that
    # after data set initialization, otherwise the output size is still unknown.
    nn = None
    #hyper_fields = ['freq', 'quit', 'epochs', 'lag', 'min_delta', 'optimizer', 'lrate', 'momentum', 'weight_decay', 'schedule', 'partition', 'normalization', 'normalize_whitespace', 'reorder', 'preload', 'completed_epochs', 'output']
    if load:
        logger.info('Loading existing model from {} '.format(load))
        message('Loading existing model from {}'.format(load), nl=False)
        nn = vgsl.TorchVGSLModel.load_model(load)
        #if nn.user_metadata and load_hyper_parameters:
        #    for param in hyper_fields:
        #        if param in nn.user_metadata:
        #            logger.info('Setting \'{}\' to \'{}\''.format(param, nn.user_metadata[param]))
        #            message('Setting \'{}\' to \'{}\''.format(param, nn.user_metadata[param]))
        #            locals()[param] = nn.user_metadata[param]
        message('\u2713', fg='green', nl=False)
    # preparse input sizes from vgsl string to seed ground truth data set
    # sizes and dimension ordering.
    if not nn:
        spec = spec.strip()
        if spec[0] != '[' or spec[-1] != ']':
            raise click.BadOptionUsage('spec', 'VGSL spec {} not bracketed'.format(spec))
        blocks = spec[1:-1].split(' ')
        m = re.match(r'(\d+),(\d+),(\d+),(\d+)', blocks[0])
        if not m:
            raise click.BadOptionUsage('spec', 'Invalid input spec {}'.format(blocks[0]))
        batch, height, width, channels = [int(x) for x in m.groups()]
    else:
        batch, channels, height, width = nn.input
    try:
        transforms = generate_input_transforms(batch, height, width, channels, pad)
    except KrakenInputException as e:
        raise click.BadOptionUsage('spec', str(e))
    # disable automatic partition when given evaluation set explicitly
    if evaluation_files:
        partition = 1
    ground_truth = list(ground_truth)
    # merge training_files into ground_truth list
    if training_files:
        ground_truth.extend(training_files)
    if len(ground_truth) == 0:
        raise click.UsageError('No training data was provided to the train command. Use `-t` or the `ground_truth` argument.')
    np.random.shuffle(ground_truth)
    if len(ground_truth) > 2500 and not preload:
        logger.info('Disabling preloading for large (>2500) training data set. Enable by setting --preload parameter')
        preload = False
    # implicit preloading enabled for small data sets
    if preload is None:
        preload = True
    # Split into training/evaluation partitions.
    tr_im = ground_truth[:int(len(ground_truth) * partition)]
    if evaluation_files:
        logger.debug('Using {} lines from explicit eval set'.format(len(evaluation_files)))
        te_im = evaluation_files
    else:
        te_im = ground_truth[int(len(ground_truth) * partition):]
        logger.debug('Taking {} lines from training for evaluation'.format(len(te_im)))
    # set multiprocessing tensor sharing strategy
    if 'file_system' in torch.multiprocessing.get_all_sharing_strategies():
        logger.debug('Setting multiprocessing tensor sharing strategy to file_system')
        torch.multiprocessing.set_sharing_strategy('file_system')
    gt_set = GroundTruthDataset(normalization=normalization,
                                whitespace_normalization=normalize_whitespace,
                                reorder=reorder,
                                im_transforms=transforms,
                                preload=preload)
    with log.progressbar(tr_im, label='Building training set') as bar:
        for im in bar:
            logger.debug('Adding line {} to training set'.format(im))
            try:
                gt_set.add(im)
            except FileNotFoundError as e:
                logger.warning('{}: {}. Skipping.'.format(e.strerror, e.filename))
            except KrakenInputException as e:
                logger.warning(str(e))
    val_set = GroundTruthDataset(normalization=normalization,
                                 whitespace_normalization=normalize_whitespace,
                                 reorder=reorder,
                                 im_transforms=transforms,
                                 preload=preload)
    with log.progressbar(te_im, label='Building validation set') as bar:
        for im in bar:
            logger.debug('Adding line {} to validation set'.format(im))
            try:
                val_set.add(im)
            except FileNotFoundError as e:
                logger.warning('{}: {}. Skipping.'.format(e.strerror, e.filename))
            except KrakenInputException as e:
                logger.warning(str(e))
    logger.info('Training set {} lines, validation set {} lines, alphabet {} symbols'.format(len(gt_set._images), len(val_set._images), len(gt_set.alphabet)))
    # Warn about characters that only occur on one side of the split.
    alpha_diff_only_train = set(gt_set.alphabet).difference(set(val_set.alphabet))
    alpha_diff_only_val = set(val_set.alphabet).difference(set(gt_set.alphabet))
    if alpha_diff_only_train:
        logger.warning('alphabet mismatch: chars in training set only: {} (not included in accuracy test during training)'.format(alpha_diff_only_train))
    if alpha_diff_only_val:
        logger.warning('alphabet mismatch: chars in validation set only: {} (not trained)'.format(alpha_diff_only_val))
    logger.info('grapheme\tcount')
    for k, v in sorted(gt_set.alphabet.items(), key=lambda x: x[1], reverse=True):
        char = make_printable(k)
        if char == k:
            char = '\t' + char
        logger.info(u'{}\t{}'.format(char, v))
    logger.debug('Encoding training set')
    # use model codec when given
    if append:
        # is already loaded
        nn = cast(vgsl.TorchVGSLModel, nn)
        gt_set.encode(codec)
        message('Slicing and dicing model ', nl=False)
        # now we can create a new model
        spec = '[{} O1c{}]'.format(spec[1:-1], gt_set.codec.max_label()+1)
        logger.info('Appending {} to existing model {} after {}'.format(spec, nn.spec, append))
        nn.append(append, spec)
        nn.add_codec(gt_set.codec)
        message('\u2713', fg='green')
        logger.info('Assembled model spec: {}'.format(nn.spec))
    elif load:
        # is already loaded
        nn = cast(vgsl.TorchVGSLModel, nn)
        # prefer explicitly given codec over network codec if mode is 'both'
        codec = codec if (codec and resize == 'both') else nn.codec
        try:
            gt_set.encode(codec)
        except KrakenEncodeException as e:
            message('Network codec not compatible with training set')
            alpha_diff = set(gt_set.alphabet).difference(set(codec.c2l.keys()))
            if resize == 'fail':
                logger.error('Training data and model codec alphabets mismatch: {}'.format(alpha_diff))
                ctx.exit(code=1)
            elif resize == 'add':
                message('Adding missing labels to network ', nl=False)
                logger.info('Resizing codec to include {} new code points'.format(len(alpha_diff)))
                codec.c2l.update({k: [v] for v, k in enumerate(alpha_diff, start=codec.max_label()+1)})
                nn.add_codec(PytorchCodec(codec.c2l))
                logger.info('Resizing last layer in network to {} outputs'.format(codec.max_label()+1))
                nn.resize_output(codec.max_label()+1)
                gt_set.encode(nn.codec)
                message('\u2713', fg='green')
            elif resize == 'both':
                message('Fitting network exactly to training set ', nl=False)
                logger.info('Resizing network or given codec to {} code sequences'.format(len(gt_set.alphabet)))
                gt_set.encode(None)
                ncodec, del_labels = codec.merge(gt_set.codec)
                logger.info('Deleting {} output classes from network ({} retained)'.format(len(del_labels), len(codec)-len(del_labels)))
                gt_set.encode(ncodec)
                nn.resize_output(ncodec.max_label()+1, del_labels)
                message('\u2713', fg='green')
            else:
                raise click.BadOptionUsage('resize', 'Invalid resize value {}'.format(resize))
    else:
        gt_set.encode(codec)
        logger.info('Creating new model {} with {} outputs'.format(spec, gt_set.codec.max_label()+1))
        spec = '[{} O1c{}]'.format(spec[1:-1], gt_set.codec.max_label()+1)
        nn = vgsl.TorchVGSLModel(spec)
        # initialize weights
        message('Initializing model ', nl=False)
        nn.init_weights()
        nn.add_codec(gt_set.codec)
        # initialize codec
        message('\u2713', fg='green')
    # half the number of data loading processes if device isn't cuda and we haven't enabled preloading
    if device == 'cpu' and not preload:
        loader_threads = threads // 2
    else:
        loader_threads = threads
    train_loader = DataLoader(gt_set, batch_size=1, shuffle=True, num_workers=loader_threads, pin_memory=True)
    threads -= loader_threads
    # don't encode validation set as the alphabets may not match causing encoding failures
    val_set.training_set = list(zip(val_set._images, val_set._gt))
    logger.debug('Constructing {} optimizer (lr: {}, momentum: {})'.format(optimizer, lrate, momentum))
    # set mode to training
    nn.train()
    # set number of OpenMP threads
    logger.debug('Set OpenMP threads to {}'.format(threads))
    nn.set_num_threads(threads)
    logger.debug('Moving model to device {}'.format(device))
    # lr is driven by the scheduler below, hence the 0 placeholder here.
    optim = getattr(torch.optim, optimizer)(nn.nn.parameters(), lr=0)
    if 'accuracy' not in nn.user_metadata:
        nn.user_metadata['accuracy'] = []
    tr_it = TrainScheduler(optim)
    if schedule == '1cycle':
        add_1cycle(tr_it, int(len(gt_set) * epochs), lrate, momentum, momentum - 0.10, weight_decay)
    else:
        # constant learning rate scheduler
        tr_it.add_phase(1, (lrate, lrate), (momentum, momentum), weight_decay, train.annealing_const)
    if quit == 'early':
        st_it = EarlyStopping(min_delta, lag)
    elif quit == 'dumb':
        st_it = EpochStopping(epochs - completed_epochs)
    else:
        raise click.BadOptionUsage('quit', 'Invalid training interruption scheme {}'.format(quit))
    #for param in hyper_fields:
    #    logger.debug('Setting \'{}\' to \'{}\' in model metadata'.format(param, locals()[param]))
    #    nn.user_metadata[param] = locals()[param]
    trainer = train.KrakenTrainer(model=nn,
                                  optimizer=optim,
                                  device=device,
                                  filename_prefix=output,
                                  event_frequency=freq,
                                  train_set=train_loader,
                                  val_set=val_set,
                                  stopper=st_it)
    trainer.add_lr_scheduler(tr_it)
    with log.progressbar(label='stage {}/{}'.format(1, trainer.stopper.epochs if trainer.stopper.epochs > 0 else '∞'),
                         length=trainer.event_it, show_pos=True) as bar:

        def _draw_progressbar():
            # Advance the progress bar by one training event.
            bar.update(1)

        def _print_eval(epoch, accuracy, chars, error):
            message('Accuracy report ({}) {:0.4f} {} {}'.format(epoch, accuracy, chars, error))
            # reset progress bar
            bar.label = 'stage {}/{}'.format(epoch+1, trainer.stopper.epochs if trainer.stopper.epochs > 0 else '∞')
            bar.pos = 0
            bar.finished = False

        trainer.run(_print_eval, _draw_progressbar)
    if quit == 'early':
        message('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(output, trainer.stopper.best_epoch, trainer.stopper.best_loss))
        logger.info('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(output, trainer.stopper.best_epoch, trainer.stopper.best_loss))
        shutil.copy('{}_{}.mlmodel'.format(output, trainer.stopper.best_epoch), '{}_best.mlmodel'.format(output))
@cli.command('test')
@click.pass_context
@click.option('-m', '--model', show_default=True, type=click.Path(exists=True, readable=True),
              multiple=True, help='Model(s) to evaluate')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with paths to evaluation data.')
@click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right '
              'padding around lines')
@click.option('--threads', show_default=True, default=1, help='Number of OpenMP threads when running on CPU.')
@click.argument('test_set', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def test(ctx, model, evaluation_files, device, pad, threads, test_set):
    """
    Evaluate on a test set.

    Runs each given model over the test lines, aligns predictions against
    ground truth, and prints a per-model report plus an aggregate accuracy.
    """
    if not model:
        raise click.UsageError('No model to evaluate given.')
    # Heavy imports deferred so CLI startup stays fast.
    import numpy as np
    from PIL import Image
    from kraken.serialization import render_report
    from kraken.lib import models
    from kraken.lib.dataset import global_align, compute_confusions, generate_input_transforms
    logger.info('Building test set from {} line images'.format(len(test_set) + len(evaluation_files)))
    # Map of model path -> loaded network.
    nn = {}
    for p in model:
        message('Loading model {}\t'.format(p), nl=False)
        nn[p] = models.load_any(p)
        message('\u2713', fg='green')
    test_set = list(test_set)
    # set number of OpenMP threads
    logger.debug('Set OpenMP threads to {}'.format(threads))
    next(iter(nn.values())).nn.set_num_threads(threads)
    # merge training_files into ground_truth list
    if evaluation_files:
        test_set.extend(evaluation_files)
    if len(test_set) == 0:
        raise click.UsageError('No evaluation data was provided to the test command. Use `-e` or the `test_set` argument.')

    def _get_text(im):
        # Ground truth transcript sits next to the image as <name>.gt.txt,
        # converted to display order for alignment against the prediction.
        with open(os.path.splitext(im)[0] + '.gt.txt', 'r') as fp:
            return get_display(fp.read())

    acc_list = []
    for p, net in nn.items():
        # Aligned ground-truth/prediction sequences accumulated per model.
        algn_gt: List[str] = []
        algn_pred: List[str] = []
        chars = 0
        error = 0
        message('Evaluating {}'.format(p))
        logger.info('Evaluating {}'.format(p))
        batch, channels, height, width = net.nn.input
        ts = generate_input_transforms(batch, height, width, channels, pad)
        with log.progressbar(test_set, label='Evaluating') as bar:
            for im_path in bar:
                i = ts(Image.open(im_path))
                text = _get_text(im_path)
                pred = net.predict_string(i)
                chars += len(text)
                # c is the edit distance for this line.
                c, algn1, algn2 = global_align(text, pred)
                algn_gt.extend(algn1)
                algn_pred.extend(algn2)
                error += c
        acc_list.append((chars-error)/chars)
        confusions, scripts, ins, dels, subs = compute_confusions(algn_gt, algn_pred)
        rep = render_report(p, chars, error, confusions, scripts, ins, dels, subs)
        logger.info(rep)
        message(rep)
    logger.info('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100))
    message('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100))
@cli.command('extract')
@click.pass_context
@click.option('-b', '--binarize/--no-binarize', show_default=True, default=True,
              help='Binarize color/grayscale images')
@click.option('-u', '--normalization', show_default=True,
              type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,
              help='Normalize ground truth')
@click.option('-s', '--normalize-whitespace/--no-normalize-whitespace',
              show_default=True, default=True, help='Normalizes unicode whitespace')
@click.option('-n', '--reorder/--no-reorder', default=False, show_default=True,
              help='Reorder transcribed lines to display order')
@click.option('-r', '--rotate/--no-rotate', default=True, show_default=True,
              help='Rotate cropped vertical lines to horizontal')
@click.option('-o', '--output', type=click.Path(), default='training', show_default=True,
              help='Output directory')
@click.option('--format', default='{idx:06d}', show_default=True, help='Format for extractor output. valid fields are `src` (source file), `idx` (line number), and `uuid` (v4 uuid)')
@click.argument('transcriptions', nargs=-1, type=click.File(lazy=True))
def extract(ctx, binarize, normalization, normalize_whitespace, reorder,
            rotate, output, format, transcriptions):
    """
    Extracts image-text pairs from a transcription environment created using
    ``ketos transcribe``.

    For every editable line in each transcription HTML file, the matching
    image crop is written as ``<format>.png`` and the (transformed) ground
    truth text as ``<format>.gt.txt`` under ``output``; a ``manifest.txt``
    listing all written images is produced at the end.
    """
    import regex
    import base64
    from io import BytesIO
    from PIL import Image
    from lxml import html, etree
    from kraken import binarization

    try:
        os.mkdir(output)
    except Exception:
        # Output directory may already exist; that is fine.
        pass
    # Build the chain of ground-truth text transforms selected on the CLI,
    # applied in order: unicode normalization -> whitespace folding -> BiDi.
    text_transforms = []
    if normalization:
        text_transforms.append(lambda x: unicodedata.normalize(normalization, x))
    if normalize_whitespace:
        # Raw string: '\s' is a regex character class, not a Python escape.
        text_transforms.append(lambda x: regex.sub(r'\s', ' ', x))
    if reorder:
        text_transforms.append(get_display)
    idx = 0
    manifest = []
    with log.progressbar(transcriptions, label='Reading transcriptions') as bar:
        for fp in bar:
            logger.info('Reading {}'.format(fp.name))
            doc = html.parse(fp)
            etree.strip_tags(doc, etree.Comment)
            # Text direction is recorded as a <meta itemprop=...> tag by the
            # transcription interface; default to horizontal if absent.
            td = doc.find(".//meta[@itemprop='text_direction']")
            if td is None:
                td = 'horizontal-lr'
            else:
                td = td.attrib['content']
            im = None
            dest_dict = {'output': output, 'idx': 0, 'src': fp.name, 'uuid': str(uuid.uuid4())}
            for section in doc.xpath('//section'):
                # Page images are embedded as base64 data URIs.
                img = section.xpath('.//img')[0].get('src')
                fd = BytesIO(base64.b64decode(img.split(',')[1]))
                im = Image.open(fd)
                if not im:
                    logger.info('Skipping {} because image not found'.format(fp.name))
                    break
                if binarize:
                    im = binarization.nlbin(im)
                for line in section.iter('li'):
                    # Only lines that were actually transcribed (editable and
                    # with non-whitespace content) are extracted.
                    if line.get('contenteditable') and (not u''.join(line.itertext()).isspace() and u''.join(line.itertext())):
                        dest_dict['idx'] = idx
                        dest_dict['uuid'] = str(uuid.uuid4())
                        logger.debug('Writing line {:06d}'.format(idx))
                        l_img = im.crop([int(x) for x in line.get('data-bbox').split(',')])
                        if rotate and td.startswith('vertical'):
                            # BUG FIX: Image.rotate() returns a new image; the
                            # previous code rotated the full page image and
                            # discarded the result. Rotate the cropped line
                            # instead so vertical lines become horizontal.
                            l_img = l_img.rotate(90, expand=True)
                        l_img.save(('{output}/' + format + '.png').format(**dest_dict))
                        manifest.append((format + '.png').format(**dest_dict))
                        text = u''.join(line.itertext()).strip()
                        for func in text_transforms:
                            text = func(text)
                        with open(('{output}/' + format + '.gt.txt').format(**dest_dict), 'wb') as t:
                            t.write(text.encode('utf-8'))
                        idx += 1
    logger.info('Extracted {} lines'.format(idx))
    with open('{}/manifest.txt'.format(output), 'w') as fp:
        fp.write('\n'.join(manifest))
@cli.command('transcribe')
@click.pass_context
@click.option('-d', '--text-direction', default='horizontal-lr',
              type=click.Choice(['horizontal-lr', 'horizontal-rl', 'vertical-lr', 'vertical-rl']),
              help='Sets principal text direction', show_default=True)
@click.option('--scale', default=None, type=click.FLOAT)
@click.option('--bw/--orig', default=True, show_default=True,
              help="Put nonbinarized images in output")
@click.option('-m', '--maxcolseps', default=2, type=click.INT, show_default=True)
@click.option('-b/-w', '--black_colseps/--white_colseps', default=False, show_default=True)
@click.option('-f', '--font', default='',
              help='Font family to use')
@click.option('-fs', '--font-style', default=None,
              help='Font style to use')
# BUG FIX: '-p' was previously declared for both --prefill and --pad. click
# resolves a short flag to its *last* registration, so '-p' always meant
# --pad and the short form of --prefill was unreachable. Keep '-p' on --pad
# (the effective behavior) and expose --prefill via its long option only.
@click.option('--prefill', default=None,
              help='Use given model for prefill mode.')
@click.option('-p', '--pad', show_default=True, type=(int, int), default=(0, 0),
              help='Left and right padding around lines')
@click.option('-l', '--lines', type=click.Path(exists=True), show_default=True,
              help='JSON file containing line coordinates')
@click.option('-o', '--output', type=click.File(mode='wb'), default='transcription.html',
              help='Output file', show_default=True)
@click.argument('images', nargs=-1, type=click.File(mode='rb', lazy=True))
def transcription(ctx, text_direction, scale, bw, maxcolseps,
                  black_colseps, font, font_style, prefill, pad, lines, output,
                  images):
    """
    Creates transcription environments for ground truth generation.

    Each input image is binarized, segmented into lines (or loaded from a
    precomputed JSON segmentation via --lines), optionally pre-filled with
    predictions from a recognition model, and rendered into a single HTML
    transcription interface written to --output.
    """
    from PIL import Image
    from kraken import rpred
    from kraken import pageseg
    from kraken import transcribe
    from kraken import binarization
    from kraken.lib import models
    from kraken.lib.util import is_bitonal

    ti = transcribe.TranscriptionInterface(font, font_style)
    if len(images) > 1 and lines:
        # A single segmentation file cannot describe several pages.
        raise click.UsageError('--lines option is incompatible with multiple image files')
    if prefill:
        logger.info('Loading model {}'.format(prefill))
        message('Loading RNN', nl=False)
        prefill = models.load_any(prefill)
        message('\u2713', fg='green')
    with log.progressbar(images, label='Reading images') as bar:
        for fp in bar:
            logger.info('Reading {}'.format(fp.name))
            im = Image.open(fp)
            if im.mode not in ['1', 'L', 'P', 'RGB']:
                logger.warning('Input {} is in {} color mode. Converting to RGB'.format(fp.name, im.mode))
                im = im.convert('RGB')
            logger.info('Binarizing page')
            im_bin = binarization.nlbin(im)
            im_bin = im_bin.convert('1')
            logger.info('Segmenting page')
            if not lines:
                res = pageseg.segment(im_bin, text_direction, scale, maxcolseps, black_colseps, pad=pad)
            else:
                # BUG FIX: the segmentation handle used to be bound to ``fp``,
                # shadowing the image file handle from the loop; the later
                # ``fp.close()`` then closed the wrong (already closed) file
                # and leaked the image handle. Use a distinct name.
                with open_file(lines, 'r') as seg_fp:
                    try:
                        seg_fp = cast(IO[Any], seg_fp)
                        res = json.load(seg_fp)
                    except ValueError as e:
                        raise click.UsageError('{} invalid segmentation: {}'.format(lines, str(e)))
            if prefill:
                it = rpred.rpred(prefill, im_bin, res)
                preds = []
                logger.info('Recognizing')
                for pred in it:
                    logger.debug('{}'.format(pred.prediction))
                    preds.append(pred)
                ti.add_page(im, res, records=preds)
            else:
                ti.add_page(im, res)
            fp.close()
    logger.info('Writing transcription to {}'.format(output.name))
    message('Writing output', nl=False)
    ti.write(output)
    message('\u2713', fg='green')
@cli.command('linegen')
@click.pass_context
@click.option('-f', '--font', default='sans',
              help='Font family to render texts in.')
@click.option('-n', '--maxlines', type=click.INT, default=0,
              help='Maximum number of lines to generate')
@click.option('-e', '--encoding', default='utf-8',
              help='Decode text files with given codec.')
@click.option('-u', '--normalization',
              type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,
              help='Normalize ground truth')
@click.option('-ur', '--renormalize',
              type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,
              help='Renormalize text for rendering purposes.')
@click.option('--reorder/--no-reorder', default=False, help='Reorder code points to display order')
@click.option('-fs', '--font-size', type=click.INT, default=32,
              help='Font size to render texts in.')
@click.option('-fw', '--font-weight', type=click.INT, default=400,
              help='Font weight to render texts in.')
@click.option('-l', '--language',
              help='RFC-3066 language tag for language-dependent font shaping')
@click.option('-ll', '--max-length', type=click.INT, default=None,
              help="Discard lines above length (in Unicode codepoints).")
@click.option('--strip/--no-strip', help="Remove whitespace from start and end "
              "of lines.")
# BUG FIX: '-d' was previously declared for both --disable-degradation and
# --distort. click resolves a short flag to its *last* registration, so '-d'
# always meant --distort. Keep '-d' on --distort and expose
# --disable-degradation via its long option only.
@click.option('--disable-degradation', is_flag=True, help='Dont degrade '
              'output lines.')
@click.option('-a', '--alpha', type=click.FLOAT, default=1.5,
              help="Mean of folded normal distribution for sampling foreground pixel flip probability")
@click.option('-b', '--beta', type=click.FLOAT, default=1.5,
              help="Mean of folded normal distribution for sampling background pixel flip probability")
@click.option('-d', '--distort', type=click.FLOAT, default=1.0,
              help='Mean of folded normal distribution to take distortion values from')
@click.option('-ds', '--distortion-sigma', type=click.FLOAT, default=20.0,
              help='Mean of folded normal distribution to take standard deviations for the '
              'Gaussian kernel from')
@click.option('--legacy/--no-legacy', default=False,
              help='Use ocropy-style degradations')
@click.option('-o', '--output', type=click.Path(), default='training_data',
              help='Output directory')
@click.argument('text', nargs=-1, type=click.Path(exists=True))
def line_generator(ctx, font, maxlines, encoding, normalization, renormalize,
                   reorder, font_size, font_weight, language, max_length, strip,
                   disable_degradation, alpha, beta, distort, distortion_sigma,
                   legacy, output, text):
    """
    Generates artificial text line training data.

    Unique lines are collected from the input text files, optionally
    normalized/stripped/filtered and subsampled, rendered with the selected
    font, optionally degraded/distorted, and written as numbered .png/.gt.txt
    pairs into ``output``.
    """
    import errno
    import numpy as np
    from kraken import linegen
    from kraken.lib.util import make_printable

    lines: Set[str] = set()
    if not text:
        return
    with log.progressbar(text, label='Reading texts') as bar:
        # BUG FIX: iterate over ``bar`` (not ``text``) so the progress bar
        # actually advances while the files are read.
        for t in bar:
            with click.open_file(t, encoding=encoding) as fp:
                logger.info('Reading {}'.format(t))
                for l in fp:
                    lines.add(l.rstrip('\r\n'))
    if normalization:
        lines = set([unicodedata.normalize(normalization, line) for line in lines])
    if strip:
        lines = set([line.strip() for line in lines])
    if max_length:
        lines = set([line for line in lines if len(line) < max_length])
    logger.info('Read {} lines'.format(len(lines)))
    message('Read {} unique lines'.format(len(lines)))
    if maxlines and maxlines < len(lines):
        message('Sampling {} lines\t'.format(maxlines), nl=False)
        llist = list(lines)
        # NOTE: randint samples with replacement, so slightly fewer than
        # ``maxlines`` unique lines may survive the set() below.
        lines = set(llist[idx] for idx in np.random.randint(0, len(llist), maxlines))
        message('\u2713', fg='green')
    try:
        os.makedirs(output)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    # calculate the alphabet and print it for verification purposes
    alphabet: Set[str] = set()
    for line in lines:
        alphabet.update(line)
    chars = []
    combining = []
    for char in sorted(alphabet):
        k = make_printable(char)
        if k != char:
            combining.append(k)
        else:
            chars.append(k)
    message('Σ (len: {})'.format(len(alphabet)))
    message('Symbols: {}'.format(''.join(chars)))
    if combining:
        message('Combining Characters: {}'.format(', '.join(combining)))
    lg = linegen.LineGenerator(font, font_size, font_weight, language)
    with log.progressbar(lines, label='Writing images') as bar:
        for idx, line in enumerate(bar):
            logger.info(line)
            try:
                if renormalize:
                    im = lg.render_line(unicodedata.normalize(renormalize, line))
                else:
                    im = lg.render_line(line)
            except KrakenCairoSurfaceException as e:
                # Rendering can fail for lines the font cannot shape; skip them.
                logger.info('{}: {} {}'.format(e.message, e.width, e.height))
                continue
            if not disable_degradation and not legacy:
                im = linegen.degrade_line(im, alpha=alpha, beta=beta)
                im = linegen.distort_line(im, abs(np.random.normal(distort)), abs(np.random.normal(distortion_sigma)))
            elif legacy:
                im = linegen.ocropy_degrade(im)
            im.save('{}/{:06d}.png'.format(output, idx))
            with open('{}/{:06d}.gt.txt'.format(output, idx), 'wb') as fp:
                if reorder:
                    fp.write(get_display(line).encode('utf-8'))
                else:
                    fp.write(line.encode('utf-8'))
@cli.command('publish')
@click.pass_context
@click.option('-i', '--metadata', show_default=True,
              type=click.File(mode='r', lazy=True), help='Metadata for the '
              'model. Will be prompted from the user if not given')
@click.option('-a', '--access-token', prompt=True, help='Zenodo access token')
@click.argument('model', nargs=1, type=click.Path(exists=False, readable=True, dir_okay=False))
def publish(ctx, metadata, access_token, model):
    """
    Publishes a model on the zenodo model repository.
    """
    import json
    import pkg_resources
    from functools import partial
    from jsonschema import validate
    from jsonschema.exceptions import ValidationError
    from kraken import repo
    from kraken.lib import models

    # Load the JSON schema every metadata record must conform to.
    with pkg_resources.resource_stream(__name__, 'metadata.schema.json') as fp:
        schema = json.load(fp)
    nn = models.load_any(model)
    if not metadata:
        # No metadata file given: interactively prompt for every field.
        author = click.prompt('author')
        affiliation = click.prompt('affiliation')
        summary = click.prompt('summary')
        description = click.edit('Write long form description (training data, transcription standards) of the model here')
        accuracy_default = None
        # take last accuracy measurement in model metadata
        if 'accuracy' in nn.nn.user_metadata and nn.nn.user_metadata['accuracy']:
            accuracy_default = nn.nn.user_metadata['accuracy'][-1][1] * 100
        accuracy = click.prompt('accuracy on test set', type=float, default=accuracy_default)
        script = [click.prompt('script', type=click.Choice(sorted(schema['properties']['script']['items']['enum'])), show_choices=True)]
        license = click.prompt('license', type=click.Choice(sorted(schema['properties']['license']['enum'])), show_choices=True)
        metadata = {
            'authors': [{'name': author, 'affiliation': affiliation}],
            'summary': summary,
            'description': description,
            'accuracy': accuracy,
            'license': license,
            'script': script,
            'name': os.path.basename(model),
            # Placeholder so the schema's required key exists; the real
            # grapheme inventory is filled in from the codec below.
            'graphemes': ['a']
        }
        # Re-prompt individual offending fields until the record validates.
        while True:
            try:
                validate(metadata, schema)
            except ValidationError as e:
                message(e.message)
                metadata[e.path[-1]] = click.prompt(e.path[-1], type=float if e.schema['type'] == 'number' else str)
                continue
            break
    else:
        # Metadata supplied as a file: parse it and validate once (errors abort).
        metadata = json.load(metadata)
        validate(metadata, schema)
    # Record the full grapheme inventory from the model's codec.
    metadata['graphemes'] = [char for char in ''.join(nn.codec.c2l.keys())]
    oid = repo.publish_model(model, metadata, access_token, partial(message, '.', nl=False))
    print('\nmodel PID: {}'.format(oid))
# Allow running this module directly as a script.
if __name__ == '__main__':
    cli()
| 2.015625 | 2 |
util/visualize_loss.py | whq-hqw/detr_change | 2 | 13455 | <filename>util/visualize_loss.py
from os.path import *
import glob
import json
import numpy as np
from util.plot_utils import plot_curves, plot_multi_loss_distribution
TMPJPG = expanduser("~/Pictures/")
def plot_multi_logs(exp_name, keys, save_name, epoch, addition_len):
    """Plot training curves of several experiments side by side.

    Each name in ``exp_name`` is suffix-matched against the folders under the
    experiment root; the per-key curves from each folder's ``log.txt`` are
    stacked and handed to ``plot_multi_loss_distribution``.

    :param exp_name: experiment folder name suffixes to compare.
    :param keys: log keys to plot; ``"test_coco_eval_bbox"`` is list-valued
        and expanded into ``len(addition_len)`` sub-plots.
    :param save_name: base name of the produced figure.
    :param epoch: number of epochs every curve is truncated/padded to.
    :param addition_len: labels for the expanded COCO eval metrics.
    """
    root_path = expanduser("/raid/dataset/detection/detr_exp")
    folder_candidate = glob.glob(join(root_path, "*"))
    folders = []
    for name in exp_name:
        for folder in folder_candidate:
            # Suffix match so e.g. "be" picks ".../exp_be".
            if folder[-len(name):] == name:
                folders.append(folder)
                break
    assert len(exp_name) == len(folders)
    # (exp, key, epoch) -> (key, exp, epoch) so each key becomes one subplot.
    exp_data = np.stack(get_experiment_logs(folders, keys, epoch, addition_len)).transpose((1, 0, 2))
    if len(addition_len) > 0 and "test_coco_eval_bbox" in keys:
        idx = keys.index("test_coco_eval_bbox")
        # BUG FIX: build a new list instead of extending ``addition_len`` in
        # place, which mutated the caller's list as a side effect.
        keys = keys[:idx] + list(addition_len) + keys[idx + 1:]
    plot_multi_loss_distribution(
        multi_line_data=exp_data,
        multi_line_labels=[exp_name] * len(keys),
        save_path=TMPJPG, window=5, name=save_name,
        titles=keys, fig_size=(12, 3 * len(keys)), legend_loc="upper left"
    )
def get_experiment_logs(folders, keys, epoch, addition_len):
    """Load each experiment's log curves, fitted to exactly ``epoch`` columns.

    Runs longer than ``epoch`` are truncated; shorter ones are right-padded
    with zeros. Returns one 2-D array per folder.
    """
    def _fit_to_epochs(arr):
        # Trim overly long runs; zero-pad short ones on the right.
        if arr.shape[-1] >= epoch:
            return arr[:, :epoch]
        pad = np.zeros((arr.shape[0], epoch - arr.shape[1]), dtype=arr.dtype)
        return np.concatenate((arr, pad), axis=1)

    collected = []
    for folder in folders:
        print(folder)
        raw = np.array(load_log(join(folder, "log.txt"), keys, addition_len))
        collected.append(_fit_to_epochs(raw))
    return collected
def load_log(path, keys, addition=()):
    """Parse a JSON-lines training log into per-key value series.

    :param path: path to a ``log.txt`` file with one JSON object per line.
    :param keys: keys to extract from every record. The special key
        ``"test_coco_eval_bbox"`` is list-valued and is expanded into
        ``len(addition)`` consecutive output series.
    :param addition: labels for the expanded COCO eval metrics; its length
        determines how many elements of the list are kept. BUG FIX: the old
        default was the int ``6``, which made ``len(addition)`` raise
        ``TypeError`` whenever the COCO key was requested without an explicit
        ``addition``; an empty sequence is a safe, working default.
    :returns: list of per-key value lists, in ``keys`` order with the COCO
        entry expanded in place.
    """
    if "test_coco_eval_bbox" in keys:
        # One slot per scalar key plus len(addition) slots replacing the
        # single COCO key (hence the -1).
        contents = [[] for _ in range(len(keys) + len(addition) - 1)]
    else:
        contents = [[] for _ in range(len(keys))]
    with open(path, "r") as txt:
        for line in txt:
            data = json.loads(line)
            j = 0
            for i, key in enumerate(keys):
                if key == "test_coco_eval_bbox":
                    # Expand the list-valued eval metric into its own columns;
                    # ``j`` keeps the offset for any keys that follow.
                    for j in range(len(addition)):
                        contents[i + j].append(data[key][j])
                else:
                    contents[i + j].append(data[key])
    return contents
if __name__ == '__main__':
    # Experiment folder suffixes (matched under the experiment root) to compare.
    exp_name = ["be", "be_768", "be_1024", "be_mid_layer_only", "origin"]
    # Scalar log keys plus the list-valued COCO eval entry.
    keys = ["train_loss_bbox", "train_loss_ce", "train_loss_giou", "test_coco_eval_bbox"]
    # Labels for the individual COCO eval metrics; only the 6 AP values are plotted.
    eval_name = ["AP", "AP50", "AP75", "AP_small", "AP_mid", "AP_Big",
                 "AR", "AR50", "AR75", "AR_small", "AR_mid", "AR_Big"]
    plot_multi_logs(exp_name, keys, save_name="loss", epoch=50, addition_len=eval_name[:6])
| 2.1875 | 2 |
tower_cli/resources/job.py | kedark3/tower-cli | 363 | 13456 | <reponame>kedark3/tower-cli
# Copyright 2015, Ansible, Inc.
# <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
from getpass import getpass
from distutils.version import LooseVersion
import click
from tower_cli import models, get_resource, resources, exceptions as exc
from tower_cli.api import client
from tower_cli.cli import types
from tower_cli.utils import debug, parser
PROMPT_LIST = ['diff_mode', 'limit', 'tags', 'skip_tags', 'job_type', 'verbosity', 'inventory', 'credential']
class Resource(models.ExeResource):
    """A resource for jobs.

    This resource has ordinary list and get methods,
    but it does not have create or modify.
    Instead of being created, a job is launched.
    """
    cli_help = 'Launch or monitor jobs.'
    endpoint = '/jobs/'

    # Fields exposed on job records by the CLI.
    job_template = models.Field(
        key='-J',
        type=types.Related('job_template'), required=False, display=True
    )
    # Server-populated failure explanation; read-only and hidden from listings.
    job_explanation = models.Field(required=False, display=False, read_only=True)
    created = models.Field(required=False, display=True)
    status = models.Field(required=False, display=True)
    elapsed = models.Field(required=False, display=True, type=float)

    @resources.command(
        use_fields_as_options=('job_template',)
    )
    @click.option('--monitor', is_flag=True, default=False,
                  help='If sent, immediately calls `job monitor` on the newly '
                       'launched job rather than exiting with a success.')
    @click.option('--wait', is_flag=True, default=False,
                  help='Monitor the status of the job, but do not print '
                       'while job is in progress.')
    @click.option('--timeout', required=False, type=int,
                  help='If provided with --monitor, this command (not the job)'
                       ' will time out after the given number of seconds. '
                       'Does nothing if --monitor is not sent.')
    @click.option('--no-input', is_flag=True, default=False,
                  help='Suppress any requests for input.')
    @click.option('-e', '--extra-vars', required=False, multiple=True,
                  help='yaml format text that contains extra variables '
                       'to pass on. Use @ to get these from a file.')
    @click.option('--diff-mode', type=bool, required=False, help='Specify diff mode for job template to run.')
    @click.option('--limit', required=False, help='Specify host limit for job template to run.')
    @click.option('--tags', required=False, help='Specify tagged actions in the playbook to run.')
    @click.option('--skip-tags', required=False, help='Specify tagged actions in the playbook to omit.')
    @click.option('--job-type', required=False, type=click.Choice(['run', 'check']),
                  help='Specify job type for job template to run.')
    @click.option('--verbosity', type=int, required=False, help='Specify verbosity of the playbook run.')
    @click.option('--inventory', required=False, type=types.Related('inventory'),
                  help='Specify inventory for job template to run.')
    @click.option('--credential', required=False, multiple=True, type=types.Related('credential'),
                  help='Specify any type of credential(s) for job template to run.')
    def launch(self, job_template=None, monitor=False, wait=False,
               timeout=None, no_input=True, extra_vars=None, **kwargs):
        """Launch a new job based on a job template.

        Creates a new job in Ansible Tower, immediately starts it, and
        returns back an ID in order for its status to be monitored.

        =====API DOCS=====
        Launch a new job based on a job template.

        :param job_template: Primary key or name of the job template to launch new job.
        :type job_template: str
        :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched job rather
                        than exiting with a success.
        :type monitor: bool
        :param wait: Flag that if set, monitor the status of the job, but do not print while job is in progress.
        :type wait: bool
        :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number
                        of seconds.
        :type timeout: int
        :param no_input: Flag that if set, suppress any requests for input.
        :type no_input: bool
        :param extra_vars: yaml formatted texts that contains extra variables to pass on.
        :type extra_vars: array of strings
        :param diff_mode: Specify diff mode for job template to run.
        :type diff_mode: bool
        :param limit: Specify host limit for job template to run.
        :type limit: str
        :param tags: Specify tagged actions in the playbook to run.
        :type tags: str
        :param skip_tags: Specify tagged actions in the playbook to omit.
        :type skip_tags: str
        :param job_type: Specify job type for job template to run.
        :type job_type: str
        :param verbosity: Specify verbosity of the playbook run.
        :type verbosity: int
        :param inventory: Specify inventory for job template to run.
        :type inventory: str
        :param credential: Specify machine credential for job template to run.
        :type credential: str
        :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent
                  ``wait`` call if ``wait`` flag is on; Result of subsequent ``status`` call if none of
                  the two flags are on.
        :rtype: dict

        =====API DOCS=====
        """
        # Get the job template from Ansible Tower.
        # This is used as the baseline for starting the job.
        jt_resource = get_resource('job_template')
        jt = jt_resource.get(job_template)
        # Update the job data for special treatment of certain fields
        # Special case for job tags, historically just called --tags
        tags = kwargs.get('tags', None)
        data = {}
        if tags:
            data['job_tags'] = tags
        # Special case for cross-version compatibility with credentials
        cred_arg = kwargs.pop('credential', ())
        if isinstance(cred_arg, (list, tuple)):
            credentials = cred_arg
        else:
            credentials = [cred_arg]
        if credentials:
            if 'credentials' in jt['related']:
                # Has Tower 3.3 / multi-cred support
                # combine user-provided credentials with JT credentials
                jt_creds = set(
                    c['id'] for c in jt['summary_fields']['credentials']
                )
                kwargs['credentials'] = list(set(credentials) | jt_creds)
            else:
                if len(credentials) > 1:
                    raise exc.UsageError(
                        'Providing multiple credentials on launch can only be '
                        'done with Tower version 3.3 and higher or recent AWX.'
                    )
                kwargs['credential'] = credentials[0]
        # Initialize an extra_vars list that starts with the job template
        # preferences first, if they exist
        extra_vars_list = []
        # NOTE(review): 'extra_vars' is never inserted into ``data`` above, so
        # this pre-2.4 compatibility branch appears unreachable — confirm
        # before relying on or removing it.
        if 'extra_vars' in data and len(data['extra_vars']) > 0:
            # But only do this for versions before 2.3
            debug.log('Getting version of Tower.', header='details')
            r = client.get('/config/')
            if LooseVersion(r.json()['version']) < LooseVersion('2.4'):
                extra_vars_list = [data['extra_vars']]
        # Add the runtime extra_vars to this list
        if extra_vars:
            extra_vars_list += list(extra_vars)  # accept tuples
        # If the job template requires prompting for extra variables,
        # do so (unless --no-input is set).
        if jt.get('ask_variables_on_launch', False) and not no_input \
                and not extra_vars:
            # If JT extra_vars are JSON, echo them to user as YAML
            initial = parser.process_extra_vars(
                [jt['extra_vars']], force_json=False
            )
            initial = '\n'.join((
                '# Specify extra variables (if any) here as YAML.',
                '# Lines beginning with "#" denote comments.',
                initial,
            ))
            extra_vars = click.edit(initial) or ''
            if extra_vars != initial:
                extra_vars_list = [extra_vars]
        # Data is starting out with JT variables, and we only want to
        # include extra_vars that come from the algorithm here.
        data.pop('extra_vars', None)
        # Replace/populate data fields if prompted.
        modified = set()
        for resource in PROMPT_LIST:
            if jt.pop('ask_' + resource + '_on_launch', False) and not no_input:
                resource_object = kwargs.get(resource, None)
                # NOTE(review): kwargs values are plain ids/names here, so this
                # comparison against the click param type class looks
                # always-False — verify before relying on the branch.
                if type(resource_object) == types.Related:
                    resource_class = get_resource(resource)
                    resource_object = resource_class.get(resource).pop('id', None)
                if resource_object is None:
                    debug.log('{0} is asked at launch but not provided'.
                              format(resource), header='warning')
                elif resource != 'tags':
                    data[resource] = resource_object
                    modified.add(resource)
        # Dump extra_vars into JSON string for launching job
        if len(extra_vars_list) > 0:
            data['extra_vars'] = parser.process_extra_vars(
                extra_vars_list, force_json=True
            )
        # Create the new job in Ansible Tower.
        start_data = {}
        endpoint = '/job_templates/%d/launch/' % jt['id']
        if 'extra_vars' in data and len(data['extra_vars']) > 0:
            start_data['extra_vars'] = data['extra_vars']
        if tags:
            start_data['job_tags'] = data['job_tags']
        for resource in PROMPT_LIST:
            if resource in modified:
                start_data[resource] = data[resource]
        # There's a non-trivial chance that we are going to need some
        # additional information to start the job; in particular, many jobs
        # rely on passwords entered at run-time.
        #
        # If there are any such passwords on this job, ask for them now.
        debug.log('Asking for information necessary to start the job.',
                  header='details')
        job_start_info = client.get(endpoint).json()
        for password in job_start_info.get('passwords_needed_to_start', []):
            start_data[password] = getpass('Password for %s: ' % password)
        # Actually start the job.
        debug.log('Launching the job.', header='details')
        self._pop_none(kwargs)
        kwargs.update(start_data)
        job_started = client.post(endpoint, data=kwargs)
        # Get the job ID from the result.
        job_id = job_started.json()['id']
        # If returning json indicates any ignored fields, display it in
        # verbose mode.
        if job_started.text == '':
            ignored_fields = {}
        else:
            ignored_fields = job_started.json().get('ignored_fields', {})
        has_ignored_fields = False
        for key, value in ignored_fields.items():
            if value and value != '{}':
                if not has_ignored_fields:
                    debug.log('List of ignored fields on the server side:',
                              header='detail')
                    has_ignored_fields = True
                debug.log('{0}: {1}'.format(key, value))
        # Get some information about the running job to print
        result = self.status(pk=job_id, detail=True)
        result['changed'] = True
        # If we were told to monitor the job once it started, then call
        # monitor from here.
        if monitor:
            return self.monitor(job_id, timeout=timeout)
        elif wait:
            return self.wait(job_id, timeout=timeout)
        return result
| 1.820313 | 2 |
src/backend/expungeservice/models/charge_types/traffic_offense.py | april96415/recordexpungPDX | 38 | 13457 | <filename>src/backend/expungeservice/models/charge_types/traffic_offense.py
from dataclasses import dataclass
from typing import Any
from expungeservice.models.charge import ChargeType
from expungeservice.models.charge import ChargeUtil
from expungeservice.models.expungement_result import TypeEligibility, EligibilityStatus
@dataclass(frozen=True)
class TrafficOffense(ChargeType):
    """Charge type for state/municipal traffic offenses.

    Convictions are categorically ineligible for expungement under
    ORS 137.225(7)(a); dismissed traffic charges are expected to be
    classified by a different charge type upstream.
    """
    # Human-readable label shown in eligibility results.
    type_name: str = "Traffic Offense"
    # Rule text rendered in the UI; nested ("ul", (...)) tuples mark bullet lists.
    expungement_rules: Any = (
        "A conviction for a State or municipal traffic offense is not eligible for expungement under ORS 137.225(7)(a).",
        "Common convictions under this category include:",
        (
            "ul",
            (
                "Reckless Driving",
                "Driving While Suspended",
                "Driving Under the Influence of Intoxicants",
                "Failure to Perform Duties of a Driver",
                "Giving False Information to a Police Officer (when in a car)",
                "Fleeing/Attempting to Elude a Police Officer",
                "Possession of a Stolen Vehicle",
            ),
        ),
        "Notably, Unauthorized Use of a Vehicle is not considered a traffic offense.",
        "A dismissed traffic offense that is of charge level misdemeanor or higher, other than a Diverted DUII, is identified as a Dismissed Criminal Charge, and is thus eligible.",
    )

    def type_eligibility(self, disposition):
        """Return the TypeEligibility for *disposition*.

        Raises ValueError for dismissals (they must be routed to another
        charge type before reaching here); returns INELIGIBLE for
        convictions. Any other disposition falls through and implicitly
        returns None (eligibility undetermined).
        """
        if ChargeUtil.dismissed(disposition):
            raise ValueError("Dismissed criminal charges should have been caught by another class.")
        elif ChargeUtil.convicted(disposition):
            return TypeEligibility(EligibilityStatus.INELIGIBLE, reason="Ineligible under 137.225(7)(a)")
os_migrate/plugins/modules/import_workload_create_instance.py | jbadiapa/os-migrate | 35 | 13458 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Standard Ansible module metadata block (consumed by ansible-doc tooling).
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: import_workload_create_instance
short_description: Create NBD exports of OpenStack volumes
extends_documentation_fragment: openstack
version_added: "2.9.0"
author: "OpenStack tenant migration tools (@os-migrate)"
description:
- "Take an instance from an OS-Migrate YAML structure, and export its volumes over NBD."
options:
auth:
description:
- Dictionary with parameters for chosen auth type on the destination cloud.
required: true
type: dict
auth_type:
description:
- Auth type plugin for destination OpenStack cloud. Can be omitted if using password authentication.
required: false
type: str
region_name:
description:
- Destination OpenStack region name. Can be omitted if using default region.
required: false
type: str
availability_zone:
description:
- Availability zone.
required: false
type: str
cloud:
description:
- Ignored. Present for backwards compatibility.
required: false
type: raw
validate_certs:
description:
- Validate HTTPS certificates when logging in to OpenStack.
required: false
type: bool
data:
description:
- Data structure with server parameters as loaded from OS-Migrate workloads YAML file.
required: true
type: dict
block_device_mapping:
description:
- A block_device_mapping_v2 structure from the transfer_volumes module.
- Used to attach destination volumes to the new instance in the right order.
required: true
type: list
elements: dict
'''
EXAMPLES = '''
main.yml:
- name: validate loaded resources
os_migrate.os_migrate.validate_resource_files:
paths:
- "{{ os_migrate_data_dir }}/workloads.yml"
register: workloads_file_validation
when: import_workloads_validate_file
- name: read workloads resource file
os_migrate.os_migrate.read_resources:
path: "{{ os_migrate_data_dir }}/workloads.yml"
register: read_workloads
- name: get source conversion host address
os_migrate.os_migrate.os_conversion_host_info:
auth:
auth_url: https://src-osp:13000/v3
username: migrate
password: <PASSWORD>
project_domain_id: default
project_name: migration-source
user_domain_id: default
server_id: ce4dda96-5d8e-4b67-aee2-9845cdc943fe
register: os_src_conversion_host_info
- name: get destination conversion host address
os_migrate.os_migrate.os_conversion_host_info:
auth:
auth_url: https://dest-osp:13000/v3
username: migrate
password: <PASSWORD>
project_domain_id: default
project_name: migration-destination
user_domain_id: default
server_id: 2d2afe57-ace5-4187-8fca-5f10f9059ba1
register: os_dst_conversion_host_info
- name: import workloads
include_tasks: workload.yml
loop: "{{ read_workloads.resources }}"
workload.yml:
- block:
- name: preliminary setup for workload import
os_migrate.os_migrate.import_workload_prelim:
auth:
auth_url: https://dest-osp:13000/v3
username: migrate
password: <PASSWORD>
project_domain_id: default
project_name: migration-destination
user_domain_id: default
validate_certs: False
src_conversion_host: "{{ os_src_conversion_host_info.openstack_conversion_host }}"
src_auth:
auth_url: https://src-osp:13000/v3
username: migrate
password: <PASSWORD>
project_domain_id: default
project_name: migration-source
user_domain_id: default
src_validate_certs: False
data: "{{ item }}"
data_dir: "{{ os_migrate_data_dir }}"
register: prelim
- debug:
msg:
- "{{ prelim.server_name }} log file: {{ prelim.log_file }}"
- "{{ prelim.server_name }} progress file: {{ prelim.state_file }}"
when: prelim.changed
- name: expose source volumes
os_migrate.os_migrate.import_workload_export_volumes:
auth: "{{ os_migrate_src_auth }}"
auth_type: "{{ os_migrate_src_auth_type|default(omit) }}"
region_name: "{{ os_migrate_src_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_src_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_src_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_src_client_cert|default(omit) }}"
client_key: "{{ os_migrate_src_client_key|default(omit) }}"
conversion_host:
"{{ os_src_conversion_host_info.openstack_conversion_host }}"
data: "{{ item }}"
log_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.log"
state_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.state"
ssh_key_path: "{{ os_migrate_conversion_keypair_private_path }}"
register: exports
when: prelim.changed
- name: transfer volumes to destination
os_migrate.os_migrate.import_workload_transfer_volumes:
auth: "{{ os_migrate_dst_auth }}"
auth_type: "{{ os_migrate_dst_auth_type|default(omit) }}"
region_name: "{{ os_migrate_dst_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_dst_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_dst_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_dst_client_cert|default(omit) }}"
client_key: "{{ os_migrate_dst_client_key|default(omit) }}"
data: "{{ item }}"
conversion_host:
"{{ os_dst_conversion_host_info.openstack_conversion_host }}"
ssh_key_path: "{{ os_migrate_conversion_keypair_private_path }}"
transfer_uuid: "{{ exports.transfer_uuid }}"
src_conversion_host_address:
"{{ os_src_conversion_host_info.openstack_conversion_host.address }}"
volume_map: "{{ exports.volume_map }}"
state_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.state"
log_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.log"
register: transfer
when: prelim.changed
- name: create destination instance
os_migrate.os_migrate.import_workload_create_instance:
auth: "{{ os_migrate_dst_auth }}"
auth_type: "{{ os_migrate_dst_auth_type|default(omit) }}"
region_name: "{{ os_migrate_dst_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_dst_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_dst_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_dst_client_cert|default(omit) }}"
client_key: "{{ os_migrate_dst_client_key|default(omit) }}"
data: "{{ item }}"
block_device_mapping: "{{ transfer.block_device_mapping }}"
register: os_migrate_destination_instance
when: prelim.changed
rescue:
- fail:
msg: "Failed to import {{ item.params.name }}!"
'''
RETURN = '''
server_id:
description: The ID of the newly created server.
returned: On successful creation of migrated server on destination cloud.
type: str
sample: 059635b7-451f-4a64-978a-7c2e9e4c15ff
'''
from ansible.module_utils.basic import AnsibleModule
# Import openstack module utils from ansible_collections.openstack.cloud.plugins as per ansible 3+
try:
from ansible_collections.openstack.cloud.plugins.module_utils.openstack \
import openstack_full_argument_spec, openstack_cloud_from_module
except ImportError:
# If this fails fall back to ansible < 3 imports
from ansible.module_utils.openstack \
import openstack_full_argument_spec, openstack_cloud_from_module
from ansible_collections.os_migrate.os_migrate.plugins.module_utils import server
def run_module():
    """Create the migrated server on the destination cloud and exit the module.

    Reads ``data`` (a serialized OS-Migrate server resource) and
    ``block_device_mapping`` from the module parameters, creates the server
    through the destination-cloud connection, waits for it to reach ACTIVE,
    and reports ``changed``, ``server`` and ``server_id`` via
    ``module.exit_json`` (which terminates the process).
    """
    argument_spec = openstack_full_argument_spec(
        auth=dict(type='dict', no_log=True, required=True),
        data=dict(type='dict', required=True),
        block_device_mapping=dict(type='list', required=True, elements='dict'),
    )
    result = dict(
        changed=False,
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    # openstack_cloud_from_module returns (sdk, connection); sdk is unused here.
    sdk, conn = openstack_cloud_from_module(module)
    block_device_mapping = module.params['block_device_mapping']
    ser_server = server.Server.from_data(module.params['data'])
    sdk_server = ser_server.create(conn, block_device_mapping)
    # Some info (e.g. flavor ID) will only become available after the
    # server is in ACTIVE state, we need to wait for it.
    sdk_server = conn.compute.wait_for_server(sdk_server, failures=['ERROR'], wait=600)
    dst_ser_server = server.Server.from_sdk(conn, sdk_server)
    if sdk_server:
        result['changed'] = True
        result['server'] = dst_ser_server.data
        result['server_id'] = sdk_server.id
    module.exit_json(**result)
def main():
    """Module entry point."""
    run_module()


if __name__ == '__main__':
    main()
| 1.867188 | 2 |
PythonExercicio/ex081.py | fotavio16/PycharmProjects | 0 | 13459 | <gh_stars>0
# Exercise 081: collect integers until the user stops, then report on the list.
valores = []
while True:
    num = int(input('Digite um valor: '))
    valores.append(num)
    # input() already returns a str; the original wrapped it in a redundant str().
    cont = input('Quer continuar? [S/N] ').upper()
    if cont == 'N':
        break
# "elememtos" typo in the original output message fixed to "elementos".
print(f'Você digitou {len(valores)} elementos.')
valores.sort(reverse=True)
print(f'Os valores em ordem decrescente são {valores}')
if 5 in valores:
    print('O valor 5 faz parte da lista!')
else:
    print('O valor 5 não faz parte da lista.')
huobi/client/margin.py | codemonkey89/huobi_Python | 1 | 13460 | <gh_stars>1-10
from huobi.utils.input_checker import *
class MarginClient(object):
    """Client for Huobi isolated-margin and cross-margin REST endpoints."""

    def __init__(self, **kwargs):
        """
        Create the request client instance.
        :param kwargs: The option of request connection.
            api_key: The public key applied from Huobi.
            secret_key: The private key applied from Huobi.
            url: The URL name like "https://api.huobi.pro".
            init_log: to init logger
        """
        # Connection options are stored verbatim and forwarded to every service request.
        self.__kwargs = kwargs

    def post_transfer_in_margin(self, symbol: 'str', currency: 'str', amount: 'float') -> int:
        """
        Transfer asset from spot account to margin account.
        :param symbol: The symbol, like "btcusdt". (mandatory)
        :param currency: The currency of transfer. (mandatory)
        :param amount: The amount of transfer. (mandatory)
        :return: The transfer id.
        """
        check_symbol(symbol)
        check_should_not_none(currency, "currency")
        check_should_not_none(amount, "amount")
        params = {
            "symbol": symbol,
            "currency": currency,
            "amount": amount
        }
        from huobi.service.margin.post_transfer_in_margin import PostTransferInMarginService
        return PostTransferInMarginService(params).request(**self.__kwargs)

    def post_transfer_out_margin(self, symbol: 'str', currency: 'str', amount: 'float') -> int:
        """
        Transfer asset from margin account to spot account.
        :param symbol: The symbol, like "btcusdt". (mandatory)
        :param currency: The currency of transfer. (mandatory)
        :param amount: The amount of transfer. (mandatory)
        :return: The transfer id.
        """
        check_symbol(symbol)
        check_should_not_none(currency, "currency")
        check_should_not_none(amount, "amount")
        params = {
            "symbol": symbol,
            "currency": currency,
            "amount": amount
        }
        from huobi.service.margin.post_transfer_out_margin import PostTransferOutMarginService
        return PostTransferOutMarginService(params).request(**self.__kwargs)

    def get_margin_account_balance(self, symbol: 'str') -> list:
        """
        Get the Balance of the Margin Loan Account.
        :param symbol: The trading symbol, like "btcusdt". (mandatory)
        :return: The margin loan account detail list.
        """
        check_symbol(symbol)
        params = {
            "symbol": symbol
        }
        from huobi.service.margin.get_margin_account_balance import GetMarginAccountBalanceService
        return GetMarginAccountBalanceService(params).request(**self.__kwargs)

    def post_create_margin_order(self, symbol: 'str', currency: 'str', amount: 'float') -> int:
        """
        Submit a request to borrow with margin account.
        :param symbol: The trading symbol to borrow margin, e.g. "btcusdt", "bccbtc". (mandatory)
        :param currency: The currency to borrow,like "btc". (mandatory)
        :param amount: The amount of currency to borrow. (mandatory)
        :return: The margin order id.
        """
        check_symbol(symbol)
        check_should_not_none(currency, "currency")
        check_should_not_none(amount, "amount")
        params = {
            "symbol": symbol,
            "currency": currency,
            "amount": amount
        }
        from huobi.service.margin.post_create_margin_order import PostCreateMarginOrderService
        return PostCreateMarginOrderService(params).request(**self.__kwargs)

    def post_repay_margin_order(self, loan_id: 'int', amount: 'float') -> int:
        """
        Repay a margin loan order.
        :param loan_id: The previously returned order id when loan order was created. (mandatory)
        :param amount: The amount of currency to repay. (mandatory)
        :return: The margin order id.
        """
        check_should_not_none(loan_id, "loan_id")
        check_should_not_none(amount, "amount")
        params = {
            "loan_id": loan_id,
            "amount": amount
        }
        from huobi.service.margin.post_repay_margin_order import PostRepayMarginOrderService
        return PostRepayMarginOrderService(params).request(**self.__kwargs)

    def get_margin_loan_orders(self, symbol: 'str', start_date: 'str' = None, end_date: 'str' = None,
                               states: 'LoanOrderState' = None, from_id: 'int' = None,
                               size: 'int' = None, direction: 'QueryDirection' = None) -> list:
        """
        Get the margin loan records.
        :param symbol: The symbol, like "btcusdt" (mandatory).
        :param start_date: The search starts date in format yyyy-mm-dd. (optional).
        :param end_date: The search end date in format yyyy-mm-dd.(optional, can be null).
        :param states: The loan order states, it could be created, accrual, cleared or invalid. (optional)
        :param from_id: Search order id to begin with. (optional)
        :param size: The number of orders to return.. (optional)
        :param direction: The query direction, prev or next. (optional)
        :return: The list of the margin loan records.
        """
        check_symbol(symbol)
        start_date = format_date(start_date, "start_date")
        end_date = format_date(end_date, "end_date")
        params = {
            "symbol": symbol,
            "start-date": start_date,
            "end-date": end_date,
            "states": states,
            "from": from_id,
            "size": size,
            "direct": direction
        }
        from huobi.service.margin.get_margin_loan_orders import GetMarginLoanOrdersService
        return GetMarginLoanOrdersService(params).request(**self.__kwargs)

    def get_margin_loan_info(self, symbols: 'str' = None) -> list:
        """
        The request of get margin loan info, can return currency loan info list.
        :param symbols: The symbol, like "btcusdt,htusdt". (optional)
        :return: The margin loan info list.
        """
        check_symbol(symbols)
        params = {
            "symbols": symbols
        }
        from huobi.service.margin.get_margin_loan_info import GetMarginLoanInfoService
        return GetMarginLoanInfoService(params).request(**self.__kwargs)

    def get_cross_margin_loan_info(self) -> list:
        """
        The request of currency loan info list.
        :return: The cross margin loan info list.
        """
        params = {}
        from huobi.service.margin.get_cross_margin_loan_info import GetCrossMarginLoanInfoService
        return GetCrossMarginLoanInfoService(params).request(**self.__kwargs)

    def post_cross_margin_transfer_in(self, currency: 'str', amount: 'float') -> int:
        """
        Transfer currency from the spot account into the cross margin account.
        :param currency: currency name (mandatory)
        :param amount: transfer amount (mandatory)
        :return: return transfer id.
        """
        check_should_not_none(currency, "currency")
        check_should_not_none(amount, "amount")
        params = {
            "amount": amount,
            "currency": currency
        }
        from huobi.service.margin.post_cross_margin_transfer_in import PostCrossMarginTransferInService
        return PostCrossMarginTransferInService(params).request(**self.__kwargs)

    def post_cross_margin_transfer_out(self, currency: 'str', amount: 'float') -> int:
        """
        Transfer currency out of the cross margin account back to the spot account.
        (Original docstring was copied from transfer_in and said "to cross account".)
        :param currency: currency name (mandatory)
        :param amount: transfer amount (mandatory)
        :return: return transfer id.
        """
        check_should_not_none(currency, "currency")
        check_should_not_none(amount, "amount")
        params = {
            "amount": amount,
            "currency": currency
        }
        from huobi.service.margin.post_cross_margin_transfer_out import PostCrossMarginTransferOutService
        return PostCrossMarginTransferOutService(params).request(**self.__kwargs)

    def post_cross_margin_create_loan_orders(self, currency: 'str', amount: 'float') -> int:
        """
        create cross margin loan orders
        :param currency: currency name (mandatory)
        :param amount: transfer amount (mandatory)
        :return: return order id.
        """
        check_should_not_none(currency, "currency")
        check_should_not_none(amount, "amount")
        params = {
            "amount": amount,
            "currency": currency
        }
        from huobi.service.margin.post_cross_margin_create_loan_orders import PostCrossMarginCreateLoanOrdersService
        return PostCrossMarginCreateLoanOrdersService(params).request(**self.__kwargs)

    def post_cross_margin_loan_order_repay(self, order_id: 'str', amount: 'float'):
        """
        repay cross margin loan orders
        :param order_id: order_id for loan (mandatory)
        :param amount: transfer amount (mandatory)
        :return: return order id.
        """
        check_should_not_none(order_id, "order-id")
        check_should_not_none(amount, "amount")
        params = {
            "amount": amount,
            "order-id": order_id
        }
        from huobi.service.margin.post_cross_margin_loan_order_repay import PostCrossMarginLoanOrderRepayService
        return PostCrossMarginLoanOrderRepayService(params).request(**self.__kwargs)

    def get_cross_margin_loan_orders(self, currency: 'str' = None, state: 'str' = None,
                                     start_date: 'str' = None, end_date: 'str' = None,
                                     from_id: 'int' = None, size: 'int' = None, direct: 'str' = None,
                                     sub_uid: 'int' = None) -> list:
        """
        get cross margin loan orders
        :param currency: currency name (optional)
        :param state: loan order state (optional)
        :param start_date: search start date, yyyy-mm-dd (optional)
        :param end_date: search end date, yyyy-mm-dd (optional)
        :param from_id: order id to begin with (optional)
        :param size: number of orders to return (optional)
        :param direct: query direction, prev or next (optional)
        :param sub_uid: sub user id (optional)
        :return: return list.
        """
        params = {
            "currency": currency,
            "state": state,
            "start-date": start_date,
            "end-date": end_date,
            "from": from_id,
            "size": size,
            "direct": direct,
            "sub-uid": sub_uid
        }
        from huobi.service.margin.get_cross_margin_loan_orders import GetCrossMarginLoanOrdersService
        return GetCrossMarginLoanOrdersService(params).request(**self.__kwargs)

    def get_cross_margin_account_balance(self, sub_uid: 'int' = None):
        """
        get cross margin account balance
        :param sub_uid: sub user id (optional)
        :return: cross-margin account.
        """
        params = {
            "sub-uid": sub_uid
        }
        from huobi.service.margin.get_cross_margin_account_balance import GetCrossMarginAccountBalanceService
        return GetCrossMarginAccountBalanceService(params).request(**self.__kwargs)
vine/commit.py | robinson96/GRAPE | 4 | 13461 | <reponame>robinson96/GRAPE
import os
import option
import grapeGit as git
import grapeConfig
import utility
class Commit(option.Option):
    """
    Usage: grape-commit [-m <message>] [-a | <filetree>]

    Options:
        -m <message>    The commit message.
        -a              Commit modified files that have not been staged.

    Arguments:
        <filetree>      The relative path of files to include in this commit.

    """
    # NOTE(review): this docstring is the docopt usage text — its wording must
    # stay in sync with what the option parser expects.

    def __init__(self):
        super(Commit, self).__init__()
        self._key = "commit"
        self._section = "Workspace"

    def description(self):
        # One-line help string shown in grape's command listing.
        return "runs git commit in all projects in this workspace"

    def commit(self, commitargs, repo):
        """Run ``git commit`` with *commitargs*; return True on success.

        Failures (typically "nothing staged") are reported but not raised.
        """
        try:
            git.commit(commitargs)
            return True
        except git.GrapeGitError as e:
            utility.printMsg("Commit in %s failed. Perhaps there were no staged changes? Use -a to commit all modified files." % repo)
            return False

    def execute(self, args):
        """Commit in every modified submodule / nested subproject, then the outer project.

        Committed submodule changes are re-staged in the outer repository so the
        final outer-level commit records the updated submodule pointers.
        """
        commitargs = ""
        if args['-a']:
            commitargs = commitargs + " -a"
        elif args["<filetree>"]:
            commitargs = commitargs + " %s" % args["<filetree>"]
        if not args['-m']:
            args["-m"] = utility.userInput("Please enter commit message:")
        commitargs += " -m \"%s\"" % args["-m"]

        wsDir = utility.workspaceDir()
        os.chdir(wsDir)
        # Submodules need their committed SHAs staged in the outer repo (flag True);
        # nested subprojects are independent checkouts (flag False).
        submodules = [(True, x) for x in git.getModifiedSubmodules()]
        subprojects = [(False, x) for x in grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes()]
        for stage, sub in submodules + subprojects:
            os.chdir(os.path.join(wsDir, sub))
            subStatus = git.status("--porcelain -uno")
            if subStatus:
                utility.printMsg("Committing in %s..." % sub)
                if self.commit(commitargs, sub) and stage:
                    os.chdir(wsDir)
                    utility.printMsg("Staging committed change in %s..." % sub)
                    git.add(sub)
        os.chdir(wsDir)
        # Commit the outer project if it has changes of its own or staged submodule updates.
        if submodules or git.status("--porcelain"):
            utility.printMsg("Performing commit in outer level project...")
            self.commit(commitargs, wsDir)
        return True

    def setDefaultConfig(self, config):
        # No configuration defaults for this command.
        pass
| 2.546875 | 3 |
allopy/optimize/regret/abstract.py | wangcj05/allopy | 1 | 13462 | from abc import ABC
from typing import List, Optional, Union
import numpy as np
from allopy import OptData
from allopy.penalty import NoPenalty, Penalty
__all__ = ["AbstractObjectiveBuilder", "AbstractConstraintBuilder"]
class AbstractObjectiveBuilder(ABC):
    """Base class for regret-optimizer objective builders.

    Normalizes the scenario data, validates scenario/asset consistency and
    manages the per-scenario penalty list (defaults to ``NoPenalty``).
    """

    def __init__(self, data: List[OptData], cvar_data: List[OptData], rebalance: bool, time_unit):
        self.data, self.cvar_data = format_inputs(data, cvar_data, time_unit)
        self.rebalance = rebalance
        self.num_scenarios = len(data)

        assert self.num_scenarios > 0, "Provide data to the optimizer"
        assert self.num_scenarios == len(cvar_data), "data and cvar data must have same number of scenarios"

        self.num_assets = data[0].n_assets
        assert all(d.n_assets == self.num_assets for d in data), \
            f"number of assets in data should equal {self.num_assets}"
        assert all(d.n_assets == self.num_assets for d in cvar_data), \
            f"number of assets in cvar data should equal {self.num_assets}"

        # One penalty object per scenario; NoPenalty by default.
        self._penalties = [NoPenalty(self.num_assets)] * self.num_scenarios

    @property
    def penalties(self):
        """List of per-scenario Penalty objects."""
        return self._penalties

    @penalties.setter
    def penalties(self, penalties):
        """Set penalties from None (reset), a single Penalty, or an iterable of them."""
        assert penalties is None or isinstance(penalties, Penalty) or hasattr(penalties, "__iter__"), \
            "penalties can be None, a subclass of the Penalty class or a list which subclasses the Penalty class"

        if penalties is None:
            self._penalties = [NoPenalty(self.num_assets)] * self.num_scenarios
        elif isinstance(penalties, Penalty):
            # BUG FIX: original tested isinstance(penalties, penalties), which
            # raises TypeError (second argument must be a type) for any Penalty.
            self._penalties = [penalties] * self.num_scenarios
        else:
            penalties = list(penalties)
            assert len(penalties) == self.num_scenarios, "number of penalties given must match number of scenarios"
            assert all(isinstance(p, Penalty) for p in penalties), "non-Penalty instance detected"
            self._penalties = penalties
class AbstractConstraintBuilder(ABC):
    """Base class for regret-optimizer constraint builders."""

    def __init__(self, data: List[OptData], cvar_data: List[OptData], rebalance: bool, time_unit):
        # Normalize inputs into lists of OptData (see format_inputs below).
        self.data, self.cvar_data = format_inputs(data, cvar_data, time_unit)
        self.rebalance = rebalance
        self.num_scenarios = len(self.data)
def format_inputs(data: List[Union[OptData, np.ndarray]],
cvar_data: Optional[List[Union[OptData, np.ndarray]]],
time_unit: int):
data = [d if isinstance(data, OptData) else OptData(d, time_unit) for d in data]
if cvar_data is None:
return [d.cut_by_horizon(3) for d in data]
else:
cvar_data = [c if isinstance(c, OptData) else OptData(c, time_unit) for c in cvar_data]
return data, cvar_data
| 3.03125 | 3 |
dataset-processor3.py | Pawel762/class5-homework | 0 | 13463 | import os
import pandas as pd
import matplotlib.pyplot as plt
# Load the UCI Wine dataset (raw file has no header row).
wine_df = pd.read_csv(filepath_or_buffer='~/class5-homework/wine.data',
                      sep=',',
                      header=None)
wine_df.columns = ['Class', 'Alcohol', 'Malic_Acid', 'Ash', 'Alcalinity_of_Ash', 'Magnesium',
                   'Total_Phenols', 'Flavanoids', 'Nonflavanoid_Phenols', 'Proanthocyanins',
                   'Color_Intensity', 'Hue', 'OD280_OD315_of_Diluted_Wines', 'Proline']

# Drop the target column; only the 13 features are plotted.
wine_B = wine_df.drop(['Class'], axis=1)

os.makedirs('graphs', exist_ok=True)

# The original repeated the same 6-line plotting stanza 13 times (once per
# feature). One loop produces byte-identical titles, labels and filenames:
# graphs/<Feature>_by_index_plot.png
for col in wine_B.columns:
    plt.plot(wine_B[col], color='g')
    plt.title(f'{col} by Index')
    plt.xlabel('Index')
    plt.ylabel(col)
    plt.savefig(f'graphs/{col}_by_index_plot.png', format='png')
    plt.clf()
| 2.921875 | 3 |
ares/attack/bim.py | KuanKuanQAQ/ares | 206 | 13464 | <reponame>KuanKuanQAQ/ares
import tensorflow as tf
import numpy as np
from ares.attack.base import BatchAttack
from ares.attack.utils import get_xs_ph, get_ys_ph, maybe_to_array, get_unit
class BIM(BatchAttack):
    ''' Basic Iterative Method (BIM). A white-box iterative constraint-based method. Require a differentiable loss
    function and a ``ares.model.Classifier`` model.

    - Supported distance metric: ``l_2``, ``l_inf``.
    - Supported goal: ``t``, ``tm``, ``ut``.
    - References: https://arxiv.org/abs/1607.02533.
    '''

    def __init__(self, model, batch_size, loss, goal, distance_metric, session, iteration_callback=None):
        ''' Initialize BIM.

        :param model: The model to attack. A ``ares.model.Classifier`` instance.
        :param batch_size: Batch size for the ``batch_attack()`` method.
        :param loss: The loss function to optimize. A ``ares.loss.Loss`` instance.
        :param goal: Adversarial goals. All supported values are ``'t'``, ``'tm'``, and ``'ut'``.
        :param distance_metric: Adversarial distance metric. All supported values are ``'l_2'`` and ``'l_inf'``.
        :param session: The ``tf.Session`` to run the attack in. The ``model`` should be loaded into this session.
        :param iteration_callback: A function accept a ``xs`` ``tf.Tensor`` (the original examples) and a ``xs_adv``
            ``tf.Tensor`` (the adversarial examples for ``xs``). During ``batch_attack()``, this callback function would
            be runned after each iteration, and its return value would be yielded back to the caller. By default,
            ``iteration_callback`` is ``None``.
        '''
        self.model, self.batch_size, self._session = model, batch_size, session
        # NOTE(review): self.loss first holds the loss *function*; it is
        # reassigned below to the loss *tensor* built from it.
        self.loss, self.goal, self.distance_metric = loss, goal, distance_metric
        # placeholder for batch_attack's input
        self.xs_ph = get_xs_ph(model, batch_size)
        self.ys_ph = get_ys_ph(model, batch_size)
        # flatten shape of xs_ph
        xs_flatten_shape = (batch_size, np.prod(self.model.x_shape))
        # store xs and ys in variables to reduce memory copy between tensorflow and python
        # variable for the original example with shape of (batch_size, D)
        self.xs_var = tf.Variable(tf.zeros(shape=xs_flatten_shape, dtype=self.model.x_dtype))
        # variable for labels
        self.ys_var = tf.Variable(tf.zeros(shape=(batch_size,), dtype=self.model.y_dtype))
        # variable for the (hopefully) adversarial example with shape of (batch_size, D)
        self.xs_adv_var = tf.Variable(tf.zeros(shape=xs_flatten_shape, dtype=self.model.x_dtype))
        # magnitude
        self.eps_ph = tf.placeholder(self.model.x_dtype, (self.batch_size,))
        self.eps_var = tf.Variable(tf.zeros((self.batch_size,), dtype=self.model.x_dtype))
        # step size
        self.alpha_ph = tf.placeholder(self.model.x_dtype, (self.batch_size,))
        self.alpha_var = tf.Variable(tf.zeros((self.batch_size,), dtype=self.model.x_dtype))
        # expand dim for easier broadcast operations
        eps = tf.expand_dims(self.eps_var, 1)
        alpha = tf.expand_dims(self.alpha_var, 1)
        # calculate loss' gradient with relate to the adversarial example
        # grad.shape == (batch_size, D)
        self.xs_adv_model = tf.reshape(self.xs_adv_var, (batch_size, *self.model.x_shape))
        self.loss = loss(self.xs_adv_model, self.ys_var)
        grad = tf.gradients(self.loss, self.xs_adv_var)[0]
        # targeted goals descend the loss; untargeted ascends it
        if goal == 't' or goal == 'tm':
            grad = -grad
        elif goal != 'ut':
            raise NotImplementedError
        # update the adversarial example
        if distance_metric == 'l_2':
            grad_unit = get_unit(grad)
            xs_adv_delta = self.xs_adv_var - self.xs_var + alpha * grad_unit
            # clip by max l_2 magnitude of adversarial noise
            xs_adv_next = self.xs_var + tf.clip_by_norm(xs_adv_delta, eps, axes=[1])
        elif distance_metric == 'l_inf':
            xs_lo, xs_hi = self.xs_var - eps, self.xs_var + eps
            grad_sign = tf.sign(grad)
            # clip by max l_inf magnitude of adversarial noise
            xs_adv_next = tf.clip_by_value(self.xs_adv_var + alpha * grad_sign, xs_lo, xs_hi)
        else:
            raise NotImplementedError
        # clip by (x_min, x_max)
        xs_adv_next = tf.clip_by_value(xs_adv_next, self.model.x_min, self.model.x_max)
        self.update_xs_adv_step = self.xs_adv_var.assign(xs_adv_next)
        self.config_eps_step = self.eps_var.assign(self.eps_ph)
        self.config_alpha_step = self.alpha_var.assign(self.alpha_ph)
        # xs_adv starts at xs (zero perturbation)
        self.setup_xs = [self.xs_var.assign(tf.reshape(self.xs_ph, xs_flatten_shape)),
                         self.xs_adv_var.assign(tf.reshape(self.xs_ph, xs_flatten_shape))]
        self.setup_ys = self.ys_var.assign(self.ys_ph)

        self.iteration = None

        self.iteration_callback = None
        if iteration_callback is not None:
            xs_model = tf.reshape(self.xs_var, (self.batch_size, *self.model.x_shape))
            self.iteration_callback = iteration_callback(xs_model, self.xs_adv_model)

    def config(self, **kwargs):
        ''' (Re)config the attack.

        :param magnitude: Max distortion, could be either a float number or a numpy float number array with shape of
            (batch_size,).
        :param alpha: Step size for each iteration, could be either a float number or a numpy float number array with
            shape of (batch_size,).
        :param iteration: Iteration count. An integer.
        '''
        if 'magnitude' in kwargs:
            eps = maybe_to_array(kwargs['magnitude'], self.batch_size)
            self._session.run(self.config_eps_step, feed_dict={self.eps_ph: eps})
        if 'alpha' in kwargs:
            alpha = maybe_to_array(kwargs['alpha'], self.batch_size)
            self._session.run(self.config_alpha_step, feed_dict={self.alpha_ph: alpha})
        if 'iteration' in kwargs:
            self.iteration = kwargs['iteration']

    def _batch_attack_generator(self, xs, ys, ys_target):
        ''' Attack a batch of examples. It is a generator which yields back ``iteration_callback()``'s return value
        after each iteration if the ``iteration_callback`` is not ``None``, and returns the adversarial examples.
        '''
        # targeted attacks optimize towards ys_target, untargeted away from ys
        labels = ys if self.goal == 'ut' else ys_target
        self._session.run(self.setup_xs, feed_dict={self.xs_ph: xs})
        self._session.run(self.setup_ys, feed_dict={self.ys_ph: labels})
        for _ in range(self.iteration):
            self._session.run(self.update_xs_adv_step)
            if self.iteration_callback is not None:
                yield self._session.run(self.iteration_callback)
        return self._session.run(self.xs_adv_model)

    def batch_attack(self, xs, ys=None, ys_target=None):
        ''' Attack a batch of examples.

        :return: When the ``iteration_callback`` is ``None``, return the generated adversarial examples. When the
            ``iteration_callback`` is not ``None``, return a generator, which yields back the callback's return value
            after each iteration and returns the generated adversarial examples.
        '''
        g = self._batch_attack_generator(xs, ys, ys_target)
        if self.iteration_callback is None:
            try:
                next(g)
            except StopIteration as exp:
                # generator return value arrives as StopIteration.value
                return exp.value
        else:
            return g
| 2.84375 | 3 |
parasite/resolver.py | SGevorg/parasite | 9 | 13465 | <gh_stars>1-10
import numpy as np
from functools import lru_cache
from typing import Tuple
class DynamicResolver:
    """Dynamic-programming resolver for aligning source and target line windows.

    Recursively finds the highest-scoring monotone alignment path over the
    score matrix, considering windows of up to ``max_k`` consecutive lines on
    either side (but never >1-to->1 window pairs).

    NOTE(review): ``lru_cache`` on instance methods keys on ``self`` and keeps
    the instance (and its caches) alive for the interpreter lifetime; fine for
    short-lived resolvers, but worth confirming for long-running processes.
    """

    def __init__(self,
                 matrix: np.ndarray,
                 *,
                 num_src_lines: int = None,
                 num_tgt_lines: int = None,
                 max_k: int = 3,
                 windows_importance: bool = False
                 ):
        # NOTE(review): assumes matrix entries are in [0, 100] (100 - x flips
        # them into the score that resolve() maximizes) -- TODO confirm.
        self.matrix = 100 - matrix
        self.max_k = max_k
        self.windows_importance = windows_importance
        self.n, self.m = matrix.shape
        # Default the logical line counts to the matrix dimensions.
        self.num_src_lines = num_src_lines or self.n
        self.num_tgt_lines = num_tgt_lines or self.m

    def __call__(self) -> Tuple[float, Tuple]:
        """Run the resolver; return (best score, nested best path tuple)."""
        best, path = self.resolve()
        return best, path

    @lru_cache(maxsize=None)
    def offset(self,
               begin: int,
               end: int,
               num_lines: int) -> int:
        """Row/column offset in the matrix for the window [begin, end)."""
        if end - begin == 1:
            # A single-line window maps directly to its line index.
            return begin
        # Wider windows are laid out after all narrower ones; recurse on the
        # next-narrower window and skip over its layout row.
        num_window_elements = num_lines - (end - begin) + 2
        prev_offset = self.offset(begin, end - 1, num_lines)
        return prev_offset + num_window_elements

    def extract_candidate(self,
                          i: int, src_window_size: int,
                          j: int, tgt_window_size: int,) -> Tuple[float, Tuple]:
        """Score the path that ends by pairing the given src/tgt windows."""
        from_i = i - src_window_size
        from_j = j - tgt_window_size
        if from_i < 0 or from_j < 0:
            return 0, ()
        candidate_score, candidate_path = self.resolve(from_i, from_j)
        # A zero-size window on either side means lines were skipped, not paired.
        if src_window_size == 0 or tgt_window_size == 0:
            return candidate_score, candidate_path
        offset_i = self.offset(from_i, i, self.num_src_lines)
        offset_j = self.offset(from_j, j, self.num_tgt_lines)
        if offset_i >= self.n or offset_j >= self.m:
            return 0, ()
        added_score = self.matrix[offset_i, offset_j]
        if self.windows_importance:
            # Optionally weight a pairing by the total number of lines it covers.
            added_score *= (src_window_size + tgt_window_size)
        candidate_score += added_score
        candidate_path = ((offset_i, offset_j), candidate_path)
        return candidate_score, candidate_path

    @lru_cache(maxsize=None)
    def resolve(self,
                i: int = None, j: int = None) -> Tuple[float, Tuple]:
        """Best (score, path) for aligning the first i src and j tgt lines."""
        if i is None:
            i = self.num_src_lines
        if j is None:
            j = self.num_tgt_lines
        if i <= 0 or j <= 0:
            return 0, ()

        best_score: float = 0.0
        best_path: Tuple = ()
        for src_window_size in range(self.max_k + 1):
            for tgt_window_size in range(self.max_k + 1):
                if src_window_size == 0 and tgt_window_size == 0:
                    continue
                # Only 1-to-many / many-to-1 window pairings are allowed.
                if src_window_size > 1 and tgt_window_size > 1:
                    continue
                candidate = self.extract_candidate(i, src_window_size,
                                                   j, tgt_window_size)
                candidate_score, candidate_path = candidate
                if candidate_score > best_score:
                    best_score = candidate_score
                    best_path = candidate_path
        return best_score, best_path
| 2.28125 | 2 |
utils/preprocess.py | Deep-MI/3d-neuro-seg | 0 | 13466 | import numpy as np
"""
Contains preprocessing code for creating additional information based on MRI volumes and true segmentation maps (asegs).
Eg. weight masks for median frequency class weighing, edge weighing etc.
"""
def create_weight_mask(aseg):
    """
    Main function for calculating weight mask of segmentation map for loss function. Currently only Median Frequency
    Weighing is implemented. Other types can be additively added to the 'weights' variable

    Args:
        aseg (numpy.ndarray): Segmentation map with shape h x w x d (or 1 x h x w x d)

    Returns:
        numpy.ndarray: Weight mask of shape h x w x d

    Raises:
        ValueError: If aseg is not 3- or 4-dimensional. (The original fell
            through both branches and died with a NameError instead.)
    """
    if len(aseg.shape) == 4:
        _, h, w, d = aseg.shape
    elif len(aseg.shape) == 3:
        h, w, d = aseg.shape
    else:
        raise ValueError(f"aseg must be 3D or 4D, got shape {aseg.shape}")

    weights = np.zeros((h, w, d), dtype=float)  # Container ndarray of zeros for weights
    weights += median_freq_class_weighing(aseg)  # Add median frequency weights

    # Further weights (eg. extra weights for region borders) can be added here
    # Eg. weights += edge_weights(aseg)

    return weights
def median_freq_class_weighing(aseg):
    """
    Median Frequency Weighing: each voxel gets weight median(class counts) /
    count(its class). Guarded against the absence of certain classes.

    Args:
        aseg (numpy.ndarray): Segmentation map with shape h x w x d (or 1 x h x w x d)

    Returns:
        numpy.ndarray: Median-frequency-weighted mask of shape h x w x d
    """
    present_labels, voxel_counts = np.unique(aseg, return_counts=True)

    shape = aseg.shape
    if len(shape) == 4:
        _, h, w, d = shape
    elif len(shape) == 3:
        h, w, d = shape

    # Weight per present class: median of all class counts over this class's count.
    per_class_weight = np.median(voxel_counts) / voxel_counts

    # Lookup table mapping raw label value -> index into per_class_weight.
    # Missing labels keep -1, guarding against absent classes in the sample.
    labels_int = aseg.astype(int)
    lut = np.full(int(max(present_labels)) + 1, -1, dtype=int)
    lut[present_labels.astype(int)] = np.arange(len(present_labels))

    # Translate every voxel's label to its class weight and restore the shape.
    return np.reshape(per_class_weight[lut[labels_int.ravel()]], (h, w, d))
# Label mapping functions (to aparc (eval) and to label (train))
def map_label2aparc_aseg(mapped_aseg):
    """
    Perform look-up table mapping from label space back to aparc.DKTatlas+aseg space.

    Args:
        mapped_aseg (numpy.ndarray): label-space segmentation (aparc.DKTatlas + aseg)
            of shape h x w x d, containing class indices in [0, 78].

    Returns:
        numpy.ndarray: volume of the same shape with FreeSurfer label codes.
    """
    # FreeSurfer label codes; label-space index i maps to labels[i].
    labels = np.array([0, 2, 4, 5, 7, 8, 10, 11, 12, 13, 14,
                       15, 16, 17, 18, 24, 26, 28, 31, 41, 43, 44,
                       46, 47, 49, 50, 51, 52, 53, 54, 58, 60, 63,
                       77, 1002, 1003, 1005, 1006, 1007, 1008, 1009, 1010, 1011,
                       1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022,
                       1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1034, 1035,
                       2002, 2005, 2010, 2012, 2013, 2014, 2016, 2017, 2021, 2022, 2023,
                       2024, 2025, 2028])
    # Flatten, translate through the LUT, restore the volume shape.
    # (The original also allocated np.zeros_like(mapped_aseg) that was
    # immediately overwritten -- removed as dead code.)
    h, w, d = mapped_aseg.shape
    return labels[mapped_aseg.ravel()].reshape((h, w, d))
# if __name__ == "__main__":
# #a = np.random.randint(0, 5, size=(10,10,10))
# #b = np.random.randint(5, 10, size=(10000))
#
# #map_masks_into_5_classes(np.random.randint(0, 250, size=(256, 256, 256)))
#
# import nibabel as nib
# from data_utils.process_mgz_into_hdf5 import map_aparc_aseg2label, map_aseg2label
# path = r"abide_ii/sub-28675/mri/aparc.DKTatlas+aseg.mgz"
# aseg = nib.load(path).get_data()
# labels_full, _ = map_aparc_aseg2label(aseg) # only for 79 classes case
# # labels_full, _ = map_aseg2label(aseg) # only for 37 classes case
# aseg = labels_full
# # print(aseg.shape)
# median_freq_class_weighing(aseg)
# # print(edge_weighing(aseg, 1.5))
| 3.09375 | 3 |
test/test_oneview_hypervisor_cluster_profile_facts.py | nabhajit-ray/oneview-ansible | 108 | 13467 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import pytest
import mock
from copy import deepcopy
from hpe_test_utils import OneViewBaseFactsTest
from oneview_module_loader import HypervisorClusterProfileFactsModule
# URI of the cluster-profile resource used by the "with all options" test below.
PROFILE_URI = '/rest/hypervisor-cluster-profiles/57d3af2a-b6d2-4446-8645-f38dd808ea4d'
# Parameter sets injected into the mocked Ansible module by the test cases.
PARAMS_GET_ALL = dict(
    config='config.json'
)
PARAMS_GET_BY_NAME = dict(
    config='config.json',
    name="Test Cluster Profile"
)
PARAMS_GET_BY_URI = dict(
    config='config.json',
    uri="/rest/test/123"
)
# Requests the optional 'compliancePreview' sub-resource in addition to the profile.
PARAMS_WITH_OPTIONS = dict(
    config='config.json',
    name="Test Cluster Profile",
    options=[
        'compliancePreview',
    ]
)
@pytest.mark.resource(TestHypervisorClusterProfileFactsModule='hypervisor_cluster_profiles')
class TestHypervisorClusterProfileFactsModule(OneViewBaseFactsTest):
    """
    FactsParamsTestCase has common tests for the parameters support.
    """

    def test_should_get_all_cluster_profiles(self):
        all_profiles = [
            {"name": "Cluster Profile Name 1"},
            {"name": "Cluster Profile Name 2"},
        ]
        self.mock_ov_client.hypervisor_cluster_profiles.get_all.return_value = all_profiles
        self.mock_ansible_module.params = deepcopy(PARAMS_GET_ALL)

        HypervisorClusterProfileFactsModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(hypervisor_cluster_profiles=all_profiles)
        )

    def test_should_get_by_name(self):
        expected_profile = {"name": "Test Cluster Profile", 'uri': '/rest/test/123'}
        resource = mock.Mock()
        resource.data = expected_profile
        self.mock_ov_client.hypervisor_cluster_profiles.get_by_name.return_value = resource
        self.mock_ansible_module.params = deepcopy(PARAMS_GET_BY_NAME)

        HypervisorClusterProfileFactsModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(hypervisor_cluster_profiles=[expected_profile])
        )

    def test_should_get_by_uri(self):
        expected_profile = {"name": "Test Cluster Profile", 'uri': '/rest/test/123'}
        resource = mock.Mock()
        resource.data = expected_profile
        self.mock_ov_client.hypervisor_cluster_profiles.get_by_uri.return_value = resource
        self.mock_ansible_module.params = deepcopy(PARAMS_GET_BY_URI)

        HypervisorClusterProfileFactsModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(hypervisor_cluster_profiles=[expected_profile])
        )

    def test_should_get_cluster_profile_by_name_with_all_options(self):
        compliance_preview = {'subresource': 'value'}
        profiles_api = self.mock_ov_client.hypervisor_cluster_profiles
        profiles_api.data = {"name": "Test Cluster Profile", "uri": PROFILE_URI}
        # get_by_name returns the resource object itself so the module can
        # fetch sub-resources from it afterwards.
        profiles_api.get_by_name.return_value = profiles_api
        profiles_api.get_compliance_preview.return_value = compliance_preview
        self.mock_ansible_module.params = deepcopy(PARAMS_WITH_OPTIONS)

        HypervisorClusterProfileFactsModule().run()

        profiles_api.get_compliance_preview.assert_called_once_with()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts={'hypervisor_cluster_profiles': [{'name': 'Test Cluster Profile', 'uri': PROFILE_URI}],
                           'hypervisor_cluster_profile_compliance_preview': compliance_preview,
                           }
        )
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    pytest.main([__file__])
| 1.835938 | 2 |
utils/predictions.py | jaingaurav3/ML_sample | 19 | 13468 | import os
import scipy
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
def predict_batch(net, inputs):
    """Run *net* on one batch (moved to the GPU) and return predictions as numpy.

    Fix: the original wrapped the input in ``Variable(..., volatile=True)``;
    ``volatile`` was removed in PyTorch 0.4 and is a silent no-op since then,
    so the autograd graph was being built during inference.  ``torch.no_grad()``
    is the supported way to disable gradient tracking.
    """
    with torch.no_grad():
        return net(inputs.cuda()).cpu().numpy()
def get_probabilities(model, loader):
    """Return the stacked per-batch predictions of *model* over *loader*.

    Puts the model in eval mode, then predicts each batch and stacks the
    results into one 2-D array.
    """
    model.eval()
    # Fix: np.vstack on a bare generator is deprecated and raises on
    # NumPy >= 1.16 — materialise the per-batch predictions in a list first.
    return np.vstack([predict_batch(model, data[0]) for data in loader])
def get_predictions(probs, thresholds):
    """Binarise probabilities: 1 where prob >= threshold, else 0 (uint8)."""
    binarised = (probs >= thresholds)
    return binarised.astype('uint8')
def get_argmax(output):
    """Return the per-row argmax of a 2-D tensor as a flat numpy array."""
    _, best_indices = torch.max(output, dim=1)
    return best_indices.data.cpu().view(-1).numpy()
def get_targets(loader):
    """Stack the target tensors of every batch in *loader* into one 2-D array.

    1-D target batches are reshaped into column vectors so every batch
    stacks consistently.
    """
    targets = None
    for data in loader:
        target = data[1]
        if targets is None:
            shape = list(target.size())
            if len(shape) == 1:
                # Fix: for 1-D targets the original built a (0,)-shaped
                # accumulator, which np.vstack cannot concatenate with the
                # (n, 1) column vectors produced below (ValueError).
                shape = [0, 1]
            else:
                shape[0] = 0
            targets = np.empty(shape)
        if len(target.size()) == 1:
            target = target.view(-1, 1)
        targets = np.vstack([targets, target.numpy()])
    return targets
def ensemble_with_method(arr, method):
    """Combine a stack of prediction arrays along axis 0.

    NOTE(review): ``c`` (the constants module providing MEAN/GMEAN/VOTE) is
    never imported in this file, so calling this function currently raises
    NameError — confirm the intended import (e.g. ``import constants as c``).
    """
    if method == c.MEAN:
        return np.mean(arr, axis=0)
    elif method == c.GMEAN:
        # Geometric mean across the ensemble members.
        return scipy.stats.mstats.gmean(arr, axis=0)
    elif method == c.VOTE:
        # scipy.stats.mode returns (mode_values, counts); keep the mode row.
        return scipy.stats.mode(arr, axis=0)[0][0]
    raise Exception("Operation not found")
gammapy/data/tests/test_pointing.py | Rishank2610/gammapy | 155 | 13469 | <gh_stars>100-1000
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
from astropy.time import Time
from gammapy.data import FixedPointingInfo, PointingInfo
from gammapy.utils.testing import assert_time_allclose, requires_data
@requires_data()
class TestFixedPointingInfo:
    """Regression checks for FixedPointingInfo loaded from the reference table."""

    @classmethod
    def setup_class(cls):
        filename = "$GAMMAPY_DATA/tests/pointing_table.fits.gz"
        cls.pointing = FixedPointingInfo.read(filename)

    def test_location(self):
        geo_lon, geo_lat, geo_height = self.pointing.location.geodetic
        assert_allclose(geo_lon.deg, 16.5002222222222)
        assert_allclose(geo_lat.deg, -23.2717777777778)
        assert_allclose(geo_height.value, 1834.999999999783)

    def test_time_ref(self):
        reference = Time(51910.00074287037, format="mjd", scale="tt")
        assert_time_allclose(self.pointing.time_ref, reference)

    def test_time_start(self):
        expected = Time(53025.826414166666, format="mjd", scale="tt")
        assert_time_allclose(self.pointing.time_start, expected)

    def test_time_stop(self):
        expected = Time(53025.844770648146, format="mjd", scale="tt")
        assert_time_allclose(self.pointing.time_stop, expected)

    def test_duration(self):
        assert_allclose(self.pointing.duration.sec, 1586.0000000044238)

    def test_radec(self):
        radec = self.pointing.radec
        assert_allclose(radec.ra.deg, 83.633333333333)
        assert_allclose(radec.dec.deg, 24.51444444)
        assert radec.name == "icrs"

    def test_altaz(self):
        altaz = self.pointing.altaz
        assert_allclose(altaz.az.deg, 7.48272)
        assert_allclose(altaz.alt.deg, 41.84191)
        assert altaz.name == "altaz"
@requires_data()
class TestPointingInfo:
    """Regression checks for PointingInfo loaded from the reference table."""

    @classmethod
    def setup_class(cls):
        filename = "$GAMMAPY_DATA/tests/pointing_table.fits.gz"
        cls.info = PointingInfo.read(filename)

    def test_str(self):
        assert "Pointing info" in str(self.info)

    def test_location(self):
        geo_lon, geo_lat, geo_height = self.info.location.geodetic
        assert_allclose(geo_lon.deg, 16.5002222222222)
        assert_allclose(geo_lat.deg, -23.2717777777778)
        assert_allclose(geo_height.value, 1834.999999999783)

    def test_time_ref(self):
        reference = Time(51910.00074287037, format="mjd", scale="tt")
        assert_time_allclose(self.info.time_ref, reference)

    def test_table(self):
        assert len(self.info.table) == 100

    def test_time(self):
        times = self.info.time
        assert len(times) == 100
        first = Time(53025.826414166666, format="mjd", scale="tt")
        assert_time_allclose(times[0], first)

    def test_duration(self):
        assert_allclose(self.info.duration.sec, 1586.0000000044238)

    def test_radec(self):
        radec = self.info.radec[0]
        assert_allclose(radec.ra.deg, 83.633333333333)
        assert_allclose(radec.dec.deg, 24.51444444)
        assert radec.name == "icrs"

    def test_altaz(self):
        altaz = self.info.altaz[0]
        assert_allclose(altaz.az.deg, 11.45751357)
        assert_allclose(altaz.alt.deg, 41.34088901)
        assert altaz.name == "altaz"

    def test_altaz_from_table(self):
        altaz = self.info.altaz_from_table[0]
        assert_allclose(altaz.az.deg, 11.20432353385406)
        assert_allclose(altaz.alt.deg, 41.37921408774436)
        assert altaz.name == "altaz"

    def test_altaz_interpolate(self):
        first_time = self.info.time[0]
        altaz = self.info.altaz_interpolate(first_time)
        assert_allclose(altaz.az.deg, 11.45751357)
        assert_allclose(altaz.alt.deg, 41.34088901)
        assert altaz.name == "altaz"
| 1.929688 | 2 |
src/config.py | BRAVO68WEB/architus | 0 | 13470 | <gh_stars>0
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# from src.commands import *
# import src.commands as command_modules
# Credentials loaded from the local ``.secret_token`` file (one value per line).
secret_token = None
db_user = None
db_pass = None
# Cache of SQLAlchemy sessions, keyed by process id (see get_session()).
sessions = {}
try:
    # Fix: open the file with a context manager so the handle is always
    # closed (the original leaked the file object).
    with open('.secret_token') as token_file:
        lines = [line.rstrip('\n') for line in token_file]
    secret_token = lines[0]
    db_user = lines[1]
    db_pass = lines[2]
    client_id = lines[3]
    client_secret = lines[4]
    twitter_consumer_key = lines[5]
    twitter_consumer_secret = lines[6]
    twitter_access_token_key = lines[7]
    twitter_access_token_secret = lines[8]
    scraper_token = lines[9]
except Exception as e:
    print(e)
    print('error reading .secret_token; create it with one credential per line')
def get_session(pid=None):
    """Return a cached SQLAlchemy session for *pid*, creating one on demand.

    On connection failure the error is printed and ``None`` is returned.
    """
    if pid in sessions:
        return sessions[pid]
    print("creating postgres session")
    try:
        db_engine = create_engine("postgresql://{}:{}@localhost/autbot".format(db_user, db_pass))
        session_factory = sessionmaker(bind=db_engine)
        db_session = session_factory()
        sessions[pid] = db_session
    except Exception as e:
        db_session = None
        print('failed to connect to database')
        print(e)
    return db_session
session = get_session()
| 2.78125 | 3 |
aict_tools/scripts/plot_regressor_performance.py | LukasBeiske/aict-tools | 0 | 13471 | import click
import logging
import matplotlib
import matplotlib.pyplot as plt
import joblib
import fact.io
from ..configuration import AICTConfig
from ..plotting import (
plot_regressor_confusion,
plot_bias_resolution,
plot_feature_importances,
)
if matplotlib.get_backend() == 'pgf':
from matplotlib.backends.backend_pgf import PdfPages
else:
from matplotlib.backends.backend_pdf import PdfPages
@click.command()
@click.argument('configuration_path', type=click.Path(exists=True, dir_okay=False))
@click.argument('performance_path', type=click.Path(exists=True, dir_okay=False))
@click.argument('model_path', type=click.Path(exists=True, dir_okay=False))
@click.option('-o', '--output', type=click.Path(exists=False, dir_okay=False))
@click.option('-k', '--key', help='HDF5 key for hdf5', default='data')
def main(configuration_path, performance_path, model_path, output, key):
    ''' Create some performance evaluation plots for the separator

    Reads the performance HDF5 file and the pickled model, then draws
    confusion plots (log and linear colour scale), a bias/resolution plot
    and — if the model exposes them — feature importances.  With -o the
    figures are written into a multi-page PDF, otherwise they are shown
    interactively.
    '''
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger()
    log.info('Loading perfomance data')
    df = fact.io.read_data(performance_path, key=key)
    log.info('Loading model')
    model = joblib.load(model_path)
    config = AICTConfig.from_yaml(configuration_path)
    # This script evaluates the energy regressor specifically.
    model_config = config.energy
    energy_unit = config.energy_unit
    figures = []
    # Plot confusion (log colour scale)
    figures.append(plt.figure())
    ax = figures[-1].add_subplot(1, 1, 1)
    ax.set_title('Reconstructed vs. True Energy (log color scale)')
    plot_regressor_confusion(
        df, ax=ax,
        label_column=model_config.target_column,
        prediction_column=model_config.output_name,
        energy_unit=energy_unit,
    )
    # Same confusion plot with a linear colour scale
    figures.append(plt.figure())
    ax = figures[-1].add_subplot(1, 1, 1)
    ax.set_title('Reconstructed vs. True Energy (linear color scale)')
    plot_regressor_confusion(
        df, log_z=False, ax=ax,
        label_column=model_config.target_column,
        prediction_column=model_config.output_name,
        energy_unit=energy_unit,
    )
    # Plot bias/resolution as a function of energy
    figures.append(plt.figure())
    ax = figures[-1].add_subplot(1, 1, 1)
    ax.set_title('Bias and Resolution')
    plot_bias_resolution(
        df, bins=15, ax=ax,
        label_column=model_config.target_column,
        prediction_column=model_config.output_name,
        energy_unit=energy_unit,
    )
    # Only tree-based sklearn models expose feature_importances_
    if hasattr(model, 'feature_importances_'):
        # Plot feature importances
        figures.append(plt.figure())
        ax = figures[-1].add_subplot(1, 1, 1)
        features = model_config.features
        plot_feature_importances(model, features, ax=ax)
    if output is None:
        plt.show()
    else:
        # Write all figures into one multi-page PDF.
        with PdfPages(output) as pdf:
            for fig in figures:
                fig.tight_layout(pad=0)
                pdf.savefig(fig)
| 2.203125 | 2 |
kenlm_training/cc_net/tokenizer.py | ruinunca/data_tooling | 435 | 13472 | <reponame>ruinunca/data_tooling
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from typing import Dict, Optional
import sacremoses # type: ignore
from cc_net import jsonql, text_normalizer
class RobustTokenizer(jsonql.Transformer):
    """Moses tokenizer with the expected preprocessing applied first."""

    # Languages for which accents are stripped during normalization.
    LANG_WITHOUT_ACCENT = {"en", "my"}

    def __init__(self, lang: str):
        super().__init__()
        self.lang = lang
        self.moses = sacremoses.MosesTokenizer(lang)
        self.rm_accent = lang in self.LANG_WITHOUT_ACCENT
        self.ready = True

    def do(self, text: str):
        """Normalize *text*, fix spacing for tokenization, then Moses-tokenize."""
        normalized = text_normalizer.normalize(
            text, accent=self.rm_accent, case=False, numbers=False, punct=True
        )
        normalized = text_normalizer.normalize_spacing_for_tok(
            normalized, language=self.lang
        )
        return self.moses.tokenize(normalized, return_str=True, escape=False)
class DocTokenizer(jsonql.Transformer):
    """Tokenize the text found in `field` and store the result in `output_field`."""

    def __init__(
        self,
        field: str,
        output_field: str = "tokenized",
        language_field: str = "language",
    ):
        super().__init__()
        self.field = field
        self.output_field = output_field
        self.language_field = language_field
        self.n_docs = 0
        # One tokenizer instance per language, created lazily.
        self.tokenizers: Dict[str, RobustTokenizer] = {}

    def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]:
        """Return (and cache) a tokenizer for *lang*, or None if unsupported."""
        known = self.tokenizers
        if lang in known:
            return known[lang]
        if lang in ("th", "zh", "ja"):
            # TODO find a tokenizer for those languages
            return None
        known[lang] = RobustTokenizer(lang)
        return known[lang]

    def do(self, document):
        tokenizer = self.get_tokenizer(document[self.language_field])
        if not tokenizer:
            return document
        self.n_docs += 1
        tokenized_lines = [
            tokenizer(line) for line in document[self.field].split("\n")
        ]
        document[self.output_field] = "\n".join(tokenized_lines)
        return document

    def summary(self):
        hours = (time.time() - self.start_time) / 3600
        docs_per_sec = self.n_docs / hours
        return [
            f"Tokenized {self.n_docs:_} documents in {hours:.2}h ({docs_per_sec:.1} doc/s)."
        ]
| 2.046875 | 2 |
scripts/exercicios/ex063.py | RuanBarretodosSantos/python | 0 | 13473 | <reponame>RuanBarretodosSantos/python
# Print the first `valor` terms of the Fibonacci sequence.
cont = 3  # index of the next term to generate (terms 1 and 2 printed up front)
t1 = 0
t2 = 1
print('-----' * 12)
print('Sequência de Fibonacci')
print('-----' * 12)
valor = int(input('Quantos termos você quer mostrar ? '))
print('~~~~~' * 12)
print(f'{t1} ➙ {t2} ' , end='➙ ')
while cont <= valor:
    t3 = t1 + t2
    print(f' {t3}', end=' ➙ ')
    # Slide the window: the last two terms become (t2, t3).
    # (Fix: removed the dead ``t3 = t1`` assignment — t3 is recomputed
    # at the top of every iteration.)
    t1 = t2
    t2 = t3
    cont += 1
print(' F I M')
| 3.671875 | 4 |
CGAT/Sra.py | 861934367/cgat | 0 | 13474 | ##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
Sra.py - Methods for dealing with short read archive files
==========================================================
Utility functions for dealing with :term:`SRA` formatted files from
the Short Read Archive.
Requirements:
* fastq-dump >= 2.1.7
Code
----
'''
import os
import glob
import tempfile
import shutil
import CGAT.Experiment as E
import CGAT.Fastq as Fastq
import CGAT.IOTools as IOTools
def peek(sra, outdir=None):
    """return the full file names for all files which will be extracted

    Parameters
    ----------
    outdir : path
        perform extraction in outdir. If outdir is None, the extraction
        will take place in a temporary directory, which will be deleted
        afterwards.

    Returns
    -------
    files : list
        A list of fastq formatted files that are contained in the archive.
    format : string
        The quality score format in the :term:`fastq` formatted files.
    """
    if outdir is None:
        workdir = tempfile.mkdtemp()
    else:
        workdir = outdir
    # --split-files creates files called prefix_#.fastq.gz,
    # where # is the read number.
    # If file cotains paired end data:
    # output = prefix_1.fastq.gz, prefix_2.fastq.gz
    #    *special case: unpaired reads in a paired end --> prefix.fastq.gz
    #    *special case: if paired reads are stored in a single read,
    #                   fastq-dump will split. There might be a joining
    #                   sequence. The output would thus be:
    #                   prefix_1.fastq.gz, prefix_2.fastq.gz, prefix_3.fastq.gz
    #                   You want files 1 and 3.
    E.run("""fastq-dump --split-files --gzip -X 1000
    --outdir %(workdir)s %(sra)s""" % locals())
    f = sorted(glob.glob(os.path.join(workdir, "*.fastq.gz")))
    ff = [os.path.basename(x) for x in f]
    if len(f) == 1:
        # sra file contains one read: output = prefix.fastq.gz
        pass
    elif len(f) == 2:
        # sra file contains read pairs:
        # output = prefix_1.fastq.gz, prefix_2.fastq.gz
        assert ff[0].endswith(
            "_1.fastq.gz") and ff[1].endswith("_2.fastq.gz")
    elif len(f) == 3:
        # Fix: both branches of the original if/else performed exactly the
        # same glob, so the check on ff[2] was redundant — always keep
        # reads 1 and 3 (read 2 is the joining sequence, see above).
        f = glob.glob(os.path.join(workdir, "*_[13].fastq.gz"))
    # check format of fastqs in .sra
    fastq_format = Fastq.guessFormat(IOTools.openFile(f[0], "r"), raises=False)
    fastq_datatype = Fastq.guessDataType(IOTools.openFile(f[0], "r"), raises=True)
    if outdir is None:
        shutil.rmtree(workdir)
    return f, fastq_format, fastq_datatype
def extract(sra, outdir, tool="fastq-dump"):
    """return statement for extracting the SRA file in `outdir`.

    possible tools are fastq-dump and abi-dump. Use abi-dump for colorspace"""
    if tool == "fastq-dump":
        tool += " --split-files"
    substitutions = {"tool": tool, "outdir": outdir, "sra": sra}
    return """%(tool)s --gzip --outdir %(outdir)s %(sra)s""" % substitutions
| 1.960938 | 2 |
LipidFinder/LFDataFrame.py | s-andrews/LipidFinder | 0 | 13475 | # Copyright (c) 2019 <NAME> and <NAME>
#
# This file is part of the LipidFinder software tool and governed by the
# 'MIT License'. Please see the LICENSE file that should have been
# included as part of this software.
"""Represent a DataFrame to be processed with LipidFinder's workflow."""
import glob
import logging
import os
import pandas
class LFDataFrame(pandas.core.frame.DataFrame):
    """A LFDataFrame object stores a dataframe to be used as input data
    in LipidFinder.

    The input data file(s) must comply with the following requirements:
      - The format must be: CSV, TSV, XLS or XLSX. For the last two the
        user can also specify the sheet to be read (or the list of
        sheets if a folder is given as 'src').
      - The first column contains an identifier for each row that is
        unique throughout every file.
      - There is one column named as "mzCol" parameter and another one
        as "rtCol" parameter.
      - Starting from the column index in "firstSampleIndex" parameter,
        every intensity column must follow. For instance, for 2 samples
        with 2 technical replicates, 1 quality control sample and 2
        solvents, the columns would be as follows:
            sample11 , sample12 , sample21 , sample22 , QC1 , sol1, sol2
        Ensure that samples with multiple technical replicates are given
        names in the format name1, name2, etc. such that each name is
        unique for each column. Replicates should be suffixed 1, 2, etc.

    Attributes:
        src  (Public[str])
            Source path where the data was loaded from.
        _resolution  (Private[int])
            Number of digits after the radix point in floats.

    Examples:
        LFDataFrame objects can be created in two different ways:
        >>> from Configuration import LFParameters
        >>> from LFDataFrame import LFDataFrame
        >>> params = LFParameters(module='peakfilter')
        >>> csvData = LFDataFrame('input_data.csv', params)
        >>> xlsData = LFDataFrame('input_data.xls', params, sheet=2)
        >>> folderData = LFDataFrame('/home/user/data/', params)

        After loading the required set of parameters, the data can be
        loaded from a single file ('csvData' and 'xlsData' examples) or
        from multiple files located in the same folder ('folderData'
        example). The latter is meant to be used to merge multiple files
        split by time ranges that represent a single run. The first and
        last retention time (RT) minutes of every file are trimmed as
        they are considered unreliable (except for the first and last
        minutes of the first and last files, respectively). The method
        supports overlap (after trimming), and the frames retained will
        be those from the file with the most frames for each overlapping
        minute.

        The number of decimal places to keep from the input m/z column
        can be changed assigning a value to 'resolution' variable. It
        has been predefined to 6, a standard value in high-resolution
        liquid-chromatography coupled to mass-spectrometry.
    """

    def __init__(self, src, parameters, resolution=6, sheet=0):
        # type: (str, LFParameters, int, object) -> LFDataFrame
        """Constructor of the class LFDataFrame.

        Keyword Arguments:
            src        -- source path where to load the data from
            parameters -- LipidFinder's parameters instance (can be for
                          any module)
            resolution -- number of decimal places to keep from m/z
                          column [default: 6]
            sheet      -- sheet number or list of sheet numbers to read
                          when input file(s) have XLS or XLSX extension
                          (zero-indexed position) [default: 0]

        NOTE(review): when 'src' is a folder, 'sheet' is indexed as a
        sequence (sheet[0], sheet[index]); the default int value of 0
        would raise TypeError in that code path — confirm callers always
        pass a list for folder input.
        """
        rtCol = parameters['rtCol']
        if (not os.path.isdir(src)):
            # Single-file input: read it directly.
            data = self._read_file(src, parameters, sheet)
        else:
            # Create a list of the input files in the source folder (in
            # alphabetical order)
            fileList = sorted(glob.iglob(os.path.join(src, '*.*')))
            if (len(fileList) == 0):
                raise FileNotFoundError("No files found in '{0}'".format(src))
            data = self._read_file(fileList[0], parameters, sheet[0])
            if (len(fileList) > 1):
                # Sort first dataframe by RT
                data.sort_values([rtCol], inplace=True, kind='mergesort')
                # Append "minute" column to the dataframe with the
                # integer part of the float values of its RT column
                timeCol = 'minute'
                data = data.assign(minute=data[rtCol].astype(int))
                # Since it is the first file, remove the frames
                # corresponding to the last minute
                data = data[data[timeCol] != data.iloc[-1][timeCol]]
                for index, filePath in enumerate(fileList[1:], start=1):
                    chunk = self._read_file(filePath, parameters, sheet[index])
                    # Sort next chunk dataframe by RT
                    chunk.sort_values([rtCol], inplace=True, kind='mergesort')
                    # Append "minute" column to the dataframe with the
                    # integer part of the float values of its RT column
                    chunk = chunk.assign(minute=chunk[rtCol].astype(int))
                    # Remove the frames of the first minute (trimmed as
                    # unreliable, see class docstring)
                    chunk = chunk[chunk[timeCol] != chunk.iloc[0][timeCol]]
                    if (index < (len(fileList) - 1)):
                        # Since it is not the last file, remove the
                        # frames corresponding to the last minute
                        chunk = chunk[chunk[timeCol] != chunk.iloc[-1][timeCol]]
                    # Create a dataframe with the number of frames per
                    # minute for both the dataframe and the next chunk
                    overlap = pandas.DataFrame(
                            {'data': data.groupby(timeCol).size(),
                             'chunk': chunk.groupby(timeCol).size()}
                            ).fillna(0)
                    # Keep the minutes where the number of frames in the
                    # next chunk is higher than in the current dataframe
                    overlap = overlap[overlap['chunk'] > overlap['data']]
                    minutesToReplace = overlap.index.tolist()
                    if (minutesToReplace):
                        # Remove the dataframe frames to be replaced
                        data = data[~data[timeCol].isin(minutesToReplace)]
                        # Append chunk frames preserving the column
                        # order of the main dataframe
                        data = data.append(
                                chunk[chunk[timeCol].isin(minutesToReplace)],
                                ignore_index=True
                                )[data.columns.tolist()]
                # Drop "minute" column as it will be no longer necessary
                data.drop(timeCol, axis=1, inplace=True)
        # Rename first column if no name was given in the input file(s)
        data.rename(columns={'Unnamed: 0': 'id'}, inplace=True)
        # Sort dataframe by m/z and RT, and reset the indexing
        mzCol = parameters['mzCol']
        data.sort_values([mzCol, rtCol], inplace=True, kind='mergesort')
        data.reset_index(drop=True, inplace=True)
        # Adjust m/z column values to the machine's maximum float
        # resolution
        data[mzCol] = data[mzCol].apply(round, ndigits=resolution)
        super(LFDataFrame, self).__init__(data=data)
        self.src = src
        self._resolution = resolution

    def drop_empty_frames(self, module, parameters, means=False):
        # type: (str, LFParameters, bool) -> None
        """Remove empty frames from the dataframe and reset the index.

        An empty frame is a row for which every sample replicate or
        sample mean has a zero intensity.

        Keyword Arguments:
            module     -- module name to write in the logging file
            parameters -- LipidFinder's parameters instance (can be for
                          any module)
            means      -- check sample means instead of each sample
                          replicate? [default: False]
        """
        if (means):
            # Columns holding per-sample means are suffixed "_mean".
            meanColIndexes = [i for i, col in enumerate(self.columns)
                              if col.endswith('_mean')]
            if (parameters['numSolventReps'] > 0):
                # The first mean column is for the solvents
                firstIndex = meanColIndexes[1]
            else:
                firstIndex = meanColIndexes[0]
            lastIndex = meanColIndexes[-1]
        else:
            # Replicate columns form a contiguous range starting at
            # "firstSampleIndex" (1-based in the parameters file).
            firstIndex = parameters['firstSampleIndex'] - 1
            lastIndex = firstIndex \
                    + (parameters['numSamples'] * parameters['numTechReps'])
        # Get the indices of all empty frames
        emptyFrames = self.iloc[:, firstIndex : lastIndex].eq(0).all(axis=1)
        indices = self[emptyFrames].index.tolist()
        if (indices):
            # Drop empty frames and reset the index
            self.drop(module, labels=indices, axis=0, inplace=True)
            self.reset_index(drop=True, inplace=True)

    def drop(self, module, **kwargs):
        # type: (str, ...) -> LFDataFrame
        """Wrapper of pandas.DataFrame.drop() with logging report.

        The report will be updated only if the labels correspond to
        rows, i.e. kwargs['axis'] == 0 (default value).

        Keyword Arguments:
            module  -- module name to write in the logging file
            *kwargs -- arguments to pass to pandas.DataFrame.drop()
        """
        # Create logger to print message to the log file
        logger = logging.getLogger(module)
        logger.setLevel(logging.INFO)
        if ((len(kwargs['labels']) > 0) and (kwargs.get('axis', 0) == 0)):
            # Log the unique row IDs (first column) of the dropped rows.
            idCol = self.columns[0]
            idList = [str(x) for x in sorted(self.loc[kwargs['labels'], idCol])]
            logger.info('%s: removed %d rows. IDs: %s', module, len(idList),
                        ','.join(idList))
        return super(LFDataFrame, self).drop(**kwargs)

    @staticmethod
    def _read_file(src, parameters, sheet):
        # type: (str, LFParameters, int) -> pandas.core.frame.DataFrame
        """Return a dataframe with the same content as the source file,
        but with retention time in minutes.

        The read function will be configured based on the file's
        extension. Accepted extensions: CSV, TSV, XLS, XLSX.

        Keyword Arguments:
            src        -- source file path
            parameters -- LipidFinder's parameters instance (can be for
                          any module)
            sheet      -- sheet number to read when the input file has
                          XLS or XLSX extension (zero-indexed position)
        """
        extension = os.path.splitext(src)[1].lower()[1:]
        # Load file based on its extension
        if (extension == 'csv'):
            data = pandas.read_csv(src, float_precision='high')
        elif (extension == 'tsv'):
            data = pandas.read_csv(src, sep='\t', float_precision='high')
        elif (extension in ['xls', 'xlsx']):
            data = pandas.read_excel(src, sheet_name=sheet)
        else:
            raise IOError(("Unknown file extension '{0}'. Expected: csv, tsv, "
                           "xls, xlsx").format(extension))
        if (('timeUnit' in parameters) and
            (parameters['timeUnit'] == 'Seconds')):
            # Convert RT to minutes, rounded to 2 decimal places.
            rtCol = parameters['rtCol']
            data[rtCol] = data[rtCol].apply(lambda x: round(x / 60.0, 2))
        return data
| 2.75 | 3 |
tensorflow/python/ops/fused_embedding_ops.py | lixy9474/DeepRec-1 | 0 | 13476 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gen_fused_embedding_ops
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_local_sparse_look_up_grad
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_local_sparse_look_up
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_sparse_pre_look_up
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_sparse_post_look_up
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_sparse_post_look_up_grad
from tensorflow.python.util.tf_export import tf_export
def fused_embedding_lookup_sparse(embedding_weights,
                                  sparse_ids,
                                  combiner=None,
                                  name=None,
                                  max_norm=None):
    """Fused embedding lookup of ``sparse_ids`` over partitioned weights.

    Keyword Arguments:
        embedding_weights -- a single variable, a list of variables, or a
                             PartitionedVariable holding the embedding table
        sparse_ids        -- a tf.SparseTensor of ids to look up
        combiner          -- one of 'mean', 'sqrtn', 'sum' (defaults to 'mean')
        name              -- optional name scope
        max_norm          -- optional clipping norm applied to the embeddings

    Returns:
        The combined embedding vectors, one row per sparse row.
    """
    if embedding_weights is None:
        raise ValueError("Missing embedding_weights %s." % embedding_weights)
    if isinstance(embedding_weights, variables.PartitionedVariable):
        # get underlying Variables.
        embedding_weights = list(embedding_weights)
    if not isinstance(embedding_weights, list):
        embedding_weights = [embedding_weights]
    if len(embedding_weights) < 1:
        raise ValueError("Missing embedding_weights %s." % embedding_weights)

    with ops.name_scope(name, "fused_embedding_lookup", embedding_weights +
                        [sparse_ids]) as scope:
        if combiner is None:
            # Fix: ``logging`` was never imported at module level, so this
            # branch raised NameError instead of emitting the warning.
            import logging
            logging.warning("The default value of combiner will change from \"mean\" "
                            "to \"sqrtn\" after 2016/11/01.")
            combiner = "mean"
        if combiner not in ("mean", "sqrtn", "sum"):
            raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
        if not isinstance(sparse_ids, sparse_tensor.SparseTensor):
            raise TypeError("sparse_ids must be SparseTensor")

        partition_nums = len(embedding_weights)

        # Local fused embedding lookup. Only support local look up and tf.Variable as
        # embedding weight. So skip it for now.
        # emb_vectors, _ = fused_embedding_local_sparse_look_up(sp_values=sparse_ids.values,
        #                                                       sp_indices=sparse_ids.indices,
        #                                                       sp_dense_shape=sparse_ids.dense_shape,
        #                                                       emb_variable=embedding_weights[0],
        #                                                       combiner=combiner,
        #                                                       max_norm=max_norm)

        # Route each id to the partition that owns it.
        partition_shapes = [w.shape for w in embedding_weights]
        partitioned_values, partitioned_indices = fused_embedding_sparse_pre_look_up(
            partition_shapes=partition_shapes,
            sp_values=sparse_ids.values,
            sp_indices=sparse_ids.indices,
        )
        # Gather each shard's rows on the device that holds the shard.
        emb_shards = []
        for i in range(partition_nums):
            embedding = embedding_weights[i]
            sub_partition_values = partitioned_values[i]
            with ops.colocate_with(embedding):
                shard = array_ops.gather(embedding, sub_partition_values)
            emb_shards.append(shard)
        # Re-assemble the shards into per-row vectors and apply the combiner.
        emb_vectors, _ = fused_embedding_sparse_post_look_up(
            emb_shards=emb_shards, partitioned_indices=partitioned_indices,
            sp_dense_shape=sparse_ids.dense_shape,
            partitioned_values=partitioned_values,
            combiner=combiner, max_norm=max_norm
        )
        return emb_vectors
@ops.RegisterGradient("FusedEmbeddingLocalSparseLookUp")
def fused_embedding_local_sparse_look_up_grad(op, top_grad_emb_vec, _):
grad_sp_values = gen_fused_embedding_ops.fused_embedding_local_sparse_look_up_grad(
top_grad=top_grad_emb_vec, emb_variable=op.inputs[3],
sp_values=op.inputs[0], sp_values_offset=op.outputs[1],
combiner=op.get_attr("combiner"),
max_norm=op.get_attr("max_norm")
)
grads = ops.IndexedSlices(values=grad_sp_values,
indices=op.inputs[0])
return [None, None, None, grads]
@ops.RegisterGradient("FusedEmbeddingSparsePostLookUp")
def fused_embedding_sparse_post_look_up_grad(op, top_grad_emb_vec, _):
num_partitions = op.get_attr("num_partitions")
grad_shards = gen_fused_embedding_ops.fused_embedding_sparse_post_look_up_grad(
top_grad=top_grad_emb_vec, emb_shards=[op.inputs[i] for i in range(0, num_partitions)],
partitioned_indices=[op.inputs[i] for i in range(num_partitions, 2 * num_partitions)],
feature_nums=op.outputs[1], combiner=op.get_attr("combiner"),
max_norm=op.get_attr("max_norm")
)
return grad_shards + [None for _ in range(0, 2 * num_partitions + 1)]
| 1.992188 | 2 |
docs/source/conf.py | deeplook/ipycanvas | 0 | 13477 | <reponame>deeplook/ipycanvas
# -*- coding: utf-8 -*-
import sphinx_rtd_theme
# Sphinx extensions enabled for the build; commented entries are kept for
# easy re-activation.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    # 'sphinx.ext.intersphinx',
    # 'sphinx.ext.autosummary',
    # 'sphinx.ext.viewcode',
    # 'jupyter_sphinx.embed_widgets',
]
templates_path = ['_templates']
master_doc = 'index'
source_suffix = '.rst'
# General information about the project.
project = 'ipycanvas'
author = '<NAME>'
exclude_patterns = []
# Default language for code blocks and pygments highlighting style.
highlight_language = 'python'
pygments_style = 'sphinx'
# HTML output: Read the Docs theme.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Output file base name for HTML help builder.
htmlhelp_basename = 'ipycanvasdoc'
# Keep autodoc members in source-code order rather than alphabetical.
autodoc_member_order = 'bysource'
| 1.632813 | 2 |
pyTuplingUtils/io.py | umd-lhcb/pyTuplingUtils | 0 | 13478 | #!/usr/bin/env python3
#
# Author: <NAME>
# License: BSD 2-clause
# Last Change: Sun May 09, 2021 at 02:52 AM +0200
import numpy as np
# Backend label passed to the ntuple's array()/arrays() calls
# ('np' -> numpy arrays; presumably the uproot `library` argument — TODO confirm).
ARRAY_TYPE = 'np'
def read_branch(ntp, tree, branch, idx=None):
    """Read a single branch from an ntuple.

    Args:
        ntp: opened ntuple supporting ``ntp[tree][branch]``.
        tree: tree name.
        branch: branch name.
        idx: optional index or mask applied to the branch array; ``None``
            returns the whole array.

    Returns:
        The branch data, optionally indexed by ``idx``.
    """
    data = ntp[tree][branch].array(library=ARRAY_TYPE)
    # Compare against None explicitly: the previous ``not idx`` treated
    # idx == 0 as "no index" and is ambiguous for numpy boolean masks.
    # This also matches the idiom used by read_branches() below.
    return data if idx is None else data[idx]
def read_branches_dict(ntp, tree, branches):
    """Read several branches at once, returned as a dict keyed by branch name."""
    arrays = ntp[tree].arrays(branches, library=ARRAY_TYPE)
    return arrays
def read_branches(ntp, tree, branches, idx=None, transpose=False):
    """Read several branches; optionally index each and stack them as columns.

    Returns a list of per-branch arrays, or a single 2-D array (one branch
    per column) when ``transpose`` is True.
    """
    columns = list(ntp[tree].arrays(branches, library=ARRAY_TYPE).values())
    if idx is not None:
        columns = [col[idx] for col in columns]
    if transpose:
        return np.column_stack(columns)
    return columns
| 2.6875 | 3 |
clinnotes/reminders/forms.py | mattnickerson993/clinnotes2 | 0 | 13479 | <filename>clinnotes/reminders/forms.py<gh_stars>0
from django import forms
from .models import Reminder
from clinnotes.users.models import EpisodeOfCare
class ReminderForm(forms.ModelForm):
    """Form for creating/editing a Reminder, scoped to the current clinician.

    The ``episode_of_care`` choices are limited to episodes whose clinician
    is the user passed in via the ``user`` keyword argument.
    """

    class Meta:
        model = Reminder
        fields = ['category', 'title', 'details', 'episode_of_care']

    def __init__(self, *args, **kwargs):
        # The view must supply the requesting user so choices can be scoped.
        current_user = kwargs.pop('user')
        super(ReminderForm, self).__init__(*args, **kwargs)
        self.fields['episode_of_care'].queryset = (
            EpisodeOfCare.objects.filter(clinician=current_user))
AlgorithmB.py | tejaDhulipala/SnowflakeGen | 0 | 13480 | <filename>AlgorithmB.py<gh_stars>0
import pygame as pg
from shapely.geometry import Point, Polygon
from time import perf_counter
# Vars
# A: the snowflake outline as an ordered list of (x, y) vertices; starts as
# the base triangle and is subdivided in place by generateSnowflake().
A = [(100, 600), (700, 600), (400, 80)]
# Every triangle produced so far; used to find the "inward" reference vertex
# when subdividing an edge.
triangles = [[(100, 600), (700, 600), (400, 80)]]
SQRT_3 = 3 ** (1 / 2)
WHITE = (255, 255, 255)
# Graphics part
pg.init()
screen = pg.display.set_mode((800, 800))
# Funcs
# Euclidean distance between two 2-D points.
distance = lambda x, y: ((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2) ** 0.5
def generatePoints(pt1, pt2, reference):
    """Koch-style subdivision of the edge pt1->pt2.

    Returns the two trisection points plus the apex of the equilateral bump
    over the middle third, choosing the apex on the side FARTHER from
    ``reference`` (the inward vertex of the triangle owning this edge).
    All returned coordinates are rounded to ints.

    Fixes vs. the previous version: vertical edges no longer raise
    ZeroDivisionError, and the apex is now rounded on horizontal edges too
    (it used to be returned unrounded there, unlike every other case).
    """
    def _dist(p, q):
        return ((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2) ** 0.5
    a = pt1[0] + (pt2[0] - pt1[0]) / 3
    b = pt1[1] + (pt2[1] - pt1[1]) / 3
    c = pt1[0] + (pt2[0] - pt1[0]) * 2 / 3
    d = pt1[1] + (pt2[1] - pt1[1]) * 2 / 3
    ptm = ((pt1[0] + pt2[0]) / 2, (pt1[1] + pt2[1]) / 2)
    # Height of an equilateral triangle erected on the middle third.
    h = (3 ** 0.5) / 2 * _dist((a, b), (c, d))
    if pt1[0] == pt2[0]:
        # Vertical edge: the perpendicular is horizontal.
        cand1 = (round(ptm[0] - h), round(ptm[1]))
        cand2 = (round(ptm[0] + h), round(ptm[1]))
    elif pt1[1] == pt2[1]:
        # Horizontal edge: the perpendicular is vertical.
        cand1 = (round(ptm[0]), round(ptm[1] - h))
        cand2 = (round(ptm[0]), round(ptm[1] + h))
    else:
        # Slope of the perpendicular bisector: -1 / slope(pt1, pt2).
        perp = -(pt1[0] - pt2[0]) / (pt1[1] - pt2[1])
        x_c = h / (perp ** 2 + 1) ** 0.5
        y_c = perp * x_c
        cand1 = (round(ptm[0] - x_c), round(ptm[1] - y_c))
        cand2 = (round(ptm[0] + x_c), round(ptm[1] + y_c))
    ptc = cand1 if _dist(reference, cand1) > _dist(cand2, reference) else cand2
    return (round(a), round(b)), (round(c), round(d)), ptc
def generatePoints_2(pt1, pt2, father: Polygon):
    """Subdivide edge pt1->pt2 like generatePoints, but orient the apex
    geometrically: of the two candidate apexes, keep the one lying OUTSIDE
    ``father`` (the current outline polygon).

    Fixes vs. the previous version: vertical edges no longer raise
    ZeroDivisionError, and the apex is rounded on horizontal edges too
    (previously returned unrounded there, unlike every other case).
    """
    a = pt1[0] + (pt2[0] - pt1[0]) / 3
    b = pt1[1] + (pt2[1] - pt1[1]) / 3
    c = pt1[0] + (pt2[0] - pt1[0]) * 2 / 3
    d = pt1[1] + (pt2[1] - pt1[1]) * 2 / 3
    ptm = ((pt1[0] + pt2[0]) / 2, (pt1[1] + pt2[1]) / 2)
    # Height of an equilateral triangle erected on the middle third.
    h = SQRT_3 / 2 * distance((a, b), (c, d))
    if pt1[0] == pt2[0]:
        # Vertical edge: the perpendicular is horizontal.
        cand1 = (round(ptm[0] - h), round(ptm[1]))
        cand2 = (round(ptm[0] + h), round(ptm[1]))
    elif pt1[1] == pt2[1]:
        # Horizontal edge: the perpendicular is vertical.
        cand1 = (round(ptm[0]), round(ptm[1] - h))
        cand2 = (round(ptm[0]), round(ptm[1] + h))
    else:
        # Slope of the perpendicular bisector: -1 / slope(pt1, pt2).
        perp = -(pt1[0] - pt2[0]) / (pt1[1] - pt2[1])
        x_c = h / (perp ** 2 + 1) ** 0.5
        y_c = perp * x_c
        cand1 = (round(ptm[0] - x_c), round(ptm[1] - y_c))
        cand2 = (round(ptm[0] + x_c), round(ptm[1] + y_c))
    # If cand2 lies inside the outline, the outward apex is cand1, and vice versa.
    ptc = cand1 if father.contains(Point(*cand2)) else cand2
    return (round(a), round(b)), (round(c), round(d)), ptc
def generateSnowflake(array: list, level):
    """Apply ``level`` rounds of Koch subdivision to ``array`` in place.

    Each round replaces every edge of the current outline with four edges
    (two trisection points plus an outward apex). New triangles are recorded
    in the module-level ``triangles`` list so later rounds can find the
    inward reference vertex for an edge.
    """
    for i in range(level):
        org = array.copy()
        for j in range(len(org)):
            pt1 = org[j]
            pt2 = org[(j + 1) % (len(org))]
            ref = None
            # Find the triangle that owns this edge; its third vertex tells
            # us which side is "inward" so the new bump points outward.
            for triangle in triangles:
                if pt1 in triangle and pt2 in triangle:
                    b = triangle.copy()
                    b.remove(pt1)
                    b.remove(pt2)
                    ref = b[0]
            # NOTE(review): `ref == None` works but `ref is None` is the idiom.
            if ref == None:
                # No owning triangle found: fall back to a point-in-polygon
                # test against the whole current outline.
                pta, ptb, ptc = generatePoints_2(pt1, pt2, Polygon(array))
            else:
                pta, ptb, ptc = generatePoints(pt1, pt2, ref)
            # Insert the three new points just before pt2, preserving order.
            index = array.index(pt2)
            array.insert(index, ptb)
            array.insert(index, ptc)
            array.insert(index, pta)
            triangles.append([pta, ptb, ptc])
start = perf_counter()
# Generate the snowflake outline (6 subdivision levels) and report timing.
generateSnowflake(A, 6)
print(len(A))
print(perf_counter() - start)
# Close the outline once. This append used to live inside the loop below,
# adding a duplicate first vertex on EVERY frame and growing A without bound.
A.append(A[0])
# Game Loop
while True:
    screen.fill(WHITE)
    for i in range(len(A) - 1):
        pg.draw.line(screen, (0, 0, 0), A[i], A[i + 1])
    # exit code
    for event in pg.event.get():
        if event.type == pg.QUIT:
            pg.quit()
            quit(0)
    # Updating
    pg.display.update()
| 2.390625 | 2 |
validator/testcases/javascript/actions.py | AutomatedTester/amo-validator | 0 | 13481 | <gh_stars>0
from copy import deepcopy
from functools import partial
import sys
import types
# Global import of predefinedentities will cause an import loop
import instanceactions
from validator.constants import (BUGZILLA_BUG, DESCRIPTION_TYPES, FENNEC_GUID,
FIREFOX_GUID, MAX_STR_SIZE, MDN_DOC)
from validator.decorator import version_range
from jstypes import JSArray, JSContext, JSLiteral, JSObject, JSWrapper
# Python 2 numeric types (includes `long`); used to detect non-numeric
# operands for the operators below.
NUMERIC_TYPES = (int, long, float, complex)
# None of these operations (or their augmented assignment counterparts) should
# be performed on non-numeric data. Any time we get non-numeric data for these
# guys, we just return window.NaN.
NUMERIC_OPERATORS = ('-', '*', '/', '%', '<<', '>>', '>>>', '|', '^', '&')
NUMERIC_OPERATORS += tuple('%s=' % op for op in NUMERIC_OPERATORS)
def get_NaN(traverser):
    """Return the global NaN wrapper, building and caching it on first use."""
    cached = getattr(traverser, 'NAN_CACHE', None)
    if cached is None:
        # Imported here to avoid the import loop with predefinedentities.
        from predefinedentities import GLOBAL_ENTITIES
        cached = traverser._build_global('NaN', GLOBAL_ENTITIES[u'NaN'])
        # Remember it on the traverser so we never rebuild it.
        traverser.NAN_CACHE = cached
    return cached
def _get_member_exp_property(traverser, node):
"""Return the string value of a member expression's property."""
if node['property']['type'] == 'Identifier' and not node.get('computed'):
return unicode(node['property']['name'])
else:
eval_exp = traverser._traverse_node(node['property'])
return _get_as_str(eval_exp.get_literal_value())
def _expand_globals(traverser, node):
"""Expands a global object that has a lambda value."""
if node.is_global and callable(node.value.get('value')):
result = node.value['value'](traverser)
if isinstance(result, dict):
output = traverser._build_global('--', result)
elif isinstance(result, JSWrapper):
output = result
else:
output = JSWrapper(result, traverser)
# Set the node context.
if 'context' in node.value:
traverser._debug('CONTEXT>>%s' % node.value['context'])
output.context = node.value['context']
else:
traverser._debug('CONTEXT>>INHERITED')
output.context = node.context
return output
return node
def trace_member(traverser, node, instantiate=False):
    """Trace a MemberExpression (or its root) to the object it refers to.

    Recurses down `x.y.z` chains, expanding global entities along the way.
    When ``instantiate`` is set, undefined root identifiers are created in
    the outermost context instead of merely looked up.
    """
    traverser._debug('TESTING>>%s' % node['type'])
    if node['type'] == 'MemberExpression':
        # x.y or x[y]
        # x = base
        base = trace_member(traverser, node['object'], instantiate)
        base = _expand_globals(traverser, base)
        identifier = _get_member_exp_property(traverser, node)
        # Handle the various global entity properties.
        if base.is_global:
            # If we've got an XPCOM wildcard, return a copy of the entity.
            if 'xpcom_wildcard' in base.value:
                traverser._debug('MEMBER_EXP>>XPCOM_WILDCARD')
                from predefinedentities import CONTRACT_ENTITIES
                if identifier in CONTRACT_ENTITIES:
                    kw = dict(err_id=('js', 'actions', 'dangerous_contract'),
                              warning='Dangerous XPCOM contract ID')
                    kw.update(CONTRACT_ENTITIES[identifier])
                    traverser.warning(**kw)
                # Copy before mutating so the shared entity isn't changed.
                base.value = base.value.copy()
                del base.value['xpcom_wildcard']
                return base
        test_identifier(traverser, identifier)
        traverser._debug('MEMBER_EXP>>PROPERTY: %s' % identifier)
        output = base.get(
            traverser=traverser, instantiate=instantiate, name=identifier)
        output.context = base.context
        if base.is_global:
            # In the cases of XPCOM objects, methods generally
            # remain bound to their parent objects, even when called
            # indirectly.
            output.parent = base
        return output
    elif node['type'] == 'Identifier':
        traverser._debug('MEMBER_EXP>>ROOT:IDENTIFIER')
        test_identifier(traverser, node['name'])
        # If we're supposed to instantiate the object and it doesn't already
        # exist, instantitate the object.
        if instantiate and not traverser._is_defined(node['name']):
            output = JSWrapper(JSObject(), traverser=traverser)
            traverser.contexts[0].set(node['name'], output)
        else:
            output = traverser._seek_variable(node['name'])
        return _expand_globals(traverser, output)
    else:
        traverser._debug('MEMBER_EXP>>ROOT:EXPRESSION')
        # It's an expression, so just try your damndest.
        return traverser._traverse_node(node)
def test_identifier(traverser, name):
'Tests whether an identifier is banned'
import predefinedentities
if name in predefinedentities.BANNED_IDENTIFIERS:
traverser.err.warning(
err_id=('js', 'actions', 'banned_identifier'),
warning='Banned or deprecated JavaScript Identifier',
description=predefinedentities.BANNED_IDENTIFIERS[name],
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context)
def _function(traverser, node):
    """Register a function node for deferred traversal.

    Returns a dirty, callable wrapper immediately; the body is traversed
    later (via traverser.function_collection) at the end of the enclosing
    block scope, with its own context and "this" object.
    """
    def wrap(traverser, node):
        me = JSObject()
        traverser.function_collection.append([])
        # Replace the current context with a prototypeable JS object.
        traverser._pop_context()
        me.type_ = 'default'  # Treat the function as a normal object.
        traverser._push_context(me)
        traverser._debug('THIS_PUSH')
        traverser.this_stack.append(me)  # Allow references to "this"
        # Declare parameters in the local scope
        params = []
        for param in node['params']:
            if param['type'] == 'Identifier':
                params.append(param['name'])
            elif param['type'] == 'ArrayPattern':
                for element in param['elements']:
                    # Array destructuring in function prototypes? LOL!
                    if element is None or element['type'] != 'Identifier':
                        continue
                    params.append(element['name'])
        local_context = traverser._peek_context(1)
        for param in params:
            var = JSWrapper(lazy=True, traverser=traverser)
            # We can assume that the params are static because we don't care
            # about what calls the function. We want to know whether the
            # function solely returns static values. If so, it is a static
            # function.
            local_context.set(param, var)
        traverser._traverse_node(node['body'])
        # Since we need to manually manage the "this" stack, pop off that
        # context.
        traverser._debug('THIS_POP')
        traverser.this_stack.pop()
        # Call all of the function collection's members to traverse all of the
        # child functions.
        func_coll = traverser.function_collection.pop()
        for func in func_coll:
            func()
    # Put the function off for traversal at the end of the current block scope.
    traverser.function_collection[-1].append(partial(wrap, traverser, node))
    return JSWrapper(traverser=traverser, callable=True, dirty=True)
def _define_function(traverser, node):
me = _function(traverser, node)
traverser._peek_context(2).set(node['id']['name'], me)
return me
def _func_expr(traverser, node):
'Represents a lambda function'
return _function(traverser, node)
def _define_with(traverser, node):
'Handles `with` statements'
object_ = traverser._traverse_node(node['object'])
if isinstance(object_, JSWrapper) and isinstance(object_.value, JSObject):
traverser.contexts[-1] = object_.value
traverser.contexts.append(JSContext('block'))
return
def _define_var(traverser, node):
    """Declare variables for a VariableDeclaration node.

    Handles plain declarations, array destructuring (one level only) and
    object destructuring (recursively). ``kind`` ('var'/'let'/'const')
    controls scoping and const-ness of the declared wrappers.
    """
    traverser._debug('VARIABLE_DECLARATION')
    traverser.debug_level += 1
    declarations = (node['declarations'] if 'declarations' in node
                    else node['head'])
    kind = node.get('kind', 'let')
    for declaration in declarations:
        # It could be deconstruction of variables :(
        if declaration['id']['type'] == 'ArrayPattern':
            vars = []
            for element in declaration['id']['elements']:
                # NOTE : Multi-level array destructuring sucks. Maybe implement
                # it someday if you're bored, but it's so rarely used and it's
                # so utterly complex, there's probably no need to ever code it
                # up.
                if element is None or element['type'] != 'Identifier':
                    vars.append(None)
                    continue
                vars.append(element['name'])
            # The variables are not initialized
            if declaration['init'] is None:
                # Simple instantiation; no initialization
                for var in vars:
                    if not var:
                        continue
                    traverser._declare_variable(var, None)
            # The variables are declared inline
            elif declaration['init']['type'] == 'ArrayPattern':
                # TODO : Test to make sure len(values) == len(vars)
                for value in declaration['init']['elements']:
                    if vars[0]:
                        traverser._declare_variable(
                            vars[0], JSWrapper(traverser._traverse_node(value),
                                               traverser=traverser))
                    vars = vars[1:]  # Pop off the first value
            # It's being assigned by a JSArray (presumably)
            elif declaration['init']['type'] == 'ArrayExpression':
                assigner = traverser._traverse_node(declaration['init'])
                for value in assigner.value.elements:
                    if vars[0]:
                        traverser._declare_variable(vars[0], value)
                    vars = vars[1:]
        elif declaration['id']['type'] == 'ObjectPattern':
            init = traverser._traverse_node(declaration['init'])
            # Recursively declare a variable for each property in the
            # pattern, reading the value off the initializer object.
            def _proc_objpattern(init_obj, properties):
                for prop in properties:
                    # Get the name of the init obj's member
                    if prop['key']['type'] == 'Literal':
                        prop_name = prop['key']['value']
                    elif prop['key']['type'] == 'Identifier':
                        prop_name = prop['key']['name']
                    else:
                        continue
                    if prop['value']['type'] == 'Identifier':
                        traverser._declare_variable(
                            prop['value']['name'],
                            init_obj.get(traverser, prop_name))
                    elif prop['value']['type'] == 'ObjectPattern':
                        _proc_objpattern(init_obj.get(traverser, prop_name),
                                         prop['value']['properties'])
            if init is not None:
                _proc_objpattern(init_obj=init,
                                 properties=declaration['id']['properties'])
        else:
            # Plain `var name = value;` declaration.
            var_name = declaration['id']['name']
            traverser._debug('NAME>>%s' % var_name)
            var_value = traverser._traverse_node(declaration['init'])
            traverser._debug('VALUE>>%s' % (var_value.output()
                                            if var_value is not None
                                            else 'None'))
            if not isinstance(var_value, JSWrapper):
                var = JSWrapper(value=var_value,
                                const=kind == 'const',
                                traverser=traverser)
            else:
                var = var_value
                var.const = kind == 'const'
            traverser._declare_variable(var_name, var, type_=kind)
    if 'body' in node:
        traverser._traverse_node(node['body'])
    traverser.debug_level -= 1
    # The "Declarations" branch contains custom elements.
    return True
def _define_obj(traverser, node):
    """Build a lazy JSObject wrapper from an ObjectExpression node.

    Property names come from literals, identifiers, or (for computed keys)
    a traversed member expression; prototype mutations set 'prototype'.
    """
    var = JSObject()
    for prop in node['properties']:
        if prop['type'] == 'PrototypeMutation':
            var_name = 'prototype'
        else:
            key = prop['key']
            if key['type'] == 'Literal':
                var_name = key['value']
            elif isinstance(key['name'], basestring):
                var_name = key['name']
            else:
                # Computed key: wrap it so _get_member_exp_property can
                # evaluate it like a member expression's property.
                if 'property' in key['name']:
                    name = key['name']
                else:
                    name = {'property': key['name']}
                var_name = _get_member_exp_property(traverser, name)
        var_value = traverser._traverse_node(prop['value'])
        var.set(var_name, var_value, traverser)
        # TODO: Observe "kind"
    # var is a freshly built JSObject here, so this branch always wraps it;
    # the fall-through exists only as a safety net.
    if not isinstance(var, JSWrapper):
        return JSWrapper(var, lazy=True, traverser=traverser)
    var.lazy = True
    return var
def _define_array(traverser, node):
"""Instantiate an array object from the parse tree."""
arr = JSArray()
arr.elements = map(traverser._traverse_node, node['elements'])
return arr
def _define_template_strings(traverser, node):
"""Instantiate an array of raw and cooked template strings."""
cooked = JSArray()
cooked.elements = map(traverser._traverse_node, node['cooked'])
raw = JSArray()
raw.elements = map(traverser._traverse_node, node['raw'])
cooked.set('raw', raw, traverser)
return cooked
def _define_template(traverser, node):
"""Instantiate a template literal."""
elements = map(traverser._traverse_node, node['elements'])
return reduce(partial(_binary_op, '+', traverser=traverser), elements)
def _define_literal(traverser, node):
"""
Convert a literal node in the parse tree to its corresponding
interpreted value.
"""
value = node['value']
if isinstance(value, dict):
return JSWrapper(JSObject(), traverser=traverser, dirty=True)
wrapper = JSWrapper(value if value is not None else JSLiteral(None),
traverser=traverser)
test_literal(traverser, wrapper)
return wrapper
def test_literal(traverser, wrapper):
"""
Test the value of a literal, in particular only a string literal at the
moment, against possibly dangerous patterns.
"""
value = wrapper.get_literal_value()
if isinstance(value, basestring):
# Local import to prevent import loop.
from validator.testcases.regex import validate_string
validate_string(value, traverser, wrapper=wrapper)
def _call_expression(traverser, node):
    """Evaluate a CallExpression node.

    Traverses the arguments, warns on complex code in preference-defaults
    files and on dangerous global calls, dispatches known instance methods
    via instanceactions, and evaluates 'return' handlers of global entities.
    """
    args = node['arguments']
    # Traverse arguments up front so dangerous content in them is flagged.
    for arg in args:
        traverser._traverse_node(arg, source='arguments')
    member = traverser._traverse_node(node['callee'])
    if (traverser.filename.startswith('defaults/preferences/') and
        ('name' not in node['callee'] or
         node['callee']['name'] not in (u'pref', u'user_pref'))):
        traverser.err.warning(
            err_id=('testcases_javascript_actions',
                    '_call_expression',
                    'complex_prefs_defaults_code'),
            warning='Complex code should not appear in preference defaults '
                    'files',
            description="Calls to functions other than 'pref' and 'user_pref' "
                        'should not appear in defaults/preferences/ files.',
            filename=traverser.filename,
            line=traverser.line,
            column=traverser.position,
            context=traverser.context)
    # Global entities may define a 'dangerous' callback that inspects the
    # call; a truthy result becomes (or augments) a warning.
    if member.is_global and callable(member.value.get('dangerous', None)):
        result = member.value['dangerous'](a=args, t=traverser._traverse_node,
                                           e=traverser.err)
        name = member.value.get('name', '')
        if result and name:
            kwargs = {
                'err_id': ('testcases_javascript_actions', '_call_expression',
                           'called_dangerous_global'),
                'warning': '`%s` called in potentially dangerous manner' %
                           member.value['name'],
                'description':
                    'The global `%s` function was called using a set '
                    'of dangerous parameters. Calls of this nature '
                    'are deprecated.' % member.value['name']}
            if isinstance(result, DESCRIPTION_TYPES):
                kwargs['description'] = result
            elif isinstance(result, dict):
                kwargs.update(result)
            traverser.warning(**kwargs)
    elif (node['callee']['type'] == 'MemberExpression' and
          node['callee']['property']['type'] == 'Identifier'):
        # If we can identify the function being called on any member of any
        # instance, we can use that to either generate an output value or test
        # for additional conditions.
        identifier_name = node['callee']['property']['name']
        if identifier_name in instanceactions.INSTANCE_DEFINITIONS:
            result = instanceactions.INSTANCE_DEFINITIONS[identifier_name](
                args, traverser, node, wrapper=member)
            return result
    # Globals with a 'return' handler compute their own result.
    if member.is_global and 'return' in member.value:
        if 'object' in node['callee']:
            member.parent = trace_member(traverser, node['callee']['object'])
        return member.value['return'](wrapper=member, arguments=args,
                                      traverser=traverser)
    # Unknown call: produce a dirty placeholder object.
    return JSWrapper(JSObject(), dirty=True, traverser=traverser)
def _call_settimeout(a, t, e):
"""
Handler for setTimeout and setInterval. Should determine whether a[0]
is a lambda function or a string. Strings are banned, lambda functions are
ok. Since we can't do reliable type testing on other variables, we flag
those, too.
"""
if not a:
return
if a[0]['type'] in ('FunctionExpression', 'ArrowFunctionExpression'):
return
if t(a[0]).callable:
return
return {'err_id': ('javascript', 'dangerous_global', 'eval'),
'description':
'In order to prevent vulnerabilities, the `setTimeout` '
'and `setInterval` functions should be called only with '
'function expressions as their first argument.',
'signing_help': (
'Please do not ever call `setTimeout` or `setInterval` with '
'string arguments. If you are passing a function which is '
'not being correctly detected as such, please consider '
'passing a closure or arrow function, which in turn calls '
'the original function.'),
'signing_severity': 'high'}
def _call_require(a, t, e):
"""
Tests for unsafe uses of `require()` in SDK add-ons.
"""
args, traverse, err = a, t, e
if not err.metadata.get('is_jetpack') and len(args):
return
module = traverse(args[0]).get_literal_value()
if not isinstance(module, basestring):
return
if module.startswith('sdk/'):
module = module[len('sdk/'):]
LOW_LEVEL = {
# Added from bugs 689340, 731109
'chrome', 'window-utils', 'observer-service',
# Added from bug 845492
'window/utils', 'sdk/window/utils', 'sdk/deprecated/window-utils',
'tab/utils', 'sdk/tab/utils',
'system/events', 'sdk/system/events',
}
if module in LOW_LEVEL:
err.metadata['requires_chrome'] = True
return {'warning': 'Usage of low-level or non-SDK interface',
'description': 'Your add-on uses an interface which bypasses '
'the high-level protections of the add-on SDK. '
'This interface should be avoided, and its use '
'may significantly complicate your review '
'process.'}
if module == 'widget':
return {'warning': 'Use of deprecated SDK module',
'description':
"The 'widget' module has been deprecated due to a number "
'of performance and usability issues, and has been '
'removed from the SDK as of Firefox 40. Please use the '
"'sdk/ui/button/action' or 'sdk/ui/button/toggle' module "
'instead. See '
'https://developer.mozilla.org/Add-ons/SDK/High-Level_APIs'
'/ui for more information.'}
def _call_create_pref(a, t, e):
    """
    Handler for pref() and user_pref() calls in defaults/preferences/*.js files
    to ensure that they don't touch preferences outside of the "extensions."
    branch.
    """
    # We really need to clean up the arguments passed to these functions.
    # `t` is the traverser's bound _traverse_node method (see
    # _call_expression); im_self (Python 2 bound-method attribute) recovers
    # the traverser instance from it.
    traverser = t.im_self
    if not traverser.filename.startswith('defaults/preferences/') or not a:
        return
    # Record the preference write itself before validating its branch.
    instanceactions.set_preference(JSWrapper(JSLiteral(None),
                                   traverser=traverser),
                                   a, traverser)
    value = _get_as_str(t(a[0]))
    return test_preference(value)
def test_preference(value):
for branch in 'extensions.', 'services.sync.prefs.sync.extensions.':
if value.startswith(branch) and value.rindex('.') > len(branch):
return
return ('Extensions should not alter preferences outside of the '
"'extensions.' preference branch. Please make sure that "
"all of your extension's preferences are prefixed with "
"'extensions.add-on-name.', where 'add-on-name' is a "
'distinct string unique to and indicative of your add-on.')
def _readonly_top(traverser, right, node_right):
"""Handle the readonly callback for window.top."""
traverser.notice(
err_id=('testcases_javascript_actions',
'_readonly_top'),
notice='window.top is a reserved variable',
description='The `top` global variable is reserved and cannot be '
'assigned any values starting with Gecko 6. Review your '
'code for any uses of the `top` global, and refer to '
'%s for more information.' % BUGZILLA_BUG % 654137,
for_appversions={FIREFOX_GUID: version_range('firefox',
'6.0a1', '7.0a1'),
FENNEC_GUID: version_range('fennec',
'6.0a1', '7.0a1')},
compatibility_type='warning',
tier=5)
def _expression(traverser, node):
"""
This is a helper method that allows node definitions to point at
`_traverse_node` without needing a reference to a traverser.
"""
return traverser._traverse_node(node['expression'])
def _get_this(traverser, node):
'Returns the `this` object'
if not traverser.this_stack:
from predefinedentities import GLOBAL_ENTITIES
return traverser._build_global('window', GLOBAL_ENTITIES[u'window'])
return traverser.this_stack[-1]
def _new(traverser, node):
'Returns a new copy of a node.'
# We don't actually process the arguments as part of the flow because of
# the Angry T-Rex effect. For now, we just traverse them to ensure they
# don't contain anything dangerous.
args = node['arguments']
if isinstance(args, list):
for arg in args:
traverser._traverse_node(arg, source='arguments')
else:
traverser._traverse_node(args)
elem = traverser._traverse_node(node['callee'])
if not isinstance(elem, JSWrapper):
elem = JSWrapper(elem, traverser=traverser)
if elem.is_global:
traverser._debug('Making overwritable')
elem.value = deepcopy(elem.value)
elem.value['overwritable'] = True
return elem
def _ident(traverser, node):
'Initiates an object lookup on the traverser based on an identifier token'
name = node['name']
# Ban bits like "newThread"
test_identifier(traverser, name)
if traverser._is_defined(name):
return traverser._seek_variable(name)
return JSWrapper(JSObject(), traverser=traverser, dirty=True)
def _expr_assignment(traverser, node):
    """Evaluate an AssignmentExpression node.

    Direct assignment ('=') declares/sets the target and warns on overwrites
    of readonly globals; augmented assignments ('+=', '<<=', ...) emulate
    JavaScript's coercion rules on the literal values.
    """
    traverser._debug('ASSIGNMENT_EXPRESSION')
    traverser.debug_level += 1
    traverser._debug('ASSIGNMENT>>PARSING RIGHT')
    right = traverser._traverse_node(node['right'])
    right = JSWrapper(right, traverser=traverser)
    # Treat direct assignment different than augmented assignment.
    if node['operator'] == '=':
        from predefinedentities import GLOBAL_ENTITIES, is_shared_scope
        global_overwrite = False
        readonly_value = is_shared_scope(traverser)
        node_left = node['left']
        traverser._debug('ASSIGNMENT:DIRECT(%s)' % node_left['type'])
        if node_left['type'] == 'Identifier':
            # Identifiers just need the ID name and a value to push.
            # Raise a global overwrite issue if the identifier is global.
            global_overwrite = traverser._is_global(node_left['name'])
            # Get the readonly attribute and store its value if is_global
            if global_overwrite:
                global_dict = GLOBAL_ENTITIES[node_left['name']]
                if 'readonly' in global_dict:
                    readonly_value = global_dict['readonly']
            traverser._declare_variable(node_left['name'], right, type_='glob')
        elif node_left['type'] == 'MemberExpression':
            member_object = trace_member(traverser, node_left['object'],
                                         instantiate=True)
            global_overwrite = (member_object.is_global and
                                not ('overwritable' in member_object.value and
                                     member_object.value['overwritable']))
            member_property = _get_member_exp_property(traverser, node_left)
            traverser._debug('ASSIGNMENT:MEMBER_PROPERTY(%s)'
                             % member_property)
            traverser._debug('ASSIGNMENT:GLOB_OV::%s' % global_overwrite)
            # Don't do the assignment if we're facing a global.
            if not member_object.is_global:
                if member_object.value is None:
                    member_object.value = JSObject()
                if not member_object.is_global:
                    member_object.value.set(member_property, right, traverser)
                else:
                    # It's probably better to do nothing.
                    pass
            elif 'value' in member_object.value:
                member_object_value = _expand_globals(traverser,
                                                      member_object).value
                if member_property in member_object_value['value']:
                    # If it's a global and the actual member exists, test
                    # whether it can be safely overwritten.
                    member = member_object_value['value'][member_property]
                    if 'readonly' in member:
                        global_overwrite = True
                        readonly_value = member['readonly']
        traverser._debug('ASSIGNMENT:DIRECT:GLOB_OVERWRITE %s' %
                         global_overwrite)
        traverser._debug('ASSIGNMENT:DIRECT:READONLY %r' %
                         readonly_value)
        # readonly may be a callback deciding per-assignment (e.g. window.top).
        if callable(readonly_value):
            readonly_value = readonly_value(traverser, right, node['right'])
        if readonly_value and global_overwrite:
            kwargs = dict(
                err_id=('testcases_javascript_actions',
                        '_expr_assignment',
                        'global_overwrite'),
                warning='Global variable overwrite',
                description='An attempt was made to overwrite a global '
                            'variable in some JavaScript code.')
            if isinstance(readonly_value, DESCRIPTION_TYPES):
                kwargs['description'] = readonly_value
            elif isinstance(readonly_value, dict):
                kwargs.update(readonly_value)
            traverser.warning(**kwargs)
        # Direct assignment evaluates to the assigned value.
        return right
    lit_right = right.get_literal_value()
    traverser._debug('ASSIGNMENT>>PARSING LEFT')
    left = traverser._traverse_node(node['left'])
    traverser._debug('ASSIGNMENT>>DONE PARSING LEFT')
    traverser.debug_level -= 1
    if isinstance(left, JSWrapper):
        if left.dirty:
            return left
        lit_left = left.get_literal_value()
        token = node['operator']
        # Don't perform an operation on None. Python freaks out
        if lit_left is None:
            lit_left = 0
        if lit_right is None:
            lit_right = 0
        # Give them default values so we have them in scope.
        gleft, gright = 0, 0
        # All of the assignment operators
        operators = {'=': lambda: right,
                     '+=': lambda: lit_left + lit_right,
                     '-=': lambda: gleft - gright,
                     '*=': lambda: gleft * gright,
                     '/=': lambda: 0 if gright == 0 else (gleft / gright),
                     '%=': lambda: 0 if gright == 0 else (gleft % gright),
                     '<<=': lambda: int(gleft) << int(gright),
                     '>>=': lambda: int(gleft) >> int(gright),
                     '>>>=': lambda: float(abs(int(gleft)) >> gright),
                     '|=': lambda: int(gleft) | int(gright),
                     '^=': lambda: int(gleft) ^ int(gright),
                     '&=': lambda: int(gleft) & int(gright)}
        # If we're modifying a non-numeric type with a numeric operator, return
        # NaN.
        if (not isinstance(lit_left, NUMERIC_TYPES) and
                token in NUMERIC_OPERATORS):
            left.set_value(get_NaN(traverser), traverser=traverser)
            return left
        # If either side of the assignment operator is a string, both sides
        # need to be casted to strings first.
        if (isinstance(lit_left, types.StringTypes) or
                isinstance(lit_right, types.StringTypes)):
            lit_left = _get_as_str(lit_left)
            lit_right = _get_as_str(lit_right)
        gleft, gright = _get_as_num(left), _get_as_num(right)
        traverser._debug('ASSIGNMENT>>OPERATION:%s' % token)
        if token not in operators:
            # We don't support that operator. (yet?)
            traverser._debug('ASSIGNMENT>>OPERATOR NOT FOUND', 1)
            return left
        elif token in ('<<=', '>>=', '>>>=') and gright < 0:
            # The user is doing weird bitshifting that will return 0 in JS but
            # not in Python.
            left.set_value(0, traverser=traverser)
            return left
        elif (token in ('<<=', '>>=', '>>>=', '|=', '^=', '&=') and
              (abs(gleft) == float('inf') or abs(gright) == float('inf'))):
            # Don't bother handling infinity for integer-converted operations.
            left.set_value(get_NaN(traverser), traverser=traverser)
            return left
        traverser._debug('ASSIGNMENT::L-value global? (%s)' %
                         ('Y' if left.is_global else 'N'), 1)
        try:
            new_value = operators[token]()
        except Exception:
            traverser.system_error(exc_info=sys.exc_info())
            new_value = None
        # Cap the length of analyzed strings.
        if (isinstance(new_value, types.StringTypes) and
                len(new_value) > MAX_STR_SIZE):
            new_value = new_value[:MAX_STR_SIZE]
        traverser._debug('ASSIGNMENT::New value >> %s' % new_value, 1)
        left.set_value(new_value, traverser=traverser)
        return left
    # Though it would otherwise be a syntax error, we say that 4=5 should
    # evaluate out to 5.
    return right
def _expr_binary(traverser, node):
    """Evaluate a BinaryExpression node.

    Left-leaning chains are processed iteratively (with a '__traversal'
    memo stashed on the node) to keep recursion depth bounded; the actual
    operation is delegated to _binary_op.
    """
    traverser.debug_level += 1
    # Select the proper operator.
    operator = node['operator']
    traverser._debug('BIN_OPERATOR>>%s' % operator)
    # Traverse the left half of the binary expression.
    with traverser._debug('BIN_EXP>>l-value'):
        if (node['left']['type'] == 'BinaryExpression' and
                '__traversal' not in node['left']):
            # Process the left branch of the binary expression directly. This
            # keeps the recursion cap in line and speeds up processing of
            # large chains of binary expressions.
            left = _expr_binary(traverser, node['left'])
            node['left']['__traversal'] = left
        else:
            left = traverser._traverse_node(node['left'])
    # Traverse the right half of the binary expression.
    with traverser._debug('BIN_EXP>>r-value'):
        if (operator == 'instanceof' and
                node['right']['type'] == 'Identifier' and
                node['right']['name'] == 'Function'):
            # We make an exception for instanceof's r-value if it's a
            # dangerous global, specifically Function.
            return JSWrapper(True, traverser=traverser)
        else:
            right = traverser._traverse_node(node['right'])
            traverser._debug('Is dirty? %r' % right.dirty, 1)
    return _binary_op(operator, left, right, traverser)
def _binary_op(operator, left, right, traverser):
    """Perform a binary operation on two pre-traversed nodes.

    Emulates JavaScript coercion: operands are reduced to literals,
    numeric operators work on _get_as_num() conversions, and '+' falls
    back to string concatenation when either side is a string.
    """
    # Dirty l or r values mean we can skip the expression. A dirty value
    # indicates that a lazy operation took place that introduced some
    # nondeterminacy.
    # FIXME(Kris): We should process these as if they're strings anyway.
    if left.dirty:
        return left
    elif right.dirty:
        return right
    # Binary expressions are only executed on literals.
    left = left.get_literal_value()
    right_wrap = right
    right = right.get_literal_value()
    # Coerce the literals to numbers for numeric operations.
    gleft = _get_as_num(left)
    gright = _get_as_num(right)
    operators = {
        '==': lambda: left == right or gleft == gright,
        '!=': lambda: left != right,
        '===': lambda: left == right,  # Be flexible.
        '!==': lambda: type(left) != type(right) or left != right,
        '>': lambda: left > right,
        '<': lambda: left < right,
        '<=': lambda: left <= right,
        '>=': lambda: left >= right,
        '<<': lambda: int(gleft) << int(gright),
        '>>': lambda: int(gleft) >> int(gright),
        '>>>': lambda: float(abs(int(gleft)) >> int(gright)),
        '+': lambda: left + right,
        '-': lambda: gleft - gright,
        '*': lambda: gleft * gright,
        '/': lambda: 0 if gright == 0 else (gleft / gright),
        '%': lambda: 0 if gright == 0 else (gleft % gright),
        'in': lambda: right_wrap.contains(left),
        # TODO : implement instanceof
        # FIXME(Kris): Treat instanceof the same as `QueryInterface`
    }
    output = None
    # Shifts involving undefined operands or negative shift counts are
    # special-cased to False rather than evaluated.
    if (operator in ('>>', '<<', '>>>') and
            (left is None or right is None or gright < 0)):
        output = False
    elif operator in operators:
        # Concatenation can be silly, so always turn undefineds into empty
        # strings and if there are strings, make everything strings.
        if operator == '+':
            if left is None:
                left = ''
            if right is None:
                right = ''
            if isinstance(left, basestring) or isinstance(right, basestring):
                left = _get_as_str(left)
                right = _get_as_str(right)
        # Don't even bother handling infinity if it's a numeric computation.
        if (operator in ('<<', '>>', '>>>') and
                (abs(gleft) == float('inf') or abs(gright) == float('inf'))):
            return get_NaN(traverser)
        try:
            output = operators[operator]()
        except Exception:
            traverser.system_error(exc_info=sys.exc_info())
            output = None
        # Cap the length of analyzed strings.
        if (isinstance(output, types.StringTypes) and
                len(output) > MAX_STR_SIZE):
            output = output[:MAX_STR_SIZE]
        wrapper = JSWrapper(output, traverser=traverser)
        # Test the newly-created literal for dangerous values.
        # This may cause duplicate warnings for strings which
        # already match a dangerous value prior to concatenation.
        test_literal(traverser, wrapper)
        return wrapper
    return JSWrapper(output, traverser=traverser)
def _expr_unary(traverser, node):
    """Evaluate a UnaryExpression node."""
    operand = traverser._traverse_node(node['argument'])
    literal = operand.get_literal_value()
    number = _get_as_num(literal)
    handlers = {
        '-': lambda: -1 * number,
        '+': lambda: number,
        '!': lambda: not literal,
        '~': lambda: -1 * (number + 1),
        'void': lambda: None,
        'typeof': lambda: _expr_unary_typeof(operand),
        'delete': lambda: None,  # We never want to empty the context
    }
    op = node['operator']
    result = handlers[op]() if op in handlers else None
    # Normalize the result so callers always receive a JSWrapper.
    if isinstance(result, JSWrapper):
        return result
    return JSWrapper(result, traverser=traverser)
def _expr_unary_typeof(wrapper):
    """Evaluate the "typeof" value for a JSWrapper object."""
    # A callable, or a global stub that only declares a return type,
    # reports as a function.
    unresolved_global = (wrapper.is_global and
                         'return' in wrapper.value and
                         'value' not in wrapper.value)
    if wrapper.callable or unresolved_global:
        return 'function'

    value = wrapper.value
    if value is None:
        return 'undefined'
    elif isinstance(value, JSLiteral):
        value = value.value
        # Check bool before the numeric types: bool subclasses int.
        if isinstance(value, bool):
            return 'boolean'
        elif isinstance(value, (int, long, float)):
            return 'number'
        elif isinstance(value, types.StringTypes):
            return 'string'
    return 'object'
def _get_as_num(value):
    """Return the JS numeric equivalent for a value."""
    if isinstance(value, JSWrapper):
        value = value.get_literal_value()
    if value is None:
        return 0
    try:
        if isinstance(value, types.StringTypes):
            # JS-style hex literal; anything else parses as a float.
            if value.startswith('0x'):
                return int(value, 16)
            return float(value)
        if isinstance(value, (int, float, long)):
            return value
        return int(value)
    except (ValueError, TypeError):
        # Unparseable values coerce to 0, mirroring lenient JS semantics.
        return 0
def _get_as_str(value):
    """Return the JS string equivalent for a literal value."""
    # Unwrap to the raw literal first.
    if isinstance(value, JSWrapper):
        value = value.get_literal_value()
    if value is None:
        return ''
    # bool must be tested before the numeric branch: bool subclasses int,
    # and JS stringifies booleans as lowercase keywords.
    if isinstance(value, bool):
        return u'true' if value else u'false'
    elif isinstance(value, (int, float, long)):
        if value == float('inf'):
            return u'Infinity'
        elif value == float('-inf'):
            return u'-Infinity'
        # Try to see if we can shave off some trailing significant figures.
        try:
            if int(value) == value:
                return unicode(int(value))
        except ValueError:
            # int(float('nan')) raises ValueError; fall through to unicode().
            pass
    return unicode(value)
| 2.046875 | 2 |
geometry_utils/tests/test_bound_box.py | NOAA-ORR-ERD/geometry_utils | 0 | 13482 | #!/usr/bin/env python
"""
Test code for the BBox Object
"""
import numpy as np
import pytest
from geometry_utils.bound_box import (BBox,
asBBox,
NullBBox,
InfBBox,
fromBBArray,
from_points,
)
class TestConstructors():
    """Constructor validation: type, dtype, shape and min/max ordering."""

    def test_creates(self):
        B = BBox(((0, 0), (5, 5)))
        assert isinstance(B, BBox)

    def test_type(self):
        # A plain ndarray must not be mistaken for a BBox.
        B = np.array(((0, 0), (5, 5)))
        assert not isinstance(B, BBox)

    def testDataType(self):
        B = BBox(((0, 0), (5, 5)))
        # np.float was removed in NumPy 1.24; compare against the concrete
        # dtype instead.
        assert B.dtype == np.float64

    def testShape(self):
        B = BBox((0, 0, 5, 5))
        assert B.shape == (2, 2)

    def testShape2(self):
        with pytest.raises(ValueError):
            BBox((0, 0, 5))

    def testShape3(self):
        with pytest.raises(ValueError):
            BBox((0, 0, 5, 6, 7))

    def testArrayConstruction(self):
        # np.float_ was removed in NumPy 2.0; use np.float64.
        A = np.array(((4, 5), (10, 12)), np.float64)
        B = BBox(A)
        assert isinstance(B, BBox)

    def testMinMax(self):
        with pytest.raises(ValueError):
            BBox((0, 0, -1, 6))

    def testMinMax2(self):
        with pytest.raises(ValueError):
            BBox((0, 0, 1, -6))

    def testMinMax3(self):
        # OK to have a zero-sized BB
        B = BBox(((0, 0), (0, 5)))
        assert isinstance(B, BBox)

    def testMinMax4(self):
        # OK to have a zero-sized BB
        B = BBox(((10., -34), (10., -34.0)))
        assert isinstance(B, BBox)

    def testMinMax5(self):
        # OK to have a tiny BB
        B = BBox(((0, 0), (1e-20, 5)))
        assert isinstance(B, BBox)

    def testMinMax6(self):
        # Should catch tiny difference
        with pytest.raises(ValueError):
            BBox(((0, 0), (-1e-20, 5)))
class TestAsBBox():
    """asBBox() passes BBoxes through and copies/views other inputs."""

    def testPassThrough(self):
        # An existing BBox must be returned unchanged, not copied.
        B = BBox(((0, 0), (5, 5)))
        C = asBBox(B)
        assert B is C

    def testPassThrough2(self):
        B = ((0, 0), (5, 5))
        C = asBBox(B)
        assert B is not C

    def testPassArray(self):
        # Different data type: a new array must be created.
        A = np.array(((0, 0), (5, 5)))
        C = asBBox(A)
        assert A is not C

    def testPassArray2(self):
        # Same data type -- should be a view sharing the original data.
        # np.float_ was removed in NumPy 2.0; use np.float64.
        A = np.array(((0, 0), (5, 5)), np.float64)
        C = asBBox(A)
        A[0, 0] = -10
        assert C[0, 0] == A[0, 0]
class TestIntersect():
    """Overlaps() between two boxes in every relative position.

    Boxes that merely touch along an edge or at a corner still overlap,
    as do zero-sized (degenerate) boxes lying on or inside another box.
    The literal coordinates below are the regression fixture and are kept
    verbatim.
    """

    def testSame(self):
        B = BBox(((-23.5, 456), (56, 532.0)))
        C = BBox(((-23.5, 456), (56, 532.0)))
        assert B.Overlaps(C)

    def testUpperLeft(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((0, 12), (10, 32.0)))
        assert B.Overlaps(C)

    def testUpperRight(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((12, 12), (25, 32.0)))
        assert B.Overlaps(C)

    def testLowerRight(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((12, 5), (25, 15)))
        assert B.Overlaps(C)

    def testLowerLeft(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((-10, 5), (8.5, 15)))
        assert B.Overlaps(C)

    # Fully separated boxes must not overlap.
    def testBelow(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((-10, 5), (8.5, 9.2)))
        assert not B.Overlaps(C)

    def testAbove(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((-10, 25.001), (8.5, 32)))
        assert not B.Overlaps(C)

    def testLeft(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((4, 8), (4.95, 32)))
        assert not B.Overlaps(C)

    def testRight(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((17.1, 8), (17.95, 32)))
        assert not B.Overlaps(C)

    def testInside(self):
        B = BBox(((-15, -25), (-5, -10)))
        C = BBox(((-12, -22), (-6, -8)))
        assert B.Overlaps(C)

    def testOutside(self):
        B = BBox(((-15, -25), (-5, -10)))
        C = BBox(((-17, -26), (3, 0)))
        assert B.Overlaps(C)

    # Edge/corner contact counts as overlapping.
    def testTouch(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((15, 8), (17.95, 32)))
        assert B.Overlaps(C)

    def testCorner(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((15, 25), (17.95, 32)))
        assert B.Overlaps(C)

    # Degenerate (zero-sized) boxes.
    def testZeroSize(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((15, 25), (15, 25)))
        assert B.Overlaps(C)

    def testZeroSize2(self):
        B = BBox(((5, 10), (5, 10)))
        C = BBox(((15, 25), (15, 25)))
        assert not B.Overlaps(C)

    def testZeroSize3(self):
        B = BBox(((5, 10), (5, 10)))
        C = BBox(((0, 8), (10, 12)))
        assert B.Overlaps(C)

    def testZeroSize4(self):
        B = BBox(((5, 1), (10, 25)))
        C = BBox(((8, 8), (8, 8)))
        assert B.Overlaps(C)
class TestEquality():
    """Equality comparisons between boxes and against plain ndarrays."""

    def testSame(self):
        box_a = BBox(((1.0, 2.0), (5., 10.)))
        box_b = BBox(((1.0, 2.0), (5., 10.)))
        assert box_a == box_b

    def testIdentical(self):
        box = BBox(((1.0, 2.0), (5., 10.)))
        assert box == box

    def testNotSame(self):
        box_a = BBox(((1.0, 2.0), (5., 10.)))
        box_b = BBox(((1.0, 2.0), (5., 10.1)))
        assert not box_a == box_b

    def testWithArray(self):
        box = BBox(((1.0, 2.0), (5., 10.)))
        arr = np.array(((1.0, 2.0), (5., 10.)))
        assert box == arr

    def testWithArray2(self):
        # Reflected comparison: ndarray on the left-hand side.
        box = BBox(((1.0, 2.0), (5., 10.)))
        arr = np.array(((1.0, 2.0), (5., 10.)))
        assert arr == box

    def testWithArray3(self):
        box = BBox(((1.0, 2.0), (5., 10.)))
        arr = np.array(((1.01, 2.0), (5., 10.)))
        assert not arr == box
class TestInside():
    """B.Inside(C) is true only when C lies entirely within B.

    Literal coordinates are the regression fixture and are kept verbatim.
    """

    def testSame(self):
        B = BBox(((1.0, 2.0), (5., 10.)))
        C = BBox(((1.0, 2.0), (5., 10.)))
        assert B.Inside(C)

    def testPoint(self):
        # A degenerate (point) box inside B counts as inside.
        B = BBox(((1.0, 2.0), (5., 10.)))
        C = BBox(((3.0, 4.0), (3.0, 4.0)))
        assert B.Inside(C)

    def testPointOutside(self):
        B = BBox(((1.0, 2.0), (5., 10.)))
        C = BBox(((-3.0, 4.0), (0.10, 4.0)))
        assert not B.Inside(C)

    # Partial overlap in any direction is not "inside".
    def testUpperLeft(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((0, 12), (10, 32.0)))
        assert not B.Inside(C)

    def testUpperRight(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((12, 12), (25, 32.0)))
        assert not B.Inside(C)

    def testLowerRight(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((12, 5), (25, 15)))
        assert not B.Inside(C)

    def testLowerLeft(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((-10, 5), (8.5, 15)))
        assert not (B.Inside(C))

    # Fully disjoint boxes are not inside either.
    def testBelow(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((-10, 5), (8.5, 9.2)))
        assert not (B.Inside(C))

    def testAbove(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((-10, 25.001), (8.5, 32)))
        assert not (B.Inside(C))

    def testLeft(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((4, 8), (4.95, 32)))
        assert not (B.Inside(C))

    def testRight(self):
        B = BBox(((5, 10), (15, 25)))
        C = BBox(((17.1, 8), (17.95, 32)))
        assert not (B.Inside(C))
class TestPointInside():
    """PointInside() for interior, exterior and on-edge points.

    Points lying exactly on an edge count as inside. Literal coordinates
    are the regression fixture and are kept verbatim.
    """

    def testPointIn(self):
        B = BBox(((1.0, 2.0), (5., 10.)))
        P = (3.0, 4.0)
        assert (B.PointInside(P))

    # Points clearly outside in each direction.
    def testUpperLeft(self):
        B = BBox(((5, 10), (15, 25)))
        P = (4, 30)
        assert not (B.PointInside(P))

    def testUpperRight(self):
        B = BBox(((5, 10), (15, 25)))
        P = (16, 30)
        assert not (B.PointInside(P))

    def testLowerRight(self):
        B = BBox(((5, 10), (15, 25)))
        P = (16, 4)
        assert not (B.PointInside(P))

    def testLowerLeft(self):
        B = BBox(((5, 10), (15, 25)))
        P = (-10, 5)
        assert not (B.PointInside(P))

    def testBelow(self):
        B = BBox(((5, 10), (15, 25)))
        P = (10, 5)
        assert not (B.PointInside(P))

    def testAbove(self):
        B = BBox(((5, 10), (15, 25)))
        P = (10, 25.001)
        assert not (B.PointInside(P))

    def testLeft(self):
        B = BBox(((5, 10), (15, 25)))
        P = (4, 12)
        assert not (B.PointInside(P))

    def testRight(self):
        B = BBox(((5, 10), (15, 25)))
        P = (17.1, 12.3)
        assert not (B.PointInside(P))

    # Points exactly on an edge are inside.
    def testPointOnTopLine(self):
        B = BBox(((1.0, 2.0), (5., 10.)))
        P = (3.0, 10.)
        assert (B.PointInside(P))

    def testPointLeftTopLine(self):
        B = BBox(((1.0, 2.0), (5., 10.)))
        P = (-3.0, 10.)
        assert not (B.PointInside(P))

    def testPointOnBottomLine(self):
        B = BBox(((1.0, 2.0), (5., 10.)))
        P = (3.0, 5.)
        assert (B.PointInside(P))

    def testPointOnLeft(self):
        B = BBox(((-10., -10.), (-1.0, -1.0)))
        P = (-10, -5.)
        assert (B.PointInside(P))

    def testPointOnRight(self):
        B = BBox(((-10., -10.), (-1.0, -1.0)))
        P = (-1, -5.)
        assert (B.PointInside(P))

    def testPointOnBottomRight(self):
        B = BBox(((-10., -10.), (-1.0, -1.0)))
        P = (-1, -10.)
        assert (B.PointInside(P))
class Test_from_points():
    """from_points() computes the bounding box of a point set."""

    def testCreate(self):
        Pts = np.array(((5, 2), (3, 4), (1, 6)), np.float64)
        B = from_points(Pts)
        assert (B[0, 0] == 1.0 and
                B[0, 1] == 2.0 and
                B[1, 0] == 5.0 and
                B[1, 1] == 6.0)

    def testCreateInts(self):
        # Integer input must still produce a correct (float) box.
        Pts = np.array(((5, 2), (3, 4), (1, 6)))
        B = from_points(Pts)
        assert (B[0, 0] == 1.0 and
                B[0, 1] == 2.0 and
                B[1, 0] == 5.0 and
                B[1, 1] == 6.0)

    def testSinglePoint(self):
        # A single point yields a degenerate (zero-sized) box.
        # np.float_ was removed in NumPy 2.0; use np.float64.
        Pts = np.array((5, 2), np.float64)
        B = from_points(Pts)
        assert (B[0, 0] == 5. and
                B[0, 1] == 2.0 and
                B[1, 0] == 5. and
                B[1, 1] == 2.0)

    def testListTuples(self):
        Pts = [(3, 6.5), (13, 43.2), (-4.32, -4), (65, -23), (-0.0001,
               23.432)]
        B = from_points(Pts)
        assert (B[0, 0] == -4.32 and
                B[0, 1] == -23.0 and
                B[1, 0] == 65.0 and
                B[1, 1] == 43.2)
class TestMerge():
    """Merge() expands a box just enough to cover another box."""

    A = BBox(((-23.5, 456), (56, 532.0)))
    B = BBox(((-20.3, 460), (54, 465)))   # completely inside A
    C = BBox(((-23.5, 456), (58, 540.)))  # up and to the right of A
    D = BBox(((-26.5, 12), (56, 532.0)))

    def testInside(self):
        merged = self.A.copy()
        merged.Merge(self.B)
        assert (merged == self.A)

    def testFullOutside(self):
        merged = self.B.copy()
        merged.Merge(self.A)
        assert (merged == self.A)

    def testUpRight(self):
        merged = self.A.copy()
        merged.Merge(self.C)
        assert (merged[0] == self.A[0] and merged[1] == self.C[1])

    def testDownLeft(self):
        merged = self.A.copy()
        merged.Merge(self.D)
        assert (merged[0] == self.D[0] and merged[1] == self.A[1])
class TestWidthHeight():
    """Width/Height are read-only computed properties."""

    B = BBox(((1.0, 2.0), (5., 10.)))

    def testWidth(self):
        assert (self.B.Width == 4.0)

    def testHeight(self):
        # Renamed from testWidth2: the assertion checks Height, not Width.
        assert (self.B.Height == 8.0)

    def testSetHeight(self):
        # Renamed from testSetW: this one attempts to assign Height.
        with pytest.raises(AttributeError):
            self.B.Height = 6

    def testSetWidth(self):
        # Renamed from testSetH: this one attempts to assign Width.
        with pytest.raises(AttributeError):
            self.B.Width = 6
class TestCenter():
    """Center is a read-only computed property."""

    box = BBox(((1.0, 2.0), (5., 10.)))

    def testCenter(self):
        assert ((self.box.Center == (3.0, 6.0)).all())

    def testSetCenter(self):
        with pytest.raises(AttributeError):
            self.box.Center = (6, 5)
class TestBBarray():
    """fromBBArray() merges an array of boxes into a single bounding box."""

    # np.float was removed in NumPy 1.24; use np.float64 instead.
    BBarray = np.array(
        (((-23.5, 456), (56, 532.0)),
         ((-20.3, 460), (54, 465)),
         ((-23.5, 456), (58, 540.)),
         ((-26.5, 12), (56, 532.0))),
        dtype=np.float64)
    # Expected union of the four boxes above.
    BB = asBBox(((-26.5, 12.), (58., 540.)))

    def testJoin(self):
        BB = fromBBArray(self.BBarray)
        assert BB == self.BB
class TestNullBBox():
    """Behavior of the special "null" (empty) bounding box."""

    B1 = NullBBox()
    B2 = NullBBox()
    B3 = BBox(((1.0, 2.0), (5., 10.)))

    def testValues(self):
        # A null box is all-NaN. np.alltrue was removed in NumPy 2.0;
        # np.all is the replacement.
        assert (np.all(np.isnan(self.B1)))

    def testIsNull(self):
        assert (self.B1.IsNull)

    def testEquals(self):
        assert ((self.B1 == self.B2) is True)

    def testNotEquals(self):
        assert not self.B1 == self.B3

    def testNotEquals2(self):
        assert not self.B3 == self.B1

    def testMerge(self):
        # Merging anything into a null box yields the other box.
        C = self.B1.copy()
        C.Merge(self.B3)
        assert C == self.B3, 'merge failed, got: %s' % C

    def testOverlaps(self):
        assert self.B1.Overlaps(self.B3) is False

    def testOverlaps2(self):
        assert self.B3.Overlaps(self.B1) is False
class TestInfBBox():
    """Behavior of the special "infinite" bounding box."""

    B1 = InfBBox()
    B2 = InfBBox()
    B3 = BBox(((1.0, 2.0), (5., 10.)))
    NB = NullBBox()

    def testValues(self):
        # An infinite box is all-inf. np.alltrue was removed in NumPy 2.0;
        # np.all is the replacement.
        assert (np.all(np.isinf(self.B1)))

    def testEquals(self):
        assert self.B1 == self.B2

    def testNotEquals(self):
        assert not self.B1 == self.B3

    def testNotEquals2(self):
        assert self.B1 != self.B3

    def testNotEquals3(self):
        assert not self.B3 == self.B1

    def testMerge(self):
        # Merging into an infinite box leaves it infinite.
        C = self.B1.copy()
        C.Merge(self.B3)
        assert C == self.B2, 'merge failed, got: %s' % C

    def testMerge2(self):
        # Merging an infinite box into a finite one makes it infinite.
        C = self.B3.copy()
        C.Merge(self.B1)
        assert C == self.B1, 'merge failed, got: %s' % C

    def testOverlaps(self):
        assert (self.B1.Overlaps(self.B2) is True)

    def testOverlaps2(self):
        assert (self.B3.Overlaps(self.B1) is True)

    def testOverlaps3(self):
        assert (self.B1.Overlaps(self.B3) is True)

    def testOverlaps4(self):
        # The infinite box overlaps even the null box.
        assert (self.B1.Overlaps(self.NB) is True)

    def testOverlaps5(self):
        assert (self.NB.Overlaps(self.B1) is True)
class TestSides():
    """Left/Right/Bottom/Top accessors return the box edges."""

    box = BBox(((1.0, 2.0), (5., 10.)))

    def testLeft(self):
        assert self.box.Left == 1.0

    def testRight(self):
        assert self.box.Right == 5.0

    def testBottom(self):
        assert self.box.Bottom == 2.0

    def testTop(self):
        assert self.box.Top == 10.0
class TestAsPoly():
    """AsPoly() returns the four corner points of the box."""

    B = BBox(((5, 0), (10, 20)))
    corners = np.array([(5., 0.), (5., 20.), (10., 20.), (10., 0.)],
                       dtype=np.float64)

    def testCorners(self):
        # Debug print removed; the assertion alone is sufficient.
        assert np.array_equal(self.B.AsPoly(), self.corners)
| 2.578125 | 3 |
cresi/net/augmentations/functional.py | ankshah131/cresi | 117 | 13483 | import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import numpy as np
import math
from functools import wraps
def clip(img, dtype, maxval):
    """Clamp *img* to the range [0, maxval] and cast the result to *dtype*."""
    clamped = np.clip(img, 0, maxval)
    return clamped.astype(dtype)
def clipped(func):
    """
    wrapper to clip results of transform to image dtype value range
    """
    @wraps(func)
    def wrapper(img, *args, **kwargs):
        # Capture the input's dtype and value ceiling before transforming.
        original_dtype = img.dtype
        original_max = np.max(img)
        result = func(img, *args, **kwargs)
        return clip(result, original_dtype, original_max)
    return wrapper
def fix_shift_values(img, *args):
    """
    shift values are normally specified in uint, but if your data is float - you need to remap values
    """
    if img.dtype != np.float32:
        return args
    # Rescale uint-style shifts into the [0, 1] float range.
    return list(map(lambda x: x / 255, args))
def vflip(img):
    """Flip *img* vertically (around the horizontal axis)."""
    return cv2.flip(img, 0)
def hflip(img):
    """Flip *img* horizontally (around the vertical axis)."""
    return cv2.flip(img, 1)
def flip(img, code):
    """Flip *img* using an OpenCV flip code (0=vertical, 1=horizontal, -1=both)."""
    return cv2.flip(img, code)
def transpose(img):
    """Swap the two spatial axes of *img*, keeping any channel axis last."""
    if len(img.shape) > 2:
        return img.transpose(1, 0, 2)
    return img.transpose(1, 0)
def rot90(img, times):
    """Rotate *img* counter-clockwise by 90 degrees, *times* times.

    The result is made C-contiguous so downstream OpenCV calls are safe.
    """
    rotated = np.rot90(img, times)
    return np.ascontiguousarray(rotated)
def rotate(img, angle):
    """
    rotate image on specified angle
    :param angle: angle in degrees
    """
    height, width = img.shape[0:2]
    # Rotate about the image center, keeping the original canvas size.
    mat = cv2.getRotationMatrix2D((width/2, height/2), angle, 1.0)
    # Reflect at the borders so rotated-in corners are not filled with black.
    img = cv2.warpAffine(img, mat, (width, height),
                         flags=cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REFLECT_101)
    return img
def shift_scale_rotate(img, angle, scale, dx, dy):
    """
    :param angle: in degrees
    :param scale: relative scale
    """
    height, width = img.shape[:2]
    # Build a scaled rotation matrix from the angle (converted to radians).
    cc = math.cos(angle/180*math.pi) * scale
    ss = math.sin(angle/180*math.pi) * scale
    rotate_matrix = np.array([[cc, -ss], [ss, cc]])
    # Corners of the source image...
    box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
    # ...rotated/scaled about the center, then shifted by (dx, dy) expressed
    # as fractions of the image size.
    box1 = box0 - np.array([width/2, height/2])
    box1 = np.dot(box1, rotate_matrix.T) + np.array([width/2+dx*width, height/2+dy*height])
    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    # Map the original corners onto the transformed corners.
    mat = cv2.getPerspectiveTransform(box0, box1)
    # Reflect at the borders so uncovered regions are not filled with black.
    img = cv2.warpPerspective(img, mat, (width, height),
                              flags=cv2.INTER_LINEAR,
                              borderMode=cv2.BORDER_REFLECT_101)
    return img
def center_crop(img, height, width):
    """Crop a (height, width) window from the center of an HWC image."""
    full_h, full_w, _ = img.shape
    top = (full_h - height) // 2
    left = (full_w - width) // 2
    return img[top:top + height, left:left + width, :]
def shift_hsv(img, hue_shift, sat_shift, val_shift):
    """Shift the hue, saturation and value channels of an RGB image.

    Hue is wrapped around the value range; saturation and value are
    clipped instead.
    """
    dtype = img.dtype
    maxval = np.max(img)
    # Work in int32 so the additions below cannot overflow uint8.
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.int32)
    h, s, v = cv2.split(img)
    h = cv2.add(h, hue_shift)
    # Wrap out-of-range hues back into [0, maxval].
    # NOTE(review): for negative hues this computes maxval - h rather than
    # maxval + h, which looks like a sign slip -- confirm intended behavior.
    h = np.where(h < 0, maxval - h, h)
    h = np.where(h > maxval, h - maxval, h)
    h = h.astype(dtype)
    s = clip(cv2.add(s, sat_shift), dtype, maxval)
    v = clip(cv2.add(v, val_shift), dtype, maxval)
    img = cv2.merge((h, s, v)).astype(dtype)
    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
    return img
def shift_channels(img, r_shift, g_shift, b_shift):
    """Add a per-channel offset to an RGB uint8 image, clipping to [0, 255]."""
    for channel, shift in enumerate((r_shift, g_shift, b_shift)):
        img[..., channel] = clip(img[..., channel] + shift, np.uint8, 255)
    return img
def clahe(img, clipLimit=2.0, tileGridSize=(8,8)):
    """Apply CLAHE to the lightness channel of an RGB image.

    The image is converted to LAB, the L channel is equalized, and the
    result is converted back to RGB.
    """
    # Renamed locals: the old `img_yuv` was actually LAB data, and the old
    # local `clahe` shadowed this function's own name.
    img_lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    equalizer = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_lab[:, :, 0] = equalizer.apply(img_lab[:, :, 0])
    img_output = cv2.cvtColor(img_lab, cv2.COLOR_LAB2RGB)
    return img_output
def blur(img, ksize):
    """Box-blur *img* with a square kernel of side *ksize*."""
    return cv2.blur(img, (ksize, ksize))
def invert(img):
    """Return the photographic negative of an 8-bit image."""
    return 255 - img
def channel_shuffle(img):
    """Randomly permute the channel (last) axis of a 3-channel image."""
    order = [0, 1, 2]
    np.random.shuffle(order)
    return img[..., order]
def img_to_tensor(im, verbose=False):
    """Convert an HWC image to a float32 CHW tensor.

    uint8 input is rescaled to [0, 1]; other dtypes are left unscaled.
    (AVE edit.)
    """
    scale = 255. if im.dtype == np.uint8 else 1
    im_out = np.moveaxis(im / scale, -1, 0).astype(np.float32)
    if verbose:
        # Fixed typo in the module name of the debug message.
        print ("augmentations.functional.py.img_to_tensor(): im_out.shape:", im_out.shape)
        print ("im_out.unique:", np.unique(im_out))
    return im_out
def mask_to_tensor(mask, num_classes, verbose=False):
    """Convert a segmentation mask to a float32 tensor.

    Multi-class masks are converted channel-first via img_to_tensor;
    binary masks get a singleton leading channel and uint8 values are
    rescaled to [0, 1]. (AVE edit.)
    """
    if num_classes > 1:
        mask = img_to_tensor(mask)
    else:
        scale = 255. if mask.dtype == np.uint8 else 1
        mask = np.expand_dims(mask / scale, 0).astype(np.float32)
    if verbose:
        # Fixed debug message: it previously named img_to_tensor and
        # misspelled the module ("functiona").
        print ("augmentations.functional.py.mask_to_tensor(): mask.shape:", mask.shape)
        print ("mask.unique:", np.unique(mask))
    return mask
| 2.890625 | 3 |
regtestsWin_customBuildPy.py | greenwoodms/TRANSFORM-Library | 29 | 13484 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 14 09:49:13 2017
@author: vmg
"""
import os
import buildingspy.development.regressiontest as r
# Configure and launch a BuildingsPy regression-test run over the
# TRANSFORM Modelica library.
rt = r.Tester(check_html=False)#,tool="dymola")
LibPath = os.path.join("TRANSFORM")
# Results are written alongside the library itself.
ResPath = LibPath
rt.showGUI(True)
rt.setLibraryRoot(LibPath, ResPath)
# Single-threaded run; raise for faster (but noisier) execution.
rt.setNumberOfThreads(1)
#rt.TestSinglePackage('Media.Solids.Examples.Hastelloy_N_Haynes', SinglePack=True)
rt.run()
| 1.859375 | 2 |
src/deoxys/model/activations.py | huynhngoc/deoxys | 1 | 13485 | # -*- coding: utf-8 -*-
__author__ = "<NAME>"
__email__ = "<EMAIL>"
from ..keras.layers import Activation
from ..keras.activations import deserialize
from ..utils import Singleton
class Activations(metaclass=Singleton):
    """
    A singleton that contains all the registered customized activations
    """

    def __init__(self):
        # key-name -> Activation subclass
        self._activations = {}

    def register(self, key, activation):
        # Only keras Activation subclasses may be registered.
        if not issubclass(activation, Activation):
            raise ValueError(
                "The customized activation has to be a subclass"
                + " of keras.activations.Activation"
            )
        if key in self._activations:
            raise KeyError(
                "Duplicated key, please use another key for this activation"
            )
        self._activations[key] = activation

    def unregister(self, key):
        # Silently ignore unknown keys, matching the original behavior.
        self._activations.pop(key, None)

    @property
    def activations(self):
        return self._activations
def register_activation(key, activation):
    """Register a customized activation under a unique key.

    Raises KeyError if *key* is already registered, and ValueError if
    *activation* is not an Activation subclass.

    Parameters
    ----------
    key: str
        The unique key-name of the activation
    activation: tensorflow.keras.activations.Activation
        The customized activation class
    """
    Activations().register(key, activation)
def unregister_activation(key):
    """Remove a previously registered activation.

    Parameters
    ----------
    key: str
        The key-name of the activation to be removed
    """
    Activations().unregister(key)
def activation_from_config(config):
    """Deserialize an activation from a ``{'class_name': ..., 'config': ...}`` dict.

    Registered customized activations are made available to keras'
    deserializer.
    """
    if 'class_name' not in config:
        raise ValueError('class_name is needed to define activation')
    # Activations specified by class_name alone get an empty config.
    config.setdefault('config', {})
    return deserialize(config, custom_objects=Activations().activations)
| 2.78125 | 3 |
raspisump/reading.py | seanm/raspi-sump | 79 | 13486 | <gh_stars>10-100
""" Module to take a water_level reading."""
# Raspi-sump, a sump pump monitoring system.
# <NAME>
# http://www.linuxnorth.org/raspi-sump/
#
# All configuration changes should be done in raspisump.conf
# MIT License -- http://www.linuxnorth.org/raspi-sump/license.html
try:
import ConfigParser as configparser # Python2
except ImportError:
import configparser # Python3
from hcsr04sensor import sensor
from raspisump import log, alerts, heartbeat
# Parse the single raspisump configuration file into a plain dict so the
# rest of the module never touches configparser directly.
config = configparser.RawConfigParser()
config.read("/home/pi/raspi-sump/raspisump.conf")

configs = {
    "critical_water_level": config.getint("pit", "critical_water_level"),
    "pit_depth": config.getint("pit", "pit_depth"),
    "temperature": config.getint("pit", "temperature"),
    "trig_pin": config.getint("gpio_pins", "trig_pin"),
    "echo_pin": config.getint("gpio_pins", "echo_pin"),
    "unit": config.get("pit", "unit"),
}

# If item in raspisump.conf add to configs dict. If not provide defaults.
try:
    configs["alert_when"] = config.get("pit", "alert_when")
except configparser.NoOptionError:
    # Default: alert when the water rises above the critical level.
    configs["alert_when"] = "high"
try:
    configs["heartbeat"] = config.getint("email", "heartbeat")
except configparser.NoOptionError:
    # Heartbeat emails are opt-in.
    configs["heartbeat"] = 0
def initiate_heartbeat():
    """Initiate the heartbeat email process if needed"""
    if configs["heartbeat"] == 1:
        heartbeat.determine_if_heartbeat()
def water_reading():
    """Initiate a water level reading."""
    value = sensor.Measurement(
        configs["trig_pin"],
        configs["echo_pin"],
        configs["temperature"],
        configs["unit"],
    )
    try:
        raw_distance = value.raw_distance(sample_wait=0.3)
    except SystemError:
        # No echo from the sensor -- log and bail out of this cycle.
        log.log_errors(
            "**ERROR - Signal not received. Possible cable or sensor problem."
        )
        exit(0)
    return round(value.depth(raw_distance, configs["pit_depth"]), 1)
def water_depth():
    """Determine the depth of the water, log result and generate alert
    if needed.
    """
    threshold = configs["critical_water_level"]
    depth = water_reading()
    # Sensor jitter can produce small negative depths; floor at zero.
    if depth < 0.0:
        depth = 0.0
    log.log_reading(depth)
    alert_when = configs["alert_when"]
    if alert_when == "high" and depth > threshold:
        alerts.determine_if_alert(depth)
    elif alert_when == "low" and depth < threshold:
        alerts.determine_if_alert(depth)
    initiate_heartbeat()
| 2.75 | 3 |
pipelines/pancreas_pipeline.py | marvinquiet/RefConstruction_supervisedCelltyping | 0 | 13487 | '''
Configuration generation for running Pancreas datasets
'''
import os, argparse
from pipelines import method_utils, dataloading_utils
from preprocess.process_train_test_data import *
if __name__ == "__main__":
data_dir = "~/gpu/data"
## parse arguments
import argparse
parser = argparse.ArgumentParser(description="Celltyping pipeline.")
parser.add_argument('data_source', help="Load which dataset",
choices=[
'pancreas', 'pancreas_seg_cond', 'pancreas_custom',
'pancreas_seg_mix', 'pancreas_multi_to_multi'
])
parser.add_argument('-m', '--method', help="Run which method",
choices=['MLP', 'MLP_GO', 'MLP_CP', 'GEDFN', 'ItClust', 'SVM_RBF', 'SVM_linear', 'RF'], ## remove DFN
required=True)
parser.add_argument('--select_on', help="Feature selection on train or test, or None of them",
choices=['train', 'test'])
parser.add_argument('--select_method', help="Feature selection method, Seurat/FEAST or None",
choices=['Seurat', 'FEAST', 'F-test'])
parser.add_argument('--n_features', help="Number of features selected",
default=1000, type=int)
parser.add_argument('--train', help="Specify which as train", required=True)
parser.add_argument('--test', help="Specify which as test", required=True)
parser.add_argument('--sample_seed', help="Downsample seed in combined individual effect",
default=0, type=int)
args = parser.parse_args()
pipeline_dir = "pipelines/result_Pancreas_collections"
result_prefix = pipeline_dir+os.sep+"result_"+args.data_source+'_'+\
args.train+'_to_'+args.test
os.makedirs(result_prefix, exist_ok=True)
## create file directory
if args.select_on is None and args.select_method is None:
result_dir = result_prefix+os.sep+"no_feature"
else:
result_dir = result_prefix+os.sep+args.select_method+'_'+\
str(args.n_features)+'_on_'+args.select_on
os.makedirs(result_dir, exist_ok=True)
load_ind, train_adata, test_adata = load_adata(result_dir)
if not load_ind:
train_adata, test_adata = dataloading_utils.load_Pancreas_adata(
data_dir, result_dir, args=args)
## whether to purify reference dataset
purify_method = ""
if "purify_dist" in args.data_source:
purify_method = "distance"
elif "purify_SVM" in args.data_source:
purify_method = "SVM"
train_adata, test_adata = dataloading_utils.process_loaded_data(
train_adata, test_adata, result_dir, args=args, purify_method=purify_method)
print("Train anndata: \n", train_adata)
print("Test anndata: \n", test_adata)
method_utils.run_pipeline(args, train_adata, test_adata, data_dir, result_dir)
| 2.390625 | 2 |
MachineLearning/StandardScaler/standardization.py | yexianyi/AI_Practice | 0 | 13488 | import pandas as pd
from sklearn.preprocessing import StandardScaler
def stand_demo():
    """Standardize three numeric columns of dating.txt and print statistics."""
    data = pd.read_csv("dating.txt")
    print(data)
    scaler = StandardScaler()
    scaled = scaler.fit_transform(data[['milage', 'Liters', 'Consumtime']])
    print("Standardization result: \n", scaled)
    print("Mean of each figure: \n", scaler.mean_)
    print("Variance of each figure: \n", scaler.var_)
    return None
# Run the demo whenever this module is executed (or imported).
stand_demo()
| 3.203125 | 3 |
tests/test_primitive_roots.py | greysonDEV/rng | 0 | 13489 | <reponame>greysonDEV/rng
from prng.util.util import primitive_roots
import pytest
def test_primitive_roots():
    """primitive_roots(p) returns exactly the known roots for small primes."""
    expected = {
        3: [2],
        7: [3, 5],
        13: [2, 6, 7, 11],
        17: [3, 5, 6, 7, 10, 11, 12, 14],
        19: [2, 3, 10, 13, 14, 15],
        31: [3, 11, 12, 13, 17, 21, 22, 24],
        53: [2, 3, 5, 8, 12, 14, 18, 19, 20, 21, 22, 26, 27, 31, 32, 33, 34,
             35, 39, 41, 45, 48, 50, 51],
        61: [2, 6, 7, 10, 17, 18, 26, 30, 31, 35, 43, 44, 51, 54, 55, 59],
        79: [3, 6, 7, 28, 29, 30, 34, 35, 37, 39, 43, 47, 48, 53, 54, 59, 60,
             63, 66, 68, 70, 74, 75, 77],
        103: [5, 6, 11, 12, 20, 21, 35, 40, 43, 44, 45, 48, 51, 53, 54, 62,
              65, 67, 70, 71, 74, 75, 77, 78, 84, 85, 86, 87, 88, 96, 99, 101],
    }
    assert all(sorted(primitive_roots(modulus)) == roots
               for modulus, roots in expected.items())
| 2.125 | 2 |
hackerrank-python/xml-1-find-the-score.py | fmelihh/competitive-programming-solutions | 2 | 13490 | <reponame>fmelihh/competitive-programming-solutions
# https://www.hackerrank.com/challenges/xml-1-find-the-score/problem
import sys
import xml.etree.ElementTree as etree
def get_attr_number(node):
    """Count attributes across the whole subtree rooted at *node*.

    Each serialized attribute contributes exactly one '=' character.
    """
    serialized = etree.tostring(node)
    return serialized.count(b'=')
if __name__ == '__main__':
    # HackerRank input: first stdin line is the element count (unused),
    # the remainder is the XML document itself.
    sys.stdin.readline()
    xml = sys.stdin.read()
    tree = etree.ElementTree(etree.fromstring(xml))
    root = tree.getroot()
    print(get_attr_number(root))
| 3.796875 | 4 |
CodeChef/Contest/June Long/pricecon.py | GSri30/Competetive_programming | 22 | 13491 | test = int(input())
while test > 0 :
n,k = map(int,input().split())
p = list(map(int,input().split()))
original = 0
later = 0
for i in p :
if i > k :
later += k
original += i
else :
later += i
original += i
print(original-later)
test -= 1 | 2.671875 | 3 |
src/utils/Shell.py | vlab-cs-ucsb/quacky | 1 | 13492 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 18 22:20:01 2014
@author: baki
"""
import shlex
from subprocess import Popen, PIPE
from .Log import Log
class Shell:
    """Thin wrapper around subprocess for running shell commands.

    Supports synchronous commands (``runcmd``), at most one background
    process per instance (``runcmdBgrnd``), and a handful of file-system
    helpers built on the usual Unix tools.
    """

    def __init__(self, TAG=""):
        self.log = Log(TAG=TAG)
        # State for the single supported background process.
        self.current_process = None
        self.process_output = None

    def setTag(self, tag):
        """Change the tag used by this shell's logger."""
        self.log.setTag(tag)

    def runcmd(self, cmd, cwd=None, shell=False):
        """Run *cmd* synchronously and return ``(stdout, stderr)`` as text."""
        # self.log.v("cmd: {}\n with params: cwd={}, shell={}".format(cmd, cwd, shell))
        args = shlex.split(cmd)
        p = Popen(args, stdout=PIPE, stderr=PIPE, cwd=cwd, shell=shell)
        out, err = p.communicate()
        if out:
            out = out.decode("ascii")
            # self.log.v("cmd output: {}\n".format(out))
        if err:
            err = err.decode("ascii")
            # self.log.v("cmd error: {}\n".format(err))
        return out, err

    def runcmdBgrnd(self, cmd, out=PIPE, cwd=None, shell=False):
        """Start *cmd* in the background, optionally redirecting output to a file."""
        # Identity check fixed: compare against None with `is`, not `==`.
        assert self.current_process is None, "currently, one shell object supports only one background process"
        self.log.v("cmd: {}\n with params: out={}, cwd={}, shell={}".format(cmd, out, cwd, shell))
        redirect_to = out
        if out is not PIPE:
            assert self.process_output is None, "currently, one shell object supports only one background process"
            redirect_to = open(out, "w")
        args = shlex.split(cmd)
        p = Popen(args, stdout=redirect_to, stderr=redirect_to, cwd=cwd, shell=shell)
        self.current_process = p
        self.process_output = redirect_to
        return p

    def kill(self, process=None):
        """Kill *process* (default: the background process) and close its output."""
        if process is None:
            process = self.current_process
        process and process.kill()
        self.process_output and self.process_output.close()

    def terminate(self, process=None):
        """Terminate *process* (default: the background process) and close its output."""
        if process is None:
            process = self.current_process
        process and process.terminate()
        self.process_output and self.process_output.close()

    def runGrep(self, search, subject, options):
        """Run ``grep`` over *subject* with the given pattern and options."""
        cmd = "grep {} \"{}\" {}".format(options, search, subject)
        return self.runcmd(cmd)

    def rm(self, name):
        cmd = "rm {}".format(name)
        return self.runcmd(cmd)

    def rmdir(self, name):
        cmd = "rmdir {}".format(name)
        return self.runcmd(cmd)

    def rmrdir(self, name):
        cmd = "rm -r {}".format(name)
        return self.runcmd(cmd)

    def mv(self, src, dst):
        cmd = "mv {} {}".format(src, dst)
        return self.runcmd(cmd)

    def cp(self, src, dst):
        cmd = "cp -r {} {}".format(src, dst)
        return self.runcmd(cmd)

    def mkdir(self, name):
        cmd = "mkdir {} -p".format(name)
        return self.runcmd(cmd)

    def clean(self, name):
        """Recreate *name* as an empty directory."""
        self.rmrdir(name)
        self.mkdir(name)
| 2.5625 | 3 |
scrapy_autounit/middleware.py | ogiaquino/scrapy-autounit | 0 | 13493 | import os
import six
import copy
import pickle
import random
import logging
from scrapy.http import Request
from scrapy.exceptions import NotConfigured
from scrapy.commands.genspider import sanitize_module_name
from scrapy.spiders import CrawlSpider
from .utils import (
add_sample,
response_to_dict,
get_or_create_test_dir,
parse_request,
parse_object,
get_project_dir,
get_middlewares,
create_dir,
)
logger = logging.getLogger(__name__)
def _copy_settings(settings):
out = {}
for name in settings.getlist('AUTOUNIT_INCLUDED_SETTINGS', []):
out[name] = settings.get(name)
return out
class AutounitMiddleware:
    """Spider middleware that records each callback's input and output.

    On the way in, the request/response pair and the spider's state are
    pickled into ``response.meta``; on the way out, the callback's results
    are captured and written as a test fixture (at most
    ``max_fixtures`` samples per callback, reservoir-sampled).
    """

    def __init__(self, settings):
        # Refuse to run unless explicitly installed as a spider middleware,
        # so fixtures are only recorded deliberately.
        if not any(
                self.__class__.__name__ in s
                for s in settings.getwithbase('SPIDER_MIDDLEWARES').keys()
        ):
            raise ValueError(
                '%s must be in SPIDER_MIDDLEWARES' % (
                    self.__class__.__name__,))
        if not settings.getbool('AUTOUNIT_ENABLED'):
            raise NotConfigured('scrapy-autounit is not enabled')
        if settings.getint('CONCURRENT_REQUESTS') > 1:
            # logger.warn is a deprecated alias of logger.warning.
            logger.warning(
                'Recording with concurrency > 1! '
                'Data races in shared object modification may create broken '
                'tests.'
            )

        self.max_fixtures = settings.getint(
            'AUTOUNIT_MAX_FIXTURES_PER_CALLBACK',
            default=10
        )
        # Enforce a lower bound of 10 fixtures per callback.
        self.max_fixtures = max(self.max_fixtures, 10)

        self.base_path = settings.get(
            'AUTOUNIT_BASE_PATH',
            default=os.path.join(get_project_dir(), 'autounit')
        )
        create_dir(self.base_path, exist_ok=True)

        # Per-callback count of results seen so far (for reservoir sampling).
        self.fixture_counters = {}

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings)

    def process_spider_input(self, response, spider):
        """Stash a pickled snapshot of the callback's input in response.meta."""
        # Attributes that are not part of the spider's recordable state.
        filter_args = {'crawler', 'settings', 'start_urls'}
        if isinstance(spider, CrawlSpider):
            filter_args |= {'rules', '_rules'}
        response.meta['_autounit'] = pickle.dumps({
            'request': parse_request(response.request, spider),
            'response': response_to_dict(response),
            'spider_args': {
                k: v for k, v in spider.__dict__.items()
                if k not in filter_args
            },
            'middlewares': get_middlewares(spider),
        })
        return None

    def process_spider_output(self, response, result, spider):
        """Capture the callback's results and maybe record them as a fixture."""
        settings = spider.settings

        processed_result = []
        out = []
        for elem in result:
            out.append(elem)
            is_request = isinstance(elem, Request)
            if is_request:
                _data = parse_request(elem, spider)
            else:
                # Deep-copy so later spider mutations don't alter the record.
                _data = parse_object(copy.deepcopy(elem), spider)
            processed_result.append({
                'type': 'request' if is_request else 'item',
                'data': _data
            })

        input_data = pickle.loads(response.meta.pop('_autounit'))
        request = input_data['request']
        callback_name = request['callback']

        spider_attr_out = {
            k: v for k, v in spider.__dict__.items()
            if k not in ('crawler', 'settings', 'start_urls')
        }

        data = {
            'spider_name': spider.name,
            'request': request,
            'response': input_data['response'],
            'spider_args_out': spider_attr_out,
            'result': processed_result,
            'spider_args_in': input_data['spider_args'],
            'settings': _copy_settings(settings),
            'middlewares': input_data['middlewares'],
            'python_version': 2 if six.PY2 else 3,
        }

        callback_counter = self.fixture_counters.setdefault(callback_name, 0)
        self.fixture_counters[callback_name] += 1

        test_dir, test_name = get_or_create_test_dir(
            self.base_path,
            sanitize_module_name(spider.name),
            callback_name,
            settings.get('AUTOUNIT_EXTRA_PATH'),
        )

        # Reservoir sampling: keep the first max_fixtures samples, then
        # replace existing ones with decreasing probability.
        if callback_counter < self.max_fixtures:
            add_sample(callback_counter + 1, test_dir, test_name, data)
        else:
            r = random.randint(0, callback_counter)
            if r < self.max_fixtures:
                add_sample(r + 1, test_dir, test_name, data)

        return out
| 2.140625 | 2 |
python_examples/create_tags/utils.py | kirank0220/api-examples | 1 | 13494 | <reponame>kirank0220/api-examples
#########################################################################
# _________ ___. ______________________ ___
# \_ ___ \___.__.\_ |__ ___________ / _____/\______ \ \/ /
# / \ \< | | | __ \_/ __ \_ __ \/ \ ___ | _/\ /
# \ \___\___ | | \_\ \ ___/| | \/\ \_\ \| | \/ \
# \______ / ____| |___ /\___ >__| \______ /|____|_ /___/\ \
# \/\/ \/ \/ \/ \/ \_/
#
#
import os
import json
import requests
from collections import OrderedDict
from openpyxl import Workbook
from openpyxl.styles.fills import FILL_SOLID
from openpyxl.styles import Color, PatternFill, Font, Border, Side
from openpyxl.styles import colors
from openpyxl.cell import Cell
from tqdm import tqdm
from glom import glom
def _cell_value(cell):
return "{}".format(cell.value).strip() if cell and cell.value else ""
def columns_for_headers(row, header_map):
mapping = {}
for idx, col in enumerate(row):
column = _cell_value(col)
if column and header_map.get(column, None):
mapping[idx] = header_map.get(column, None)
return mapping
def process_companies(sheet, header_mapping):
    """Extract company records from an openpyxl-style *sheet*.

    The first row containing recognised headers (per *header_mapping*) defines
    which columns hold the company name and its comma-separated tags; each
    following row becomes an OrderedDict with a ``name`` and a ``tags`` list.
    Rows missing a name or tags are reported and skipped.

    Raises:
        Exception: if the header row does not yield exactly the two expected
            columns (name and tags).
    """
    companies = []
    headers = {}
    # The enumerate index in the original loop was unused.
    for row in sheet.iter_rows():
        if not headers:
            headers = columns_for_headers(row, header_mapping)
            if headers and len(headers) != 2:
                print(headers)
                raise Exception("Need column headers for both company names and tags")
        else:
            company = OrderedDict()
            for column_index, col in enumerate(row):
                if column_index not in headers:
                    continue
                if col.value is not None:
                    try:
                        # Force a utf-8 round trip for string cells.
                        company[headers[column_index]] = bytearray(col.value, "utf-8").decode("utf-8")
                    except Exception:
                        # Non-string values (numbers, dates) are kept as-is.
                        # (Narrowed from a bare except, which also swallowed
                        # KeyboardInterrupt/SystemExit.)
                        company[headers[column_index]] = col.value
            if not company:
                continue
            if "tags" not in company:
                print("Company did not have any tags: ", company, " did you provide the correct column header?")
                continue
            if "name" not in company:
                print("Company did not have a name: ", company, " did you provide the correct column header?")
                continue
            company["tags"] = [str(tag).strip() for tag in company["tags"].split(",") if tag and str(tag).strip()]
            if not company["tags"]:
                print("Company did not have any tags: ", company)
            else:
                companies.append(company)
    return companies
| 2.515625 | 3 |
gpytorch/lazy/non_lazy_tensor.py | phumm/gpytorch | 1 | 13495 | <reponame>phumm/gpytorch
#!/usr/bin/env python3
import torch
from .lazy_tensor import LazyTensor
class NonLazyTensor(LazyTensor):
    """Trivial LazyTensor backed by an explicit ``torch.Tensor``.

    Every lazy operation simply delegates to the underlying dense tensor.
    """

    def _check_args(self, tsr):
        # Returns an error string rather than raising (None means valid) —
        # presumably the LazyTensor validation convention; confirm in base class.
        if not torch.is_tensor(tsr):
            return "NonLazyTensor must take a torch.Tensor; got {}".format(tsr.__class__.__name__)
        if tsr.dim() < 2:
            return "NonLazyTensor expects a matrix (or batches of matrices) - got a Tensor of size {}.".format(
                tsr.shape
            )

    def __init__(self, tsr):
        """
        Not a lazy tensor

        Args:
        - tsr (Tensor: matrix) a Tensor
        """
        # Zero-argument super(): file declares itself Python 3 (shebang).
        super().__init__(tsr)
        self.tensor = tsr

    def _expand_batch(self, batch_shape):
        return self.__class__(self.tensor.expand(*batch_shape, *self.matrix_shape))

    def _get_indices(self, row_index, col_index, *batch_indices):
        # Fancy-indexed entries, returned as a plain tensor.
        res = self.tensor[(*batch_indices, row_index, col_index)]
        return res

    def _getitem(self, row_index, col_index, *batch_indices):
        # Same indexing as _get_indices, but wrapped lazily again.
        res = self.tensor[(*batch_indices, row_index, col_index)]
        return self.__class__(res)

    def _matmul(self, rhs):
        return torch.matmul(self.tensor, rhs)

    def _prod_batch(self, dim):
        return self.__class__(self.tensor.prod(dim))

    def _quad_form_derivative(self, left_vecs, right_vecs):
        res = left_vecs.matmul(right_vecs.transpose(-1, -2))
        return (res,)

    def _size(self):
        return self.tensor.size()

    def _sum_batch(self, dim):
        return self.__class__(self.tensor.sum(dim))

    def _transpose_nonbatch(self):
        # Use self.__class__ for consistency with every other method here,
        # so subclasses stay closed under transposition.
        return self.__class__(self.tensor.transpose(-1, -2))

    def _t_matmul(self, rhs):
        return torch.matmul(self.tensor.transpose(-1, -2), rhs)

    def diag(self):
        """Return the (batched) diagonal of the wrapped matrix."""
        if self.tensor.ndimension() < 3:
            return self.tensor.diag()
        else:
            row_col_iter = torch.arange(0, self.matrix_shape[-1], dtype=torch.long, device=self.device)
            return self.tensor[..., row_col_iter, row_col_iter].view(*self.batch_shape, -1)

    def evaluate(self):
        """Materialize: the dense tensor is already available."""
        return self.tensor

    def __add__(self, other):
        # Dense + dense adds eagerly; anything else defers to LazyTensor.
        if isinstance(other, NonLazyTensor):
            return NonLazyTensor(self.tensor + other.tensor)
        else:
            return super().__add__(other)

    def mul(self, other):
        # Elementwise product, eager for dense operands.
        if isinstance(other, NonLazyTensor):
            return NonLazyTensor(self.tensor * other.tensor)
        else:
            return super().mul(other)
def lazify(obj):
    """Coerce *obj* into a LazyTensor.

    An existing LazyTensor is returned unchanged; a plain torch.Tensor is
    wrapped in a NonLazyTensor; anything else raises TypeError.
    """
    if isinstance(obj, LazyTensor):
        return obj
    if torch.is_tensor(obj):
        return NonLazyTensor(obj)
    raise TypeError("object of class {} cannot be made into a LazyTensor".format(obj.__class__.__name__))
__all__ = ["NonLazyTensor", "lazify"]
| 2.421875 | 2 |
aoc_wim/aoc2019/q19.py | wimglenn/advent-of-code-wim | 20 | 13496 | <filename>aoc_wim/aoc2019/q19.py<gh_stars>10-100
"""
--- Day 19: Tractor Beam ---
https://adventofcode.com/2019/day/19
"""
from aocd import data
from aoc_wim.aoc2019 import IntComputer
from aoc_wim.zgrid import ZGrid
from aoc_wim.search import Bisect
import functools
@functools.lru_cache(maxsize=100**2)
def beam(z):
    """Return 1 if the tractor beam is active at complex point *z*, else 0.

    Runs the intcode program with the point's (y, x) coordinates as input;
    results are memoized since the program is deterministic per point.
    """
    computer = IntComputer(data, inputs=[int(z.imag), int(z.real)])
    computer.run(until=IntComputer.op_output)
    [is_on] = computer.output
    return is_on
def left_edge_of_beam(y, gradient, beam=beam):
    """Return the leftmost in-beam point on row *y* as a complex number.

    *gradient* (an estimate of y/x along the beam's left edge) seeds the
    starting x; the point is then nudged left or right until it sits exactly
    on the edge. *beam* is the membership predicate.
    """
    z = int(y / gradient) + y * 1j
    if beam(z):
        # Started inside the beam: walk left while still inside.
        while beam(z - 1):
            z -= 1
    else:
        # Started left of the beam: walk right until just before the edge,
        # then step onto it.
        while not beam(z + 1):
            z += 1
        z += 1
    # Postcondition: z is in the beam and its left neighbour is not.
    assert beam(z) and not beam(z - 1)
    return z
def locate_square(beam, width, gradient_estimate=1., hi=None):
    """Find the first row where a width x width square fits inside the beam,
    and return the square's top-left corner as a complex point (part b).

    *beam* is the membership predicate, *gradient_estimate* seeds
    left_edge_of_beam, and *hi* optionally bounds the bisection.
    """
    d = width - 1
    def check(y):
        # A row is "wide enough" when the point d steps NE of the row's left
        # edge is also in the beam (ZGrid.NE presumably up-and-right — confirm
        # against the zgrid module).
        z = left_edge_of_beam(y, gradient_estimate, beam)
        val = beam(z + d * ZGrid.NE)
        print(f"y={y}", "wide" if val else "narrow")
        return val
    # Bisect for the first row passing check; +1 because Bisect.run()
    # presumably returns the last failing row — confirm Bisect semantics.
    bisect = Bisect(check, lo=d, hi=hi)
    print("bisecting...")
    y = bisect.run() + 1
    # Top-left corner is d steps north of that row's left edge.
    z = left_edge_of_beam(y, gradient_estimate, beam) + d * ZGrid.N
    return z
if __name__ == "__main__":
    # Part a: count beam-covered points in the 50x50 region nearest the emitter.
    print("populating 50x50 zgrid...")
    grid = ZGrid()
    x0 = 0
    for y in range(50):
        on = False
        for x in range(x0, 50):
            z = x + y * 1j
            val = grid[z] = beam(z)
            if not on and val:
                # First beam cell in this row; remember where the beam starts
                # so later rows can skip the empty left margin.
                on = True
                x0 = x
                if x0:
                    # Rough gradient (y over x) of the beam's left edge.
                    m = y / x0
            if on and not val:
                # Past the beam's right edge — nothing further in this row.
                break
    print("part a", sum(grid.values()))
    grid.translate({0: ".", 1: "#"})
    grid.draw()
    # NOTE(review): m is only bound if some row's beam starts at x0 > 0 —
    # confirm the beam's left edge leaves column 0 within the first 50 rows.
    print("initial gradient is approx -->", m)
    # Refine the gradient far from the origin, where quantization error is small.
    print("refining gradient estimate -->", end=" ")
    z = left_edge_of_beam(2000, gradient=m)
    m = z.imag/z.real
    print(m)
    z = locate_square(beam, width=100, gradient_estimate=m)
    print("part b", int(z.real)*10000 + int(z.imag))
| 2.765625 | 3 |
BlurDetection.py | samaritan-security/samaritan-backend | 0 | 13497 | <filename>BlurDetection.py
import cv2
def variance_of_laplacian(image):
    """Return the variance of the image's Laplacian (a focus/sharpness measure)."""
    laplacian = cv2.Laplacian(image, cv2.CV_64F)
    return laplacian.var()
"""
checks if an image is blurry
returns True if blurry, False otherwise
"""
def detect_blurry_image(image, threshold):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = variance_of_laplacian(image)
if(blur < threshold):
return True
return False | 3.5625 | 4 |
python-essential-training/4_operators/main.py | alexprodan99/python-workspace | 0 | 13498 | <gh_stars>0
def main():
    """Demonstrate Python's arithmetic, bitwise, comparison and boolean operators."""
    # Arithmetic operators
    a = 7
    b = 2
    print(f'{a} + {b} = {a+b}')
    print(f'{a} - {b} = {a-b}')
    print(f'{a} * {b} = {a*b}')
    print(f'{a} / {b} = {a/b}')
    print(f'{a} // {b} = {a//b}')
    print(f'{a} % {b} = {a%b}')
    # Bug fix: exponentiation is ** in Python. The old label printed
    # "7 ^ 2 = 49", which clashes with the XOR line below ("7 ^ 2 = 5") —
    # the same displayed expression showed two different answers.
    print(f'{a} ** {b} = {a**b}')

    # Bitwise operators
    # &, |, ^, <<, >>
    print(f'{a} & {b} = {a&b}')
    print(f'{a} | {b} = {a|b}')
    print(f'{a} ^ {b} = {a^b}')
    print(f'{a} << {b} = {a<<b}')
    print(f'{a} >> {b} = {a>>b}')

    a = 0xff
    print(a)  # 255
    # fill with zeroes; the digit after the colon is the minimum field width
    print(f'hex(a)={a:03x}')  # 0ff
    print(f'bin(a)={a:09b}')

    # Comparison operators
    # >, <, ==, !=, >=, <=
    # Boolean operators
    # and, or, not, in, not in, is, is not
if __name__ == '__main__':
main() | 4 | 4 |
UPGen/utils.py | HenryLiangzy/COMP9517_Group | 21 | 13499 | <gh_stars>10-100
"""
Helper functions and utilities
"""
from datetime import datetime as dt
from mrcnn import visualize
import numpy as np
import os
import cv2
TIMESTAMP_FORMAT = "%d/%m/%Y %H:%M:%S"


class Logger(object):
    """
    Append-mode event logger: each entry is prefixed with a wall-clock
    timestamp and written to a file.
    """
    def __init__(self, savePath):
        self.savePath = savePath
        self.log_file = open(self.savePath, 'a')
        self.log_line("Start of Log File")

    def close(self):
        """Write the closing marker and release the file handle."""
        self.log_line("End of Log File")
        self.log_file.close()

    def flush(self):
        self.log_file.flush()

    def time_stamp(self):
        """Write the current time (TIMESTAMP_FORMAT) as an entry prefix."""
        stamp = dt.now().strftime(TIMESTAMP_FORMAT)
        self.log_file.write(stamp + ': ')

    def log_line(self, *args):
        '''
        Write a timestamped, space-separated entry followed by a newline.
        '''
        self.time_stamp()
        for item in args:
            self.log_file.write(str(item) + ' ')
        self.log_file.write('\n')
        self.flush()

    def log(self, *args):
        '''
        Write a timestamped, space-separated entry without a trailing newline.
        '''
        self.time_stamp()
        for item in args:
            self.log_file.write(str(item) + ' ')
        self.flush()

    def newline(self):
        self.log_file.write("\n")
        self.flush()
def mask_to_rgb(mask):
    """
    Render a (H, W, N) instance-mask stack as one RGB image, giving each of
    the N instances its own random colour.
    """
    n_instances = mask.shape[2]
    colours = visualize.random_colors(n_instances)
    rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
    for i in range(n_instances):
        colour = colours[i]
        for c in range(3):
            rgb_mask[:, :, c] = np.where(mask[:, :, i] != 0, int(colour[c] * 255), rgb_mask[:, :, c])
    return rgb_mask
def mask_to_outlined(mask):
    """
    Render a (H, W, N) instance-mask stack as an RGB image with each instance
    filled in its own random colour and outlined in white.
    """
    colours = visualize.random_colors(mask.shape[2])
    rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
    # First pass: flood each instance channel with its own colour.
    for i in range(mask.shape[2]):
        for c in range(3):
            rgb_mask[:, :, c] = np.where(mask[:, :, i] != 0, int(colours[i][c] * 255), rgb_mask[:, :, c])
    # put edges over the top of the colours
    for i in range(mask.shape[2]):
        # Find the contour of the leaf
        # NOTE(review): this slice is a view, so the binarisation below
        # mutates the caller's mask in place — confirm that is intended.
        threshold = mask[:, :, i]
        threshold[threshold != 0] = 255
        # NOTE(review): the 3-value unpacking matches OpenCV 3.x's
        # findContours; OpenCV 4.x returns only (contours, hierarchy) —
        # confirm the pinned cv2 version.
        _, contours, hierarchy = cv2.findContours(threshold.astype(np.uint8),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        # Draw outline on mask
        if len(contours) > 0:
            # Only the first contour is drawn, in white, 1px thick.
            cv2.drawContours(rgb_mask, [contours[0]], 0, (255, 255, 255), thickness=1)
    return rgb_mask
def check_create_dir(directory):
    """Create *directory* if it does not exist.

    Returns:
        True if the directory was created, False if it already existed.
    """
    if os.path.isdir(directory):
        return False
    print("creating directory:", directory)
    os.mkdir(directory)
    return True