body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
bf2a9181172220a0e010b3b42442d4640552c4a7f4deab2014ed8aef6d69c8e0 | def set_population(self, new_population):
'\n Sets a population with a pre-generated one.\n\n Parameters\n ----------\n new_population: array_like\n A matrix with dimensions N by D, which represents the coordinates\n of each particle.\n\n Returns\n -------\n No value.\n '
SwarmAlgorithm.set_population(self, new_population)
self.velocities = np.zeros((self.N, self.D)) | Sets a population with a pre-generated one.
Parameters
----------
new_population: array_like
A matrix with dimensions N by D, which represents the coordinates
of each particle.
Returns
-------
No value. | models/single_objective/gravitational_search.py | set_population | AlexanderKlanovets/swarm_algorithms | 9 | python | def set_population(self, new_population):
'\n Sets a population with a pre-generated one.\n\n Parameters\n ----------\n new_population: array_like\n A matrix with dimensions N by D, which represents the coordinates\n of each particle.\n\n Returns\n -------\n No value.\n '
SwarmAlgorithm.set_population(self, new_population)
self.velocities = np.zeros((self.N, self.D)) | def set_population(self, new_population):
'\n Sets a population with a pre-generated one.\n\n Parameters\n ----------\n new_population: array_like\n A matrix with dimensions N by D, which represents the coordinates\n of each particle.\n\n Returns\n -------\n No value.\n '
SwarmAlgorithm.set_population(self, new_population)
self.velocities = np.zeros((self.N, self.D))<|docstring|>Sets a population with a pre-generated one.
Parameters
----------
new_population: array_like
A matrix with dimensions N by D, which represents the coordinates
of each particle.
Returns
-------
No value.<|endoftext|> |
7755fbb7edfc4050839617dbc54d29976de85bff58e2899bac651fcc591a2963 | def set_params(self, new_params):
'\n Initialize the algorithm with a strategy (vector of parameters).\n\n Parameters\n ----------\n new_params : GravitationalSearchParams\n\n Returns\n -------\n No value.\n '
self.G0 = new_params.G0
self.alpha = new_params.alpha | Initialize the algorithm with a strategy (vector of parameters).
Parameters
----------
new_params : GravitationalSearchParams
Returns
-------
No value. | models/single_objective/gravitational_search.py | set_params | AlexanderKlanovets/swarm_algorithms | 9 | python | def set_params(self, new_params):
'\n Initialize the algorithm with a strategy (vector of parameters).\n\n Parameters\n ----------\n new_params : GravitationalSearchParams\n\n Returns\n -------\n No value.\n '
self.G0 = new_params.G0
self.alpha = new_params.alpha | def set_params(self, new_params):
'\n Initialize the algorithm with a strategy (vector of parameters).\n\n Parameters\n ----------\n new_params : GravitationalSearchParams\n\n Returns\n -------\n No value.\n '
self.G0 = new_params.G0
self.alpha = new_params.alpha<|docstring|>Initialize the algorithm with a strategy (vector of parameters).
Parameters
----------
new_params : GravitationalSearchParams
Returns
-------
No value.<|endoftext|> |
edde43434d1607d96fc1fabb2b8bd5dfd6dcd631093c9574387ca0c97d11176a | def __get_acceleration(self, M, G, iteration):
'\n Computes the acceleration for each object.\n\n Parameters\n ----------\n M : ndarray\n An array of size N representing object (particles) masses.\n G : float\n Gravitational constant.\n iteration : int\n Current iteration of the optimization process.\n \n Returns\n -------\n ndarray\n An N by D matrix, which represents an array of acceleration vectors\n for each object.\n '
final_per = 2
kbest = (final_per + ((1 - (iteration / self.max_iter)) * (100 - final_per)))
kbest = math.trunc(((self.N * kbest) / 100))
M_sorted_i = np.argsort((- M))
E = np.zeros((self.N, self.D))
for i in range(self.N):
for ii in range(kbest):
j = M_sorted_i[ii]
if (j != i):
R = np.linalg.norm((self.particles[i] - self.particles[j]))
vec_dist = (self.particles[j] - self.particles[i])
E[i] += (((np.random.uniform(size=self.D) * M[j]) * vec_dist) / (R + 0.001))
return (E * G) | Computes the acceleration for each object.
Parameters
----------
M : ndarray
An array of size N representing object (particles) masses.
G : float
Gravitational constant.
iteration : int
Current iteration of the optimization process.
Returns
-------
ndarray
An N by D matrix, which represents an array of acceleration vectors
for each object. | models/single_objective/gravitational_search.py | __get_acceleration | AlexanderKlanovets/swarm_algorithms | 9 | python | def __get_acceleration(self, M, G, iteration):
'\n Computes the acceleration for each object.\n\n Parameters\n ----------\n M : ndarray\n An array of size N representing object (particles) masses.\n G : float\n Gravitational constant.\n iteration : int\n Current iteration of the optimization process.\n \n Returns\n -------\n ndarray\n An N by D matrix, which represents an array of acceleration vectors\n for each object.\n '
final_per = 2
kbest = (final_per + ((1 - (iteration / self.max_iter)) * (100 - final_per)))
kbest = math.trunc(((self.N * kbest) / 100))
M_sorted_i = np.argsort((- M))
E = np.zeros((self.N, self.D))
for i in range(self.N):
for ii in range(kbest):
j = M_sorted_i[ii]
if (j != i):
R = np.linalg.norm((self.particles[i] - self.particles[j]))
vec_dist = (self.particles[j] - self.particles[i])
E[i] += (((np.random.uniform(size=self.D) * M[j]) * vec_dist) / (R + 0.001))
return (E * G) | def __get_acceleration(self, M, G, iteration):
'\n Computes the acceleration for each object.\n\n Parameters\n ----------\n M : ndarray\n An array of size N representing object (particles) masses.\n G : float\n Gravitational constant.\n iteration : int\n Current iteration of the optimization process.\n \n Returns\n -------\n ndarray\n An N by D matrix, which represents an array of acceleration vectors\n for each object.\n '
final_per = 2
kbest = (final_per + ((1 - (iteration / self.max_iter)) * (100 - final_per)))
kbest = math.trunc(((self.N * kbest) / 100))
M_sorted_i = np.argsort((- M))
E = np.zeros((self.N, self.D))
for i in range(self.N):
for ii in range(kbest):
j = M_sorted_i[ii]
if (j != i):
R = np.linalg.norm((self.particles[i] - self.particles[j]))
vec_dist = (self.particles[j] - self.particles[i])
E[i] += (((np.random.uniform(size=self.D) * M[j]) * vec_dist) / (R + 0.001))
return (E * G)<|docstring|>Computes the acceleration for each object.
Parameters
----------
M : ndarray
An array of size N representing object (particles) masses.
G : float
Gravitational constant.
iteration : int
Current iteration of the optimization process.
Returns
-------
ndarray
An N by D matrix, which represents an array of acceleration vectors
for each object.<|endoftext|> |
38d8c8feab0164ccdab2f4f3e5b8eb26bca6e10a27ac7d0453ed8d7f11e1d085 | def __move_all(self, a):
'\n Updates the positions of all the particles in the swarm in-place.\n\n Parameters\n ----------\n a : ndarray\n An N by D matrix, which represents an array of acceleration vectors\n for each object.\n\n Returns\n -------\n No value.\n '
self.velocities = ((np.random.uniform(size=(self.N, self.D)) * self.velocities) + a)
self.particles += self.velocities
self.simplebounds(self.particles)
for i in range(self.N):
self.scores[i] = self.fit_func(self.particles[i])
if (self.scores[i] < self.gbest_score):
self.gbest_score = self.scores[i]
self.gbest = np.copy(self.particles[i]) | Updates the positions of all the particles in the swarm in-place.
Parameters
----------
a : ndarray
An N by D matrix, which represents an array of acceleration vectors
for each object.
Returns
-------
No value. | models/single_objective/gravitational_search.py | __move_all | AlexanderKlanovets/swarm_algorithms | 9 | python | def __move_all(self, a):
'\n Updates the positions of all the particles in the swarm in-place.\n\n Parameters\n ----------\n a : ndarray\n An N by D matrix, which represents an array of acceleration vectors\n for each object.\n\n Returns\n -------\n No value.\n '
self.velocities = ((np.random.uniform(size=(self.N, self.D)) * self.velocities) + a)
self.particles += self.velocities
self.simplebounds(self.particles)
for i in range(self.N):
self.scores[i] = self.fit_func(self.particles[i])
if (self.scores[i] < self.gbest_score):
self.gbest_score = self.scores[i]
self.gbest = np.copy(self.particles[i]) | def __move_all(self, a):
'\n Updates the positions of all the particles in the swarm in-place.\n\n Parameters\n ----------\n a : ndarray\n An N by D matrix, which represents an array of acceleration vectors\n for each object.\n\n Returns\n -------\n No value.\n '
self.velocities = ((np.random.uniform(size=(self.N, self.D)) * self.velocities) + a)
self.particles += self.velocities
self.simplebounds(self.particles)
for i in range(self.N):
self.scores[i] = self.fit_func(self.particles[i])
if (self.scores[i] < self.gbest_score):
self.gbest_score = self.scores[i]
self.gbest = np.copy(self.particles[i])<|docstring|>Updates the positions of all the particles in the swarm in-place.
Parameters
----------
a : ndarray
An N by D matrix, which represents an array of acceleration vectors
for each object.
Returns
-------
No value.<|endoftext|> |
0e8da445d17da08c9c7344aa29f8e8148d02ec185d52773b8fccf5cc4b7049fd | def __mass_calc(self):
'\n Calculates object masses based on the fitness-function values.\n\n Parameters\n ----------\n No parameters.\n\n Returns\n -------\n ndarray\n An array of size N containing object masses.\n '
f_min = np.min(self.scores)
f_max = np.max(self.scores)
if (f_max == f_min):
M = np.ones(self.N)
else:
M = ((self.scores - f_max) / (f_min - f_max))
return (M / np.sum(M)) | Calculates object masses based on the fitness-function values.
Parameters
----------
No parameters.
Returns
-------
ndarray
An array of size N containing object masses. | models/single_objective/gravitational_search.py | __mass_calc | AlexanderKlanovets/swarm_algorithms | 9 | python | def __mass_calc(self):
'\n Calculates object masses based on the fitness-function values.\n\n Parameters\n ----------\n No parameters.\n\n Returns\n -------\n ndarray\n An array of size N containing object masses.\n '
f_min = np.min(self.scores)
f_max = np.max(self.scores)
if (f_max == f_min):
M = np.ones(self.N)
else:
M = ((self.scores - f_max) / (f_min - f_max))
return (M / np.sum(M)) | def __mass_calc(self):
'\n Calculates object masses based on the fitness-function values.\n\n Parameters\n ----------\n No parameters.\n\n Returns\n -------\n ndarray\n An array of size N containing object masses.\n '
f_min = np.min(self.scores)
f_max = np.max(self.scores)
if (f_max == f_min):
M = np.ones(self.N)
else:
M = ((self.scores - f_max) / (f_min - f_max))
return (M / np.sum(M))<|docstring|>Calculates object masses based on the fitness-function values.
Parameters
----------
No parameters.
Returns
-------
ndarray
An array of size N containing object masses.<|endoftext|> |
dab46c7c89c978a3d7595339e3a683f33548edf1b5ea83ee5786c73e0999a878 | def __g_const_calc(self, iteration):
"\n Reduces gravitational constant as the iterations' number increases\n (makes the search more accurate).\n\n Parameters\n ----------\n iteration : int\n Current iteration of the optimization process.\n Returns\n -------\n float\n New value of gravitational constant.\n "
return (self.G0 * math.exp((((- self.alpha) * iteration) / self.max_iter))) | Reduces gravitational constant as the iterations' number increases
(makes the search more accurate).
Parameters
----------
iteration : int
Current iteration of the optimization process.
Returns
-------
float
New value of gravitational constant. | models/single_objective/gravitational_search.py | __g_const_calc | AlexanderKlanovets/swarm_algorithms | 9 | python | def __g_const_calc(self, iteration):
"\n Reduces gravitational constant as the iterations' number increases\n (makes the search more accurate).\n\n Parameters\n ----------\n iteration : int\n Current iteration of the optimization process.\n Returns\n -------\n float\n New value of gravitational constant.\n "
return (self.G0 * math.exp((((- self.alpha) * iteration) / self.max_iter))) | def __g_const_calc(self, iteration):
"\n Reduces gravitational constant as the iterations' number increases\n (makes the search more accurate).\n\n Parameters\n ----------\n iteration : int\n Current iteration of the optimization process.\n Returns\n -------\n float\n New value of gravitational constant.\n "
return (self.G0 * math.exp((((- self.alpha) * iteration) / self.max_iter)))<|docstring|>Reduces gravitational constant as the iterations' number increases
(makes the search more accurate).
Parameters
----------
iteration : int
Current iteration of the optimization process.
Returns
-------
float
New value of gravitational constant.<|endoftext|> |
8972a7dfa43087febebb4031d7056236acaf2d04407e7b19f29a644326263bff | def optimize(self):
'\n Main loop of the algorithm.\n\n Parameters\n ----------\n No parameters.\n\n Returns\n -------\n ndarray\n The coordinates of the global best particle at the end of\n the optimization process. \n '
i = 0
while (i < self.max_iter):
M = self.__mass_calc()
G = self.__g_const_calc((i + 1))
a = self.__get_acceleration(M, G, (i + 1))
self.__move_all(a)
self.eval_num += self.N
i += 1
return self.gbest | Main loop of the algorithm.
Parameters
----------
No parameters.
Returns
-------
ndarray
The coordinates of the global best particle at the end of
the optimization process. | models/single_objective/gravitational_search.py | optimize | AlexanderKlanovets/swarm_algorithms | 9 | python | def optimize(self):
'\n Main loop of the algorithm.\n\n Parameters\n ----------\n No parameters.\n\n Returns\n -------\n ndarray\n The coordinates of the global best particle at the end of\n the optimization process. \n '
i = 0
while (i < self.max_iter):
M = self.__mass_calc()
G = self.__g_const_calc((i + 1))
a = self.__get_acceleration(M, G, (i + 1))
self.__move_all(a)
self.eval_num += self.N
i += 1
return self.gbest | def optimize(self):
'\n Main loop of the algorithm.\n\n Parameters\n ----------\n No parameters.\n\n Returns\n -------\n ndarray\n The coordinates of the global best particle at the end of\n the optimization process. \n '
i = 0
while (i < self.max_iter):
M = self.__mass_calc()
G = self.__g_const_calc((i + 1))
a = self.__get_acceleration(M, G, (i + 1))
self.__move_all(a)
self.eval_num += self.N
i += 1
return self.gbest<|docstring|>Main loop of the algorithm.
Parameters
----------
No parameters.
Returns
-------
ndarray
The coordinates of the global best particle at the end of
the optimization process.<|endoftext|> |
65284563da063121626b6fbd983bf887bd90a75602b8a26f1364b1a090a625e5 | def modify_report(self, report: Report) -> Report:
'\n modify the given report\n '
raise NotImplementedError() | modify the given report | powerapi/report_modifier/report_modifier.py | modify_report | Zenika/powerapi | 77 | python | def modify_report(self, report: Report) -> Report:
'\n \n '
raise NotImplementedError() | def modify_report(self, report: Report) -> Report:
'\n \n '
raise NotImplementedError()<|docstring|>modify the given report<|endoftext|> |
accdddb2c504c4b8c5fe3587c38c9c4dc62ff2db9205ac9c5f44afdbe686fd80 | def well_log_display(df, column_depth, column_list, column_semilog=None, min_depth=None, max_depth=None, column_min=None, column_max=None, colors=None, fm_tops=None, fm_depths=None, tight_layout=1, title_size=10):
"\n Display log side-by-side style\n Input:\n df is your dataframe\n specify min_depth and max_depth as the upper and lower depth limit\n column_depth is the column name of your depth\n column_list is the LIST of column names that you will display\n\n column_semilog is specific for resistivity column; if your resistivities are \n in column 3, specify as: column_semilog=2. Default is None, so if you don't \n specify, the resistivity will be plotted in normal axis instead\n \n column_min is list of minimum values for the x-axes.\n column_max is list of maximum values for the x-axes.\n \n colors is the list of colors specified for each log names. Default is None,\n so if don't specify, the colors will be Matplotlib default (blue)\n fm_tops and fm_depths are the list of formation top names and depths.\n Default is None, so no tops are shown. Specify both lists, if you want\n to show the tops\n "
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
if (column_semilog == None):
logs = column_list
(fig, ax) = plt.subplots(nrows=1, ncols=len(logs), figsize=(20, 10))
if (colors == None):
for i in range(len(logs)):
ax[i].plot(df[logs[i]], df[column_depth])
ax[i].set_title(logs[i], size=title_size)
ax[i].minorticks_on()
ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')
ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
if ((column_min != None) and (column_max != None)):
ax[i].set_xlim(column_min[i], column_max[i])
if ((min_depth != None) and (max_depth != None)):
ax[i].set_ylim(min_depth, max_depth)
ax[i].invert_yaxis()
else:
for i in range(len(logs)):
ax[i].plot(df[logs[i]], df[column_depth], color=colors[i])
ax[i].set_title(logs[i], size=title_size)
ax[i].minorticks_on()
ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')
ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
if ((column_min != None) and (column_max != None)):
ax[i].set_xlim(column_min[i], column_max[i])
if ((min_depth != None) and (max_depth != None)):
ax[i].set_ylim(min_depth, max_depth)
ax[i].invert_yaxis()
else:
logs = column_list
(fig, ax) = plt.subplots(nrows=1, ncols=len(logs), figsize=(20, 10))
if (colors == None):
for i in range(len(logs)):
if (i == column_semilog):
ax[i].semilogx(df[logs[i]], df[column_depth])
else:
ax[i].plot(df[logs[i]], df[column_depth])
ax[i].set_title(logs[i], size=title_size)
ax[i].minorticks_on()
ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')
ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
if ((column_min != None) and (column_max != None)):
ax[i].set_xlim(column_min[i], column_max[i])
if ((min_depth != None) and (max_depth != None)):
ax[i].set_ylim(min_depth, max_depth)
ax[i].invert_yaxis()
else:
for i in range(len(logs)):
if (i == column_semilog):
ax[i].semilogx(df[logs[i]], df[column_depth], color=colors[i])
else:
ax[i].plot(df[logs[i]], df[column_depth], color=colors[i])
ax[i].set_title(logs[i], size=title_size)
ax[i].minorticks_on()
ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')
ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
if ((column_min != None) and (column_max != None)):
ax[i].set_xlim(column_min[i], column_max[i])
if ((min_depth != None) and (max_depth != None)):
ax[i].set_ylim(min_depth, max_depth)
ax[i].invert_yaxis()
if ((fm_tops != None) and (fm_depths != None)):
rgb = []
for j in range(len(fm_tops)):
_ = (random.random(), random.random(), random.random())
rgb.append(_)
for i in range(len(logs)):
for j in range(len(fm_tops)):
ax[i].axhline(y=fm_depths[j], linestyle=':', c=rgb[j], label=fm_tops[j])
plt.tight_layout(tight_layout)
plt.show() | Display log side-by-side style
Input:
df is your dataframe
specify min_depth and max_depth as the upper and lower depth limit
column_depth is the column name of your depth
column_list is the LIST of column names that you will display
column_semilog is specific for resistivity column; if your resistivities are
in column 3, specify as: column_semilog=2. Default is None, so if you don't
specify, the resistivity will be plotted in normal axis instead
column_min is list of minimum values for the x-axes.
column_max is list of maximum values for the x-axes.
colors is the list of colors specified for each log names. Default is None,
so if don't specify, the colors will be Matplotlib default (blue)
fm_tops and fm_depths are the list of formation top names and depths.
Default is None, so no tops are shown. Specify both lists, if you want
to show the tops | well_log_display.py | well_log_display | yohanesnuwara/formation-evaluation | 22 | python | def well_log_display(df, column_depth, column_list, column_semilog=None, min_depth=None, max_depth=None, column_min=None, column_max=None, colors=None, fm_tops=None, fm_depths=None, tight_layout=1, title_size=10):
"\n Display log side-by-side style\n Input:\n df is your dataframe\n specify min_depth and max_depth as the upper and lower depth limit\n column_depth is the column name of your depth\n column_list is the LIST of column names that you will display\n\n column_semilog is specific for resistivity column; if your resistivities are \n in column 3, specify as: column_semilog=2. Default is None, so if you don't \n specify, the resistivity will be plotted in normal axis instead\n \n column_min is list of minimum values for the x-axes.\n column_max is list of maximum values for the x-axes.\n \n colors is the list of colors specified for each log names. Default is None,\n so if don't specify, the colors will be Matplotlib default (blue)\n fm_tops and fm_depths are the list of formation top names and depths.\n Default is None, so no tops are shown. Specify both lists, if you want\n to show the tops\n "
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
if (column_semilog == None):
logs = column_list
(fig, ax) = plt.subplots(nrows=1, ncols=len(logs), figsize=(20, 10))
if (colors == None):
for i in range(len(logs)):
ax[i].plot(df[logs[i]], df[column_depth])
ax[i].set_title(logs[i], size=title_size)
ax[i].minorticks_on()
ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')
ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
if ((column_min != None) and (column_max != None)):
ax[i].set_xlim(column_min[i], column_max[i])
if ((min_depth != None) and (max_depth != None)):
ax[i].set_ylim(min_depth, max_depth)
ax[i].invert_yaxis()
else:
for i in range(len(logs)):
ax[i].plot(df[logs[i]], df[column_depth], color=colors[i])
ax[i].set_title(logs[i], size=title_size)
ax[i].minorticks_on()
ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')
ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
if ((column_min != None) and (column_max != None)):
ax[i].set_xlim(column_min[i], column_max[i])
if ((min_depth != None) and (max_depth != None)):
ax[i].set_ylim(min_depth, max_depth)
ax[i].invert_yaxis()
else:
logs = column_list
(fig, ax) = plt.subplots(nrows=1, ncols=len(logs), figsize=(20, 10))
if (colors == None):
for i in range(len(logs)):
if (i == column_semilog):
ax[i].semilogx(df[logs[i]], df[column_depth])
else:
ax[i].plot(df[logs[i]], df[column_depth])
ax[i].set_title(logs[i], size=title_size)
ax[i].minorticks_on()
ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')
ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
if ((column_min != None) and (column_max != None)):
ax[i].set_xlim(column_min[i], column_max[i])
if ((min_depth != None) and (max_depth != None)):
ax[i].set_ylim(min_depth, max_depth)
ax[i].invert_yaxis()
else:
for i in range(len(logs)):
if (i == column_semilog):
ax[i].semilogx(df[logs[i]], df[column_depth], color=colors[i])
else:
ax[i].plot(df[logs[i]], df[column_depth], color=colors[i])
ax[i].set_title(logs[i], size=title_size)
ax[i].minorticks_on()
ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')
ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
if ((column_min != None) and (column_max != None)):
ax[i].set_xlim(column_min[i], column_max[i])
if ((min_depth != None) and (max_depth != None)):
ax[i].set_ylim(min_depth, max_depth)
ax[i].invert_yaxis()
if ((fm_tops != None) and (fm_depths != None)):
rgb = []
for j in range(len(fm_tops)):
_ = (random.random(), random.random(), random.random())
rgb.append(_)
for i in range(len(logs)):
for j in range(len(fm_tops)):
ax[i].axhline(y=fm_depths[j], linestyle=':', c=rgb[j], label=fm_tops[j])
plt.tight_layout(tight_layout)
plt.show() | def well_log_display(df, column_depth, column_list, column_semilog=None, min_depth=None, max_depth=None, column_min=None, column_max=None, colors=None, fm_tops=None, fm_depths=None, tight_layout=1, title_size=10):
"\n Display log side-by-side style\n Input:\n df is your dataframe\n specify min_depth and max_depth as the upper and lower depth limit\n column_depth is the column name of your depth\n column_list is the LIST of column names that you will display\n\n column_semilog is specific for resistivity column; if your resistivities are \n in column 3, specify as: column_semilog=2. Default is None, so if you don't \n specify, the resistivity will be plotted in normal axis instead\n \n column_min is list of minimum values for the x-axes.\n column_max is list of maximum values for the x-axes.\n \n colors is the list of colors specified for each log names. Default is None,\n so if don't specify, the colors will be Matplotlib default (blue)\n fm_tops and fm_depths are the list of formation top names and depths.\n Default is None, so no tops are shown. Specify both lists, if you want\n to show the tops\n "
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
if (column_semilog == None):
logs = column_list
(fig, ax) = plt.subplots(nrows=1, ncols=len(logs), figsize=(20, 10))
if (colors == None):
for i in range(len(logs)):
ax[i].plot(df[logs[i]], df[column_depth])
ax[i].set_title(logs[i], size=title_size)
ax[i].minorticks_on()
ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')
ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
if ((column_min != None) and (column_max != None)):
ax[i].set_xlim(column_min[i], column_max[i])
if ((min_depth != None) and (max_depth != None)):
ax[i].set_ylim(min_depth, max_depth)
ax[i].invert_yaxis()
else:
for i in range(len(logs)):
ax[i].plot(df[logs[i]], df[column_depth], color=colors[i])
ax[i].set_title(logs[i], size=title_size)
ax[i].minorticks_on()
ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')
ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
if ((column_min != None) and (column_max != None)):
ax[i].set_xlim(column_min[i], column_max[i])
if ((min_depth != None) and (max_depth != None)):
ax[i].set_ylim(min_depth, max_depth)
ax[i].invert_yaxis()
else:
logs = column_list
(fig, ax) = plt.subplots(nrows=1, ncols=len(logs), figsize=(20, 10))
if (colors == None):
for i in range(len(logs)):
if (i == column_semilog):
ax[i].semilogx(df[logs[i]], df[column_depth])
else:
ax[i].plot(df[logs[i]], df[column_depth])
ax[i].set_title(logs[i], size=title_size)
ax[i].minorticks_on()
ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')
ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
if ((column_min != None) and (column_max != None)):
ax[i].set_xlim(column_min[i], column_max[i])
if ((min_depth != None) and (max_depth != None)):
ax[i].set_ylim(min_depth, max_depth)
ax[i].invert_yaxis()
else:
for i in range(len(logs)):
if (i == column_semilog):
ax[i].semilogx(df[logs[i]], df[column_depth], color=colors[i])
else:
ax[i].plot(df[logs[i]], df[column_depth], color=colors[i])
ax[i].set_title(logs[i], size=title_size)
ax[i].minorticks_on()
ax[i].grid(which='major', linestyle='-', linewidth='0.5', color='lime')
ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
if ((column_min != None) and (column_max != None)):
ax[i].set_xlim(column_min[i], column_max[i])
if ((min_depth != None) and (max_depth != None)):
ax[i].set_ylim(min_depth, max_depth)
ax[i].invert_yaxis()
if ((fm_tops != None) and (fm_depths != None)):
rgb = []
for j in range(len(fm_tops)):
_ = (random.random(), random.random(), random.random())
rgb.append(_)
for i in range(len(logs)):
for j in range(len(fm_tops)):
ax[i].axhline(y=fm_depths[j], linestyle=':', c=rgb[j], label=fm_tops[j])
plt.tight_layout(tight_layout)
plt.show()<|docstring|>Display log side-by-side style
Input:
df is your dataframe
specify min_depth and max_depth as the upper and lower depth limit
column_depth is the column name of your depth
column_list is the LIST of column names that you will display
column_semilog is specific for resistivity column; if your resistivities are
in column 3, specify as: column_semilog=2. Default is None, so if you don't
specify, the resistivity will be plotted in normal axis instead
column_min is list of minimum values for the x-axes.
column_max is list of maximum values for the x-axes.
colors is the list of colors specified for each log names. Default is None,
so if don't specify, the colors will be Matplotlib default (blue)
fm_tops and fm_depths are the list of formation top names and depths.
Default is None, so no tops are shown. Specify both lists, if you want
to show the tops<|endoftext|> |
6aafebb5f8033d0f5440a89437646f85ea8552cd6e3c4551290dd921fe5b3d61 | def test_set_date_range_ok_params(self):
'\n Test constructor with correct inputs\n '
self.date_range_processor = DateRangeProcessor()
self.date_range_processor.date_range = self.date_range
self.assertIsNotNone(self.date_range_processor.date_range)
self.assertIsInstance(self.date_range_processor.date_range, list)
self.assertTrue((len(self.date_range_processor.date_range) == 2))
self.assertTrue((self.date_range_processor.start == self.date_range[0]))
self.assertTrue((self.date_range_processor.end == self.date_range[1])) | Test constructor with correct inputs | test/test_dates_processor.py | test_set_date_range_ok_params | UoMResearchIT/UoM_AQ_Data_Tools | 1 | python | def test_set_date_range_ok_params(self):
'\n \n '
self.date_range_processor = DateRangeProcessor()
self.date_range_processor.date_range = self.date_range
self.assertIsNotNone(self.date_range_processor.date_range)
self.assertIsInstance(self.date_range_processor.date_range, list)
self.assertTrue((len(self.date_range_processor.date_range) == 2))
self.assertTrue((self.date_range_processor.start == self.date_range[0]))
self.assertTrue((self.date_range_processor.end == self.date_range[1])) | def test_set_date_range_ok_params(self):
'\n \n '
self.date_range_processor = DateRangeProcessor()
self.date_range_processor.date_range = self.date_range
self.assertIsNotNone(self.date_range_processor.date_range)
self.assertIsInstance(self.date_range_processor.date_range, list)
self.assertTrue((len(self.date_range_processor.date_range) == 2))
self.assertTrue((self.date_range_processor.start == self.date_range[0]))
self.assertTrue((self.date_range_processor.end == self.date_range[1]))<|docstring|>Test constructor with correct inputs<|endoftext|> |
18235a57332ec359ccc64295cb0e447ed9f3189c690c8a851e8d834aee6bdf87 | def test_set_date_range_bad_params(self):
'\n Test constructor with invalid inputs\n '
self.date_range_processor = DateRangeProcessor()
bad_dates = [['bad1', 'bad2'], [0, 3], [datetime(2011, 1, 12, 0), datetime(2010, 1, 2, 5)], [self, self]]
with self.assertRaises(AssertionError):
for bad_dates in bad_dates:
self.date_range_processor.date_range = bad_dates | Test constructor with invalid inputs | test/test_dates_processor.py | test_set_date_range_bad_params | UoMResearchIT/UoM_AQ_Data_Tools | 1 | python | def test_set_date_range_bad_params(self):
'\n \n '
self.date_range_processor = DateRangeProcessor()
bad_dates = [['bad1', 'bad2'], [0, 3], [datetime(2011, 1, 12, 0), datetime(2010, 1, 2, 5)], [self, self]]
with self.assertRaises(AssertionError):
for bad_dates in bad_dates:
self.date_range_processor.date_range = bad_dates | def test_set_date_range_bad_params(self):
'\n \n '
self.date_range_processor = DateRangeProcessor()
bad_dates = [['bad1', 'bad2'], [0, 3], [datetime(2011, 1, 12, 0), datetime(2010, 1, 2, 5)], [self, self]]
with self.assertRaises(AssertionError):
for bad_dates in bad_dates:
self.date_range_processor.date_range = bad_dates<|docstring|>Test constructor with invalid inputs<|endoftext|> |
773c724718f0dc01754e27ba316947a3a70813c9c31575cd8e5f03aaf44f07cf | def test_date_range_get_available_dates(self):
'\n Test get_available_dates method\n '
self.date_range_processor = DateRangeProcessor()
available_dates = self.date_range_processor.get_available_dates()
self.assertIsNotNone(available_dates)
self.assertIsInstance(available_dates, list)
self.assertTrue((len(available_dates) == 2))
self.assertTrue((available_dates[0] <= available_dates[1])) | Test get_available_dates method | test/test_dates_processor.py | test_date_range_get_available_dates | UoMResearchIT/UoM_AQ_Data_Tools | 1 | python | def test_date_range_get_available_dates(self):
'\n \n '
self.date_range_processor = DateRangeProcessor()
available_dates = self.date_range_processor.get_available_dates()
self.assertIsNotNone(available_dates)
self.assertIsInstance(available_dates, list)
self.assertTrue((len(available_dates) == 2))
self.assertTrue((available_dates[0] <= available_dates[1])) | def test_date_range_get_available_dates(self):
'\n \n '
self.date_range_processor = DateRangeProcessor()
available_dates = self.date_range_processor.get_available_dates()
self.assertIsNotNone(available_dates)
self.assertIsInstance(available_dates, list)
self.assertTrue((len(available_dates) == 2))
self.assertTrue((available_dates[0] <= available_dates[1]))<|docstring|>Test get_available_dates method<|endoftext|> |
39841847494220d7f89b3eee46b07c420897db5273e633122a69cce36f23dd15 | def test_set_date_years_ok_params(self):
'\n Test setting years with correct inputs\n '
self.date_years_processor = DateYearsProcessor()
self.date_years_processor.years = self.years
self.assertIsNotNone(self.date_years_processor.years)
self.assertIsInstance(self.date_years_processor.years, list)
self.assertEqual(self.date_years_processor.years, self.years) | Test setting years with correct inputs | test/test_dates_processor.py | test_set_date_years_ok_params | UoMResearchIT/UoM_AQ_Data_Tools | 1 | python | def test_set_date_years_ok_params(self):
'\n \n '
self.date_years_processor = DateYearsProcessor()
self.date_years_processor.years = self.years
self.assertIsNotNone(self.date_years_processor.years)
self.assertIsInstance(self.date_years_processor.years, list)
self.assertEqual(self.date_years_processor.years, self.years) | def test_set_date_years_ok_params(self):
'\n \n '
self.date_years_processor = DateYearsProcessor()
self.date_years_processor.years = self.years
self.assertIsNotNone(self.date_years_processor.years)
self.assertIsInstance(self.date_years_processor.years, list)
self.assertEqual(self.date_years_processor.years, self.years)<|docstring|>Test setting years with correct inputs<|endoftext|> |
616b39c4759547444d655f8585c2fe415fb50bd8b4ac779839c991062f97003e | def test_set_date_years_bad_params(self):
'\n Test setting years with invalid inputs\n '
self.date_years_processor = DateYearsProcessor()
bad_dates = [['2010', '2016', '2017'], [0, 3], [datetime(2011, 1, 12, 0), datetime(2010, 1, 2, 5)], [self, self]]
with self.assertRaises(AssertionError):
for bad_dates in bad_dates:
self.date_years_processor.years = bad_dates | Test setting years with invalid inputs | test/test_dates_processor.py | test_set_date_years_bad_params | UoMResearchIT/UoM_AQ_Data_Tools | 1 | python | def test_set_date_years_bad_params(self):
'\n \n '
self.date_years_processor = DateYearsProcessor()
bad_dates = [['2010', '2016', '2017'], [0, 3], [datetime(2011, 1, 12, 0), datetime(2010, 1, 2, 5)], [self, self]]
with self.assertRaises(AssertionError):
for bad_dates in bad_dates:
self.date_years_processor.years = bad_dates | def test_set_date_years_bad_params(self):
'\n \n '
self.date_years_processor = DateYearsProcessor()
bad_dates = [['2010', '2016', '2017'], [0, 3], [datetime(2011, 1, 12, 0), datetime(2010, 1, 2, 5)], [self, self]]
with self.assertRaises(AssertionError):
for bad_dates in bad_dates:
self.date_years_processor.years = bad_dates<|docstring|>Test setting years with invalid inputs<|endoftext|> |
3bec6bd94e52bb1ee448e8bd68a1bd5868d113e6acc4b9adb45b187de867160c | def test_date_years_get_available_dates(self):
'\n Test get_available_years method\n '
self.date_years_processor = DateYearsProcessor()
available_years = self.date_years_processor.get_available_years()
self.assertIsNotNone(available_years)
self.assertIsInstance(available_years, list)
self.assertTrue((available_years <= sorted(available_years))) | Test get_available_years method | test/test_dates_processor.py | test_date_years_get_available_dates | UoMResearchIT/UoM_AQ_Data_Tools | 1 | python | def test_date_years_get_available_dates(self):
'\n \n '
self.date_years_processor = DateYearsProcessor()
available_years = self.date_years_processor.get_available_years()
self.assertIsNotNone(available_years)
self.assertIsInstance(available_years, list)
self.assertTrue((available_years <= sorted(available_years))) | def test_date_years_get_available_dates(self):
'\n \n '
self.date_years_processor = DateYearsProcessor()
available_years = self.date_years_processor.get_available_years()
self.assertIsNotNone(available_years)
self.assertIsInstance(available_years, list)
self.assertTrue((available_years <= sorted(available_years)))<|docstring|>Test get_available_years method<|endoftext|> |
76676683c588fcdf7c2380fb84d4b5b1c91f59207ba262f8ff3170d31d495eab | def setup(i):
"\n Input: {\n cfg - meta of this soft entry\n self_cfg - meta of module soft\n ck_kernel - import CK kernel module (to reuse functions)\n\n host_os_uoa - host OS UOA\n host_os_uid - host OS UID\n host_os_dict - host OS meta\n\n target_os_uoa - target OS UOA\n target_os_uid - target OS UID\n target_os_dict - target OS meta\n\n target_device_id - target device ID (if via ADB)\n\n tags - list of tags used to search this entry\n\n env - updated environment vars from meta\n customize - updated customize vars from meta\n\n deps - resolved dependencies for this soft\n\n interactive - if 'yes', can ask questions, otherwise quiet\n }\n\n Output: {\n return - return code = 0, if successful\n > 0, if error\n (error) - error text if return > 0\n\n bat - prepared string for bat file\n }\n\n "
cus = i.get('customize', {})
env = i['env']
ep = cus['env_prefix']
full_path = cus.get('full_path', '')
(install_dir, model_filename) = os.path.split(full_path)
install_env = cus.get('install_env', {})
env[(ep + '_MODEL_NAME')] = install_env['MODEL_NAME']
env[(ep + '_DEFAULT_HEIGHT')] = install_env['DEFAULT_HEIGHT']
env[(ep + '_DEFAULT_WIDTH')] = install_env['DEFAULT_WIDTH']
env[(ep + '_DATASET_TYPE')] = install_env['DATASET_TYPE']
env[(ep + '_LABELMAP_FILE')] = os.path.join(install_dir, install_env['LABELMAP_FILE'])
frozen_graph_name = (os.path.join(install_dir, install_env['FROZEN_GRAPH']) if ('FROZEN_GRAPH' in install_env) else full_path)
env[(ep + '_FROZEN_GRAPH')] = frozen_graph_name
env[(ep + '_TF_FROZEN_FILEPATH')] = frozen_graph_name
for varname in install_env.keys():
if varname.startswith('MODEL_'):
env[(ep + varname[len('MODEL'):])] = install_env[varname]
for varname in install_env.keys():
if varname.startswith('ML_MODEL_'):
env[varname] = install_env[varname]
if ('WEIGHTS_FILE' in install_dir):
env[(ep + '_WEIGHTS_FILE')] = os.path.join(install_dir, install_env['WEIGHTS_FILE'])
hosd = i['host_os_dict']
winh = hosd.get('windows_base', '')
env['PYTHONPATH'] = (install_dir + (';%PYTHONPATH%' if (winh == 'yes') else ':${PYTHONPATH}'))
return {'return': 0, 'bat': ''} | Input: {
cfg - meta of this soft entry
self_cfg - meta of module soft
ck_kernel - import CK kernel module (to reuse functions)
host_os_uoa - host OS UOA
host_os_uid - host OS UID
host_os_dict - host OS meta
target_os_uoa - target OS UOA
target_os_uid - target OS UID
target_os_dict - target OS meta
target_device_id - target device ID (if via ADB)
tags - list of tags used to search this entry
env - updated environment vars from meta
customize - updated customize vars from meta
deps - resolved dependencies for this soft
interactive - if 'yes', can ask questions, otherwise quiet
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
bat - prepared string for bat file
} | soft/model.tensorflow.object-detection/customize.py | setup | G4V/ck-tensorflow | 108 | python | def setup(i):
"\n Input: {\n cfg - meta of this soft entry\n self_cfg - meta of module soft\n ck_kernel - import CK kernel module (to reuse functions)\n\n host_os_uoa - host OS UOA\n host_os_uid - host OS UID\n host_os_dict - host OS meta\n\n target_os_uoa - target OS UOA\n target_os_uid - target OS UID\n target_os_dict - target OS meta\n\n target_device_id - target device ID (if via ADB)\n\n tags - list of tags used to search this entry\n\n env - updated environment vars from meta\n customize - updated customize vars from meta\n\n deps - resolved dependencies for this soft\n\n interactive - if 'yes', can ask questions, otherwise quiet\n }\n\n Output: {\n return - return code = 0, if successful\n > 0, if error\n (error) - error text if return > 0\n\n bat - prepared string for bat file\n }\n\n "
cus = i.get('customize', {})
env = i['env']
ep = cus['env_prefix']
full_path = cus.get('full_path', )
(install_dir, model_filename) = os.path.split(full_path)
install_env = cus.get('install_env', {})
env[(ep + '_MODEL_NAME')] = install_env['MODEL_NAME']
env[(ep + '_DEFAULT_HEIGHT')] = install_env['DEFAULT_HEIGHT']
env[(ep + '_DEFAULT_WIDTH')] = install_env['DEFAULT_WIDTH']
env[(ep + '_DATASET_TYPE')] = install_env['DATASET_TYPE']
env[(ep + '_LABELMAP_FILE')] = os.path.join(install_dir, install_env['LABELMAP_FILE'])
frozen_graph_name = (os.path.join(install_dir, install_env['FROZEN_GRAPH']) if ('FROZEN_GRAPH' in install_env) else full_path)
env[(ep + '_FROZEN_GRAPH')] = frozen_graph_name
env[(ep + '_TF_FROZEN_FILEPATH')] = frozen_graph_name
for varname in install_env.keys():
if varname.startswith('MODEL_'):
env[(ep + varname[len('MODEL'):])] = install_env[varname]
for varname in install_env.keys():
if varname.startswith('ML_MODEL_'):
env[varname] = install_env[varname]
if ('WEIGHTS_FILE' in install_dir):
env[(ep + '_WEIGHTS_FILE')] = os.path.join(install_dir, install_env['WEIGHTS_FILE'])
hosd = i['host_os_dict']
winh = hosd.get('windows_base', )
env['PYTHONPATH'] = (install_dir + (';%PYTHONPATH%' if (winh == 'yes') else ':${PYTHONPATH}'))
return {'return': 0, 'bat': } | def setup(i):
"\n Input: {\n cfg - meta of this soft entry\n self_cfg - meta of module soft\n ck_kernel - import CK kernel module (to reuse functions)\n\n host_os_uoa - host OS UOA\n host_os_uid - host OS UID\n host_os_dict - host OS meta\n\n target_os_uoa - target OS UOA\n target_os_uid - target OS UID\n target_os_dict - target OS meta\n\n target_device_id - target device ID (if via ADB)\n\n tags - list of tags used to search this entry\n\n env - updated environment vars from meta\n customize - updated customize vars from meta\n\n deps - resolved dependencies for this soft\n\n interactive - if 'yes', can ask questions, otherwise quiet\n }\n\n Output: {\n return - return code = 0, if successful\n > 0, if error\n (error) - error text if return > 0\n\n bat - prepared string for bat file\n }\n\n "
cus = i.get('customize', {})
env = i['env']
ep = cus['env_prefix']
full_path = cus.get('full_path', )
(install_dir, model_filename) = os.path.split(full_path)
install_env = cus.get('install_env', {})
env[(ep + '_MODEL_NAME')] = install_env['MODEL_NAME']
env[(ep + '_DEFAULT_HEIGHT')] = install_env['DEFAULT_HEIGHT']
env[(ep + '_DEFAULT_WIDTH')] = install_env['DEFAULT_WIDTH']
env[(ep + '_DATASET_TYPE')] = install_env['DATASET_TYPE']
env[(ep + '_LABELMAP_FILE')] = os.path.join(install_dir, install_env['LABELMAP_FILE'])
frozen_graph_name = (os.path.join(install_dir, install_env['FROZEN_GRAPH']) if ('FROZEN_GRAPH' in install_env) else full_path)
env[(ep + '_FROZEN_GRAPH')] = frozen_graph_name
env[(ep + '_TF_FROZEN_FILEPATH')] = frozen_graph_name
for varname in install_env.keys():
if varname.startswith('MODEL_'):
env[(ep + varname[len('MODEL'):])] = install_env[varname]
for varname in install_env.keys():
if varname.startswith('ML_MODEL_'):
env[varname] = install_env[varname]
if ('WEIGHTS_FILE' in install_dir):
env[(ep + '_WEIGHTS_FILE')] = os.path.join(install_dir, install_env['WEIGHTS_FILE'])
hosd = i['host_os_dict']
winh = hosd.get('windows_base', )
env['PYTHONPATH'] = (install_dir + (';%PYTHONPATH%' if (winh == 'yes') else ':${PYTHONPATH}'))
return {'return': 0, 'bat': }<|docstring|>Input: {
cfg - meta of this soft entry
self_cfg - meta of module soft
ck_kernel - import CK kernel module (to reuse functions)
host_os_uoa - host OS UOA
host_os_uid - host OS UID
host_os_dict - host OS meta
target_os_uoa - target OS UOA
target_os_uid - target OS UID
target_os_dict - target OS meta
target_device_id - target device ID (if via ADB)
tags - list of tags used to search this entry
env - updated environment vars from meta
customize - updated customize vars from meta
deps - resolved dependencies for this soft
interactive - if 'yes', can ask questions, otherwise quiet
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
bat - prepared string for bat file
}<|endoftext|> |
50a87d818c1f5d21493eba24f89bb65af79ab2714322c2edfd302e089570b58a | def generate_protocol(protocol_json, use_logical_types=False, custom_imports=None, avro_json_converter=None):
'\n Generate content of the file which will contain concrete classes for RecordSchemas and requests contained\n in the avro protocol\n :param str protocol_json: JSON containing avro protocol\n :param bool use_logical_types: Use logical types extensions if true\n :param list[str] custom_imports: Add additional import modules\n :param str avro_json_converter: AvroJsonConverter type to use for default values\n :return:\n '
if (avro_json_converter is None):
avro_json_converter = 'avrojson.AvroJsonConverter'
if ('(' not in avro_json_converter):
avro_json_converter += ('(use_logical_types=%s, schema_types=__SCHEMA_TYPES)' % use_logical_types)
custom_imports = (custom_imports or [])
if (not hasattr(protocol, 'parse')):
proto = protocol.Parse(protocol_json)
else:
proto = protocol.parse(protocol_json)
schemas = []
messages = []
schema_names = set()
request_names = set()
known_types = set()
for (schema_idx, record_schema) in enumerate(proto.types):
if isinstance(record_schema, (schema.RecordSchema, schema.EnumSchema)):
schemas.append((schema_idx, record_schema))
known_types.add(clean_fullname(record_schema.fullname))
for message in (six.itervalues(proto.messages) if six.PY2 else proto.messages):
messages.append((message, message.request, (message.response if (isinstance(message.response, (schema.EnumSchema, schema.RecordSchema)) and (clean_fullname(message.response.fullname) not in known_types)) else None)))
if isinstance(message.response, (schema.EnumSchema, schema.RecordSchema)):
known_types.add(clean_fullname(message.response.fullname))
namespaces = {}
for (schema_idx, record_schema) in schemas:
(ns, name) = ns_.split_fullname(clean_fullname(record_schema.fullname))
if (ns not in namespaces):
namespaces[ns] = {'requests': [], 'records': [], 'responses': []}
namespaces[ns]['records'].append((schema_idx, record_schema))
for (message, request, response) in messages:
fullname = ns_.make_fullname(proto.namespace, clean_fullname(message.name))
(ns, name) = ns_.split_fullname(fullname)
if (ns not in namespaces):
namespaces[ns] = {'requests': [], 'records': [], 'responses': []}
namespaces[ns]['requests'].append(message)
if response:
namespaces[ns]['responses'].append(message)
main_out = StringIO()
writer = TabbedWriter(main_out)
write_preamble(writer, use_logical_types, custom_imports)
write_protocol_preamble(writer, use_logical_types, custom_imports)
write_get_schema(writer)
write_populate_schemas(writer)
writer.write('\n\n\nclass SchemaClasses(object):')
with writer.indent():
writer.write('\n\n')
current_namespace = tuple()
all_ns = sorted(namespaces.keys())
for ns in all_ns:
if (not (namespaces[ns]['responses'] or namespaces[ns]['records'])):
continue
namespace = ns.split('.')
if (namespace != current_namespace):
start_namespace(current_namespace, namespace, writer)
for (idx, record) in namespaces[ns]['records']:
schema_names.add(clean_fullname(record.fullname))
if isinstance(record, schema.RecordSchema):
write_schema_record(record, writer, use_logical_types)
elif isinstance(record, schema.EnumSchema):
write_enum(record, writer)
for message in namespaces[ns]['responses']:
schema_names.add(clean_fullname(message.response.fullname))
if isinstance(message.response, schema.RecordSchema):
write_schema_record(message.response, writer, use_logical_types)
elif isinstance(message.response, schema.EnumSchema):
write_enum(message.response, writer)
writer.write('\n\npass')
writer.set_tab(0)
writer.write('\n\n\nclass RequestClasses(object):')
with writer.indent() as indent:
writer.write('\n\n')
current_namespace = tuple()
all_ns = sorted(namespaces.keys())
for ns in all_ns:
if (not (namespaces[ns]['requests'] or namespaces[ns]['responses'])):
continue
namespace = ns.split('.')
if (namespace != current_namespace):
start_namespace(current_namespace, namespace, writer)
for message in namespaces[ns]['requests']:
request_names.add(ns_.make_fullname(proto.namespace, clean_fullname(message.name)))
write_protocol_request(message, proto.namespace, writer, use_logical_types)
writer.write('\n\npass')
writer.untab()
writer.set_tab(0)
writer.write('\n__SCHEMA_TYPES = {\n')
writer.tab()
all_ns = sorted(namespaces.keys())
for ns in all_ns:
for (idx, record) in (namespaces[ns]['records'] or []):
writer.write(("'%s': SchemaClasses.%sClass,\n" % (clean_fullname(record.fullname), clean_fullname(record.fullname))))
for message in (namespaces[ns]['responses'] or []):
writer.write(("'%s': SchemaClasses.%sClass,\n" % (clean_fullname(message.response.fullname), clean_fullname(message.response.fullname))))
for message in (namespaces[ns]['requests'] or []):
name = ns_.make_fullname(proto.namespace, clean_fullname(message.name))
writer.write(("'%s': RequestClasses.%sRequestClass, \n" % (name, name)))
writer.untab()
writer.write('\n}\n')
writer.write(('_json_converter = %s\n\n' % avro_json_converter))
value = main_out.getvalue()
main_out.close()
return (value, schema_names, request_names) | Generate content of the file which will contain concrete classes for RecordSchemas and requests contained
in the avro protocol
:param str protocol_json: JSON containing avro protocol
:param bool use_logical_types: Use logical types extensions if true
:param list[str] custom_imports: Add additional import modules
:param str avro_json_converter: AvroJsonConverter type to use for default values
:return: | avrogen/protocol.py | generate_protocol | kevinhu/avro_gen | 22 | python | def generate_protocol(protocol_json, use_logical_types=False, custom_imports=None, avro_json_converter=None):
'\n Generate content of the file which will contain concrete classes for RecordSchemas and requests contained\n in the avro protocol\n :param str protocol_json: JSON containing avro protocol\n :param bool use_logical_types: Use logical types extensions if true\n :param list[str] custom_imports: Add additional import modules\n :param str avro_json_converter: AvroJsonConverter type to use for default values\n :return:\n '
if (avro_json_converter is None):
avro_json_converter = 'avrojson.AvroJsonConverter'
if ('(' not in avro_json_converter):
avro_json_converter += ('(use_logical_types=%s, schema_types=__SCHEMA_TYPES)' % use_logical_types)
custom_imports = (custom_imports or [])
if (not hasattr(protocol, 'parse')):
proto = protocol.Parse(protocol_json)
else:
proto = protocol.parse(protocol_json)
schemas = []
messages = []
schema_names = set()
request_names = set()
known_types = set()
for (schema_idx, record_schema) in enumerate(proto.types):
if isinstance(record_schema, (schema.RecordSchema, schema.EnumSchema)):
schemas.append((schema_idx, record_schema))
known_types.add(clean_fullname(record_schema.fullname))
for message in (six.itervalues(proto.messages) if six.PY2 else proto.messages):
messages.append((message, message.request, (message.response if (isinstance(message.response, (schema.EnumSchema, schema.RecordSchema)) and (clean_fullname(message.response.fullname) not in known_types)) else None)))
if isinstance(message.response, (schema.EnumSchema, schema.RecordSchema)):
known_types.add(clean_fullname(message.response.fullname))
namespaces = {}
for (schema_idx, record_schema) in schemas:
(ns, name) = ns_.split_fullname(clean_fullname(record_schema.fullname))
if (ns not in namespaces):
namespaces[ns] = {'requests': [], 'records': [], 'responses': []}
namespaces[ns]['records'].append((schema_idx, record_schema))
for (message, request, response) in messages:
fullname = ns_.make_fullname(proto.namespace, clean_fullname(message.name))
(ns, name) = ns_.split_fullname(fullname)
if (ns not in namespaces):
namespaces[ns] = {'requests': [], 'records': [], 'responses': []}
namespaces[ns]['requests'].append(message)
if response:
namespaces[ns]['responses'].append(message)
main_out = StringIO()
writer = TabbedWriter(main_out)
write_preamble(writer, use_logical_types, custom_imports)
write_protocol_preamble(writer, use_logical_types, custom_imports)
write_get_schema(writer)
write_populate_schemas(writer)
writer.write('\n\n\nclass SchemaClasses(object):')
with writer.indent():
writer.write('\n\n')
current_namespace = tuple()
all_ns = sorted(namespaces.keys())
for ns in all_ns:
if (not (namespaces[ns]['responses'] or namespaces[ns]['records'])):
continue
namespace = ns.split('.')
if (namespace != current_namespace):
start_namespace(current_namespace, namespace, writer)
for (idx, record) in namespaces[ns]['records']:
schema_names.add(clean_fullname(record.fullname))
if isinstance(record, schema.RecordSchema):
write_schema_record(record, writer, use_logical_types)
elif isinstance(record, schema.EnumSchema):
write_enum(record, writer)
for message in namespaces[ns]['responses']:
schema_names.add(clean_fullname(message.response.fullname))
if isinstance(message.response, schema.RecordSchema):
write_schema_record(message.response, writer, use_logical_types)
elif isinstance(message.response, schema.EnumSchema):
write_enum(message.response, writer)
writer.write('\n\npass')
writer.set_tab(0)
writer.write('\n\n\nclass RequestClasses(object):')
with writer.indent() as indent:
writer.write('\n\n')
current_namespace = tuple()
all_ns = sorted(namespaces.keys())
for ns in all_ns:
if (not (namespaces[ns]['requests'] or namespaces[ns]['responses'])):
continue
namespace = ns.split('.')
if (namespace != current_namespace):
start_namespace(current_namespace, namespace, writer)
for message in namespaces[ns]['requests']:
request_names.add(ns_.make_fullname(proto.namespace, clean_fullname(message.name)))
write_protocol_request(message, proto.namespace, writer, use_logical_types)
writer.write('\n\npass')
writer.untab()
writer.set_tab(0)
writer.write('\n__SCHEMA_TYPES = {\n')
writer.tab()
all_ns = sorted(namespaces.keys())
for ns in all_ns:
for (idx, record) in (namespaces[ns]['records'] or []):
writer.write(("'%s': SchemaClasses.%sClass,\n" % (clean_fullname(record.fullname), clean_fullname(record.fullname))))
for message in (namespaces[ns]['responses'] or []):
writer.write(("'%s': SchemaClasses.%sClass,\n" % (clean_fullname(message.response.fullname), clean_fullname(message.response.fullname))))
for message in (namespaces[ns]['requests'] or []):
name = ns_.make_fullname(proto.namespace, clean_fullname(message.name))
writer.write(("'%s': RequestClasses.%sRequestClass, \n" % (name, name)))
writer.untab()
writer.write('\n}\n')
writer.write(('_json_converter = %s\n\n' % avro_json_converter))
value = main_out.getvalue()
main_out.close()
return (value, schema_names, request_names) | def generate_protocol(protocol_json, use_logical_types=False, custom_imports=None, avro_json_converter=None):
'\n Generate content of the file which will contain concrete classes for RecordSchemas and requests contained\n in the avro protocol\n :param str protocol_json: JSON containing avro protocol\n :param bool use_logical_types: Use logical types extensions if true\n :param list[str] custom_imports: Add additional import modules\n :param str avro_json_converter: AvroJsonConverter type to use for default values\n :return:\n '
if (avro_json_converter is None):
avro_json_converter = 'avrojson.AvroJsonConverter'
if ('(' not in avro_json_converter):
avro_json_converter += ('(use_logical_types=%s, schema_types=__SCHEMA_TYPES)' % use_logical_types)
custom_imports = (custom_imports or [])
if (not hasattr(protocol, 'parse')):
proto = protocol.Parse(protocol_json)
else:
proto = protocol.parse(protocol_json)
schemas = []
messages = []
schema_names = set()
request_names = set()
known_types = set()
for (schema_idx, record_schema) in enumerate(proto.types):
if isinstance(record_schema, (schema.RecordSchema, schema.EnumSchema)):
schemas.append((schema_idx, record_schema))
known_types.add(clean_fullname(record_schema.fullname))
for message in (six.itervalues(proto.messages) if six.PY2 else proto.messages):
messages.append((message, message.request, (message.response if (isinstance(message.response, (schema.EnumSchema, schema.RecordSchema)) and (clean_fullname(message.response.fullname) not in known_types)) else None)))
if isinstance(message.response, (schema.EnumSchema, schema.RecordSchema)):
known_types.add(clean_fullname(message.response.fullname))
namespaces = {}
for (schema_idx, record_schema) in schemas:
(ns, name) = ns_.split_fullname(clean_fullname(record_schema.fullname))
if (ns not in namespaces):
namespaces[ns] = {'requests': [], 'records': [], 'responses': []}
namespaces[ns]['records'].append((schema_idx, record_schema))
for (message, request, response) in messages:
fullname = ns_.make_fullname(proto.namespace, clean_fullname(message.name))
(ns, name) = ns_.split_fullname(fullname)
if (ns not in namespaces):
namespaces[ns] = {'requests': [], 'records': [], 'responses': []}
namespaces[ns]['requests'].append(message)
if response:
namespaces[ns]['responses'].append(message)
main_out = StringIO()
writer = TabbedWriter(main_out)
write_preamble(writer, use_logical_types, custom_imports)
write_protocol_preamble(writer, use_logical_types, custom_imports)
write_get_schema(writer)
write_populate_schemas(writer)
writer.write('\n\n\nclass SchemaClasses(object):')
with writer.indent():
writer.write('\n\n')
current_namespace = tuple()
all_ns = sorted(namespaces.keys())
for ns in all_ns:
if (not (namespaces[ns]['responses'] or namespaces[ns]['records'])):
continue
namespace = ns.split('.')
if (namespace != current_namespace):
start_namespace(current_namespace, namespace, writer)
for (idx, record) in namespaces[ns]['records']:
schema_names.add(clean_fullname(record.fullname))
if isinstance(record, schema.RecordSchema):
write_schema_record(record, writer, use_logical_types)
elif isinstance(record, schema.EnumSchema):
write_enum(record, writer)
for message in namespaces[ns]['responses']:
schema_names.add(clean_fullname(message.response.fullname))
if isinstance(message.response, schema.RecordSchema):
write_schema_record(message.response, writer, use_logical_types)
elif isinstance(message.response, schema.EnumSchema):
write_enum(message.response, writer)
writer.write('\n\npass')
writer.set_tab(0)
writer.write('\n\n\nclass RequestClasses(object):')
with writer.indent() as indent:
writer.write('\n\n')
current_namespace = tuple()
all_ns = sorted(namespaces.keys())
for ns in all_ns:
if (not (namespaces[ns]['requests'] or namespaces[ns]['responses'])):
continue
namespace = ns.split('.')
if (namespace != current_namespace):
start_namespace(current_namespace, namespace, writer)
for message in namespaces[ns]['requests']:
request_names.add(ns_.make_fullname(proto.namespace, clean_fullname(message.name)))
write_protocol_request(message, proto.namespace, writer, use_logical_types)
writer.write('\n\npass')
writer.untab()
writer.set_tab(0)
writer.write('\n__SCHEMA_TYPES = {\n')
writer.tab()
all_ns = sorted(namespaces.keys())
for ns in all_ns:
for (idx, record) in (namespaces[ns]['records'] or []):
writer.write(("'%s': SchemaClasses.%sClass,\n" % (clean_fullname(record.fullname), clean_fullname(record.fullname))))
for message in (namespaces[ns]['responses'] or []):
writer.write(("'%s': SchemaClasses.%sClass,\n" % (clean_fullname(message.response.fullname), clean_fullname(message.response.fullname))))
for message in (namespaces[ns]['requests'] or []):
name = ns_.make_fullname(proto.namespace, clean_fullname(message.name))
writer.write(("'%s': RequestClasses.%sRequestClass, \n" % (name, name)))
writer.untab()
writer.write('\n}\n')
writer.write(('_json_converter = %s\n\n' % avro_json_converter))
value = main_out.getvalue()
main_out.close()
return (value, schema_names, request_names)<|docstring|>Generate content of the file which will contain concrete classes for RecordSchemas and requests contained
in the avro protocol
:param str protocol_json: JSON containing avro protocol
:param bool use_logical_types: Use logical types extensions if true
:param list[str] custom_imports: Add additional import modules
:param str avro_json_converter: AvroJsonConverter type to use for default values
:return:<|endoftext|> |
fc835caa37be34376b0be9cc80402239d58963ead4bfc80a4fe0e591e2376cf0 | def write_protocol_preamble(writer, use_logical_types, custom_imports):
'\n Writes a preamble for avro protocol implementation.\n The preamble will contain a function which can load the protocol from the file\n and a global PROTOCOL variable which will contain parsed protocol\n :param writer:\n :param use_logical_types:\n :return:\n '
write_read_file(writer)
writer.write('\nfrom avro import protocol as avro_protocol')
for i in (custom_imports or []):
writer.write(('import %s\n' % i))
if use_logical_types:
writer.write('\nfrom avrogen import logical')
writer.write('\n\ndef __get_protocol(file_name):')
with writer.indent():
writer.write('\nproto = avro_protocol.Parse(__read_file(file_name)) if six.PY3 else avro_protocol.parse(__read_file(file_name))')
writer.write('\nreturn proto')
writer.write('\n\nPROTOCOL = __get_protocol(os.path.join(os.path.dirname(__file__), "protocol.avpr"))') | Writes a preamble for avro protocol implementation.
The preamble will contain a function which can load the protocol from the file
and a global PROTOCOL variable which will contain parsed protocol
:param writer:
:param use_logical_types:
:return: | avrogen/protocol.py | write_protocol_preamble | kevinhu/avro_gen | 22 | python | def write_protocol_preamble(writer, use_logical_types, custom_imports):
'\n Writes a preamble for avro protocol implementation.\n The preamble will contain a function which can load the protocol from the file\n and a global PROTOCOL variable which will contain parsed protocol\n :param writer:\n :param use_logical_types:\n :return:\n '
write_read_file(writer)
writer.write('\nfrom avro import protocol as avro_protocol')
for i in (custom_imports or []):
writer.write(('import %s\n' % i))
if use_logical_types:
writer.write('\nfrom avrogen import logical')
writer.write('\n\ndef __get_protocol(file_name):')
with writer.indent():
writer.write('\nproto = avro_protocol.Parse(__read_file(file_name)) if six.PY3 else avro_protocol.parse(__read_file(file_name))')
writer.write('\nreturn proto')
writer.write('\n\nPROTOCOL = __get_protocol(os.path.join(os.path.dirname(__file__), "protocol.avpr"))') | def write_protocol_preamble(writer, use_logical_types, custom_imports):
'\n Writes a preamble for avro protocol implementation.\n The preamble will contain a function which can load the protocol from the file\n and a global PROTOCOL variable which will contain parsed protocol\n :param writer:\n :param use_logical_types:\n :return:\n '
write_read_file(writer)
writer.write('\nfrom avro import protocol as avro_protocol')
for i in (custom_imports or []):
writer.write(('import %s\n' % i))
if use_logical_types:
writer.write('\nfrom avrogen import logical')
writer.write('\n\ndef __get_protocol(file_name):')
with writer.indent():
writer.write('\nproto = avro_protocol.Parse(__read_file(file_name)) if six.PY3 else avro_protocol.parse(__read_file(file_name))')
writer.write('\nreturn proto')
writer.write('\n\nPROTOCOL = __get_protocol(os.path.join(os.path.dirname(__file__), "protocol.avpr"))')<|docstring|>Writes a preamble for avro protocol implementation.
The preamble will contain a function which can load the protocol from the file
and a global PROTOCOL variable which will contain parsed protocol
:param writer:
:param use_logical_types:
:return:<|endoftext|> |
f9ce261e99dd77bab1e362748bfd3db575774ae444ef08ecb13b1d4d4ef7171a | def write_populate_schemas(writer):
'\n Write code which will look through the protocol and populate __SCHEMAS dict which will be used by get_type_schema()\n :param writer:\n :return:\n '
writer.write('\nfor rec in PROTOCOL.types:')
with writer.indent():
writer.write('\n__SCHEMAS[rec.fullname] = rec')
writer.write('\nfor resp in (six.itervalues(PROTOCOL.messages) if six.PY2 else PROTOCOL.messages):')
with writer.indent():
writer.write('\nif isinstance(resp.response, (avro_schema.RecordSchema, avro_schema.EnumSchema)):')
with writer.indent():
writer.write('\n__SCHEMAS[resp.response.fullname] = resp.response')
writer.write('\nPROTOCOL_MESSAGES = {m.name.lstrip("."):m for m in (six.itervalues(PROTOCOL.messages) if six.PY2 else PROTOCOL.messages)}\n') | Write code which will look through the protocol and populate __SCHEMAS dict which will be used by get_type_schema()
:param writer:
:return: | avrogen/protocol.py | write_populate_schemas | kevinhu/avro_gen | 22 | python | def write_populate_schemas(writer):
'\n Write code which will look through the protocol and populate __SCHEMAS dict which will be used by get_type_schema()\n :param writer:\n :return:\n '
writer.write('\nfor rec in PROTOCOL.types:')
with writer.indent():
writer.write('\n__SCHEMAS[rec.fullname] = rec')
writer.write('\nfor resp in (six.itervalues(PROTOCOL.messages) if six.PY2 else PROTOCOL.messages):')
with writer.indent():
writer.write('\nif isinstance(resp.response, (avro_schema.RecordSchema, avro_schema.EnumSchema)):')
with writer.indent():
writer.write('\n__SCHEMAS[resp.response.fullname] = resp.response')
writer.write('\nPROTOCOL_MESSAGES = {m.name.lstrip("."):m for m in (six.itervalues(PROTOCOL.messages) if six.PY2 else PROTOCOL.messages)}\n') | def write_populate_schemas(writer):
'\n Write code which will look through the protocol and populate __SCHEMAS dict which will be used by get_type_schema()\n :param writer:\n :return:\n '
writer.write('\nfor rec in PROTOCOL.types:')
with writer.indent():
writer.write('\n__SCHEMAS[rec.fullname] = rec')
writer.write('\nfor resp in (six.itervalues(PROTOCOL.messages) if six.PY2 else PROTOCOL.messages):')
with writer.indent():
writer.write('\nif isinstance(resp.response, (avro_schema.RecordSchema, avro_schema.EnumSchema)):')
with writer.indent():
writer.write('\n__SCHEMAS[resp.response.fullname] = resp.response')
writer.write('\nPROTOCOL_MESSAGES = {m.name.lstrip("."):m for m in (six.itervalues(PROTOCOL.messages) if six.PY2 else PROTOCOL.messages)}\n')<|docstring|>Write code which will look through the protocol and populate __SCHEMAS dict which will be used by get_type_schema()
:param writer:
:return:<|endoftext|> |
20d1e850d0bd63ef8a8f266d67d76ac2ccec6f1bc8e4e3d0fb8be3a79605e083 | def write_protocol_files(protocol_json, output_folder, use_logical_types=False, custom_imports=None):
'\n Generates concrete classes for RecordSchemas and requests and a SpecificReader for types and messages contained\n in the avro protocol.\n :param str protocol_json: JSON containing avro protocol\n :param str output_folder: Folder to write generated files to.\n :param list[str] custom_imports: Add additional import modules\n :return:\n '
(proto_py, record_names, request_names) = generate_protocol(protocol_json, use_logical_types, custom_imports)
names = sorted((list(record_names) + list(request_names)))
if (not os.path.isdir(output_folder)):
os.mkdir(output_folder)
with open(os.path.join(output_folder, 'schema_classes.py'), 'w+') as f:
f.write(proto_py)
with open(os.path.join(output_folder, 'protocol.avpr'), 'w+') as f:
f.write(protocol_json)
ns_dict = generate_namespace_modules(names, output_folder)
with open(os.path.join(output_folder, '__init__.py'), 'w+') as f:
pass
write_namespace_modules(ns_dict, request_names, output_folder)
write_specific_reader(record_names, output_folder, use_logical_types) | Generates concrete classes for RecordSchemas and requests and a SpecificReader for types and messages contained
in the avro protocol.
:param str protocol_json: JSON containing avro protocol
:param str output_folder: Folder to write generated files to.
:param list[str] custom_imports: Add additional import modules
:return: | avrogen/protocol.py | write_protocol_files | kevinhu/avro_gen | 22 | python | def write_protocol_files(protocol_json, output_folder, use_logical_types=False, custom_imports=None):
'\n Generates concrete classes for RecordSchemas and requests and a SpecificReader for types and messages contained\n in the avro protocol.\n :param str protocol_json: JSON containing avro protocol\n :param str output_folder: Folder to write generated files to.\n :param list[str] custom_imports: Add additional import modules\n :return:\n '
(proto_py, record_names, request_names) = generate_protocol(protocol_json, use_logical_types, custom_imports)
names = sorted((list(record_names) + list(request_names)))
if (not os.path.isdir(output_folder)):
os.mkdir(output_folder)
with open(os.path.join(output_folder, 'schema_classes.py'), 'w+') as f:
f.write(proto_py)
with open(os.path.join(output_folder, 'protocol.avpr'), 'w+') as f:
f.write(protocol_json)
ns_dict = generate_namespace_modules(names, output_folder)
with open(os.path.join(output_folder, '__init__.py'), 'w+') as f:
pass
write_namespace_modules(ns_dict, request_names, output_folder)
write_specific_reader(record_names, output_folder, use_logical_types) | def write_protocol_files(protocol_json, output_folder, use_logical_types=False, custom_imports=None):
'\n Generates concrete classes for RecordSchemas and requests and a SpecificReader for types and messages contained\n in the avro protocol.\n :param str protocol_json: JSON containing avro protocol\n :param str output_folder: Folder to write generated files to.\n :param list[str] custom_imports: Add additional import modules\n :return:\n '
(proto_py, record_names, request_names) = generate_protocol(protocol_json, use_logical_types, custom_imports)
names = sorted((list(record_names) + list(request_names)))
if (not os.path.isdir(output_folder)):
os.mkdir(output_folder)
with open(os.path.join(output_folder, 'schema_classes.py'), 'w+') as f:
f.write(proto_py)
with open(os.path.join(output_folder, 'protocol.avpr'), 'w+') as f:
f.write(protocol_json)
ns_dict = generate_namespace_modules(names, output_folder)
with open(os.path.join(output_folder, '__init__.py'), 'w+') as f:
pass
write_namespace_modules(ns_dict, request_names, output_folder)
write_specific_reader(record_names, output_folder, use_logical_types)<|docstring|>Generates concrete classes for RecordSchemas and requests and a SpecificReader for types and messages contained
in the avro protocol.
:param str protocol_json: JSON containing avro protocol
:param str output_folder: Folder to write generated files to.
:param list[str] custom_imports: Add additional import modules
:return:<|endoftext|> |
d771fa87254ce1409621b5e6f50f1b9aad01ef4466392f5e4490cd3cd2f39659 | def write_specific_reader(record_types, output_folder, use_logical_types):
'\n Write specific reader implementation for a protocol\n :param list[avro.schema.RecordSchema] record_types:\n :param output_folder:\n :return:\n '
with open(os.path.join(output_folder, '__init__.py'), 'a+') as f:
writer = TabbedWriter(f)
writer.write('\n\nfrom .schema_classes import SchemaClasses, PROTOCOL as my_proto, get_schema_type')
writer.write('\nfrom avro.io import DatumReader')
write_reader_impl(record_types, writer, use_logical_types) | Write specific reader implementation for a protocol
:param list[avro.schema.RecordSchema] record_types:
:param output_folder:
:return: | avrogen/protocol.py | write_specific_reader | kevinhu/avro_gen | 22 | python | def write_specific_reader(record_types, output_folder, use_logical_types):
'\n Write specific reader implementation for a protocol\n :param list[avro.schema.RecordSchema] record_types:\n :param output_folder:\n :return:\n '
with open(os.path.join(output_folder, '__init__.py'), 'a+') as f:
writer = TabbedWriter(f)
writer.write('\n\nfrom .schema_classes import SchemaClasses, PROTOCOL as my_proto, get_schema_type')
writer.write('\nfrom avro.io import DatumReader')
write_reader_impl(record_types, writer, use_logical_types) | def write_specific_reader(record_types, output_folder, use_logical_types):
'\n Write specific reader implementation for a protocol\n :param list[avro.schema.RecordSchema] record_types:\n :param output_folder:\n :return:\n '
with open(os.path.join(output_folder, '__init__.py'), 'a+') as f:
writer = TabbedWriter(f)
writer.write('\n\nfrom .schema_classes import SchemaClasses, PROTOCOL as my_proto, get_schema_type')
writer.write('\nfrom avro.io import DatumReader')
write_reader_impl(record_types, writer, use_logical_types)<|docstring|>Write specific reader implementation for a protocol
:param list[avro.schema.RecordSchema] record_types:
:param output_folder:
:return:<|endoftext|> |
f8fa29f1b9b79e0c47600607103cfeb0ed7c9587036faa016b9efcc1d193d66c | def write_namespace_modules(ns_dict, request_names, output_folder):
'\n Writes content of the generated namespace modules. A python module will be created for each namespace\n and will import concrete schema classes from SchemaClasses\n :param ns_dict:\n :param request_names:\n :param output_folder:\n :return:\n '
for ns in six.iterkeys(ns_dict):
with open(os.path.join(output_folder, ns.replace('.', os.path.sep), '__init__.py'), 'w+') as f:
currency = '.'
if (ns != ''):
currency += ('.' * len(ns.split('.')))
f.write('from {currency}schema_classes import SchemaClasses\n'.format(currency=currency))
f.write('from {currency}schema_classes import RequestClasses\n'.format(currency=currency))
for name in ns_dict[ns]:
if (ns_.make_fullname(ns, name) in request_names):
f.write('{name}Request = RequestClasses.{ns}{name}RequestClass\n'.format(name=name, ns=(ns if (not ns) else (ns + '.'))))
else:
f.write('{name} = SchemaClasses.{ns}{name}Class\n'.format(name=name, ns=(ns if (not ns) else (ns + '.')))) | Writes content of the generated namespace modules. A python module will be created for each namespace
and will import concrete schema classes from SchemaClasses
:param ns_dict:
:param request_names:
:param output_folder:
:return: | avrogen/protocol.py | write_namespace_modules | kevinhu/avro_gen | 22 | python | def write_namespace_modules(ns_dict, request_names, output_folder):
'\n Writes content of the generated namespace modules. A python module will be created for each namespace\n and will import concrete schema classes from SchemaClasses\n :param ns_dict:\n :param request_names:\n :param output_folder:\n :return:\n '
for ns in six.iterkeys(ns_dict):
with open(os.path.join(output_folder, ns.replace('.', os.path.sep), '__init__.py'), 'w+') as f:
currency = '.'
if (ns != ):
currency += ('.' * len(ns.split('.')))
f.write('from {currency}schema_classes import SchemaClasses\n'.format(currency=currency))
f.write('from {currency}schema_classes import RequestClasses\n'.format(currency=currency))
for name in ns_dict[ns]:
if (ns_.make_fullname(ns, name) in request_names):
f.write('{name}Request = RequestClasses.{ns}{name}RequestClass\n'.format(name=name, ns=(ns if (not ns) else (ns + '.'))))
else:
f.write('{name} = SchemaClasses.{ns}{name}Class\n'.format(name=name, ns=(ns if (not ns) else (ns + '.')))) | def write_namespace_modules(ns_dict, request_names, output_folder):
'\n Writes content of the generated namespace modules. A python module will be created for each namespace\n and will import concrete schema classes from SchemaClasses\n :param ns_dict:\n :param request_names:\n :param output_folder:\n :return:\n '
for ns in six.iterkeys(ns_dict):
with open(os.path.join(output_folder, ns.replace('.', os.path.sep), '__init__.py'), 'w+') as f:
currency = '.'
if (ns != ):
currency += ('.' * len(ns.split('.')))
f.write('from {currency}schema_classes import SchemaClasses\n'.format(currency=currency))
f.write('from {currency}schema_classes import RequestClasses\n'.format(currency=currency))
for name in ns_dict[ns]:
if (ns_.make_fullname(ns, name) in request_names):
f.write('{name}Request = RequestClasses.{ns}{name}RequestClass\n'.format(name=name, ns=(ns if (not ns) else (ns + '.'))))
else:
f.write('{name} = SchemaClasses.{ns}{name}Class\n'.format(name=name, ns=(ns if (not ns) else (ns + '.'))))<|docstring|>Writes content of the generated namespace modules. A python module will be created for each namespace
and will import concrete schema classes from SchemaClasses
:param ns_dict:
:param request_names:
:param output_folder:
:return:<|endoftext|> |
62d7e1f8add01fd0fd74c39b09ea871492a5435bcf61be4c3c88d62368994f67 | def from_netcdf(input_file: Union[(str, Path)], output_uri: str, input_group_path: str='/', recursive: bool=True, output_key: Optional[str]=None, output_ctx: Optional[tiledb.Ctx]=None, unlimited_dim_size: int=10000, dim_dtype: np.dtype=_DEFAULT_INDEX_DTYPE, tiles_by_var: Optional[Dict[(str, Dict[(str, Optional[Sequence[int]])])]]=None, tiles_by_dims: Optional[Dict[(str, Dict[(Sequence[str], Optional[Sequence[int]])])]]=None, coords_to_dims: bool=False, collect_attrs: bool=True, unpack_vars: bool=False, coords_filters: Optional[tiledb.FilterList]=None, offsets_filters: Optional[tiledb.FilterList]=None, attrs_filters: Optional[tiledb.FilterList]=None, copy_metadata: bool=True, use_virtual_groups: bool=False):
"Converts a NetCDF input file to nested TileDB CF dataspaces.\n\n See :class:`~tiledb.cf.NetCDF4ConverterEngine` for more\n information on the backend converter engine used for the conversion.\n\n Parameters:\n input_file: The input NetCDF file to generate the converter engine from.\n output_uri: The uniform resource identifier for the TileDB group to be created.\n input_group_path: The path to the NetCDF group to copy data from. Use ``'/'``\n for the root group.\n recursive: If ``True``, recursively convert groups in a NetCDF file. Otherwise,\n only convert group provided.\n output_key: If not ``None``, encryption key to decrypt arrays.\n output_ctx: If not ``None``, TileDB context wrapper for a TileDB storage\n manager.\n dim_dtype: The numpy dtype for the TileDB dimensions created from NetCDF\n dimensions.\n unlimited_dim_size: The size of the domain for TileDB dimensions created\n from unlimited NetCDF dimensions.\n dim_dtype: The numpy dtype for TileDB dimensions.\n tiles_by_var: A map from the name of a NetCDF variable to the tiles of the\n dimensions of the variable in the generated TileDB array.\n tiles_by_dims: A map from the name of NetCDF dimensions defining a variable\n to the tiles of those dimensions in the generated TileDB array.\n coords_to_dims: If ``True``, convert the NetCDF coordinate variable into a\n TileDB dimension for sparse arrays. Otherwise, convert the coordinate\n dimension into a TileDB dimension and the coordinate variable into a\n TileDB attribute.\n collect_attrs: If ``True``, store all attributes with the same dimensions in\n the same array. 
Otherwise, store each attribute in a scalar array.\n unpack_vars: Unpack NetCDF variables with NetCDF attributes ``scale_factor``\n or ``add_offset`` using the transformation ``scale_factor * value +\n unpack``.\n coords_filters: Default filters for all dimensions.\n offsets_filters: Default filters for all offsets for variable attributes\n and dimensions.\n attrs_filters: Default filters for all attributes.\n copy_metadata: If ``True`` copy NetCDF group and variable attributes to\n TileDB metadata. If ``False`` do not copy metadata.\n use_virtual_groups: If ``True``, create a virtual group using ``output_uri``\n as the name for the group metadata array. All other arrays will be named\n using the convention ``{uri}_{array_name}`` where ``array_name`` is the\n name of the array.\n "
from .converter import NetCDF4ConverterEngine, open_netcdf_group
output_uri = (output_uri if (not output_uri.endswith('/')) else output_uri[:(- 1)])
if (tiles_by_var is None):
tiles_by_var = {}
if (tiles_by_dims is None):
tiles_by_dims = {}
def recursive_convert(netcdf_group):
converter = NetCDF4ConverterEngine.from_group(netcdf_group, unlimited_dim_size, dim_dtype, tiles_by_var.get(netcdf_group.path), tiles_by_dims.get(netcdf_group.path), coords_to_dims=coords_to_dims, collect_attrs=collect_attrs, unpack_vars=unpack_vars, coords_filters=coords_filters, offsets_filters=offsets_filters, attrs_filters=attrs_filters)
if use_virtual_groups:
group_uri = (output_uri if (netcdf_group.path == '/') else (output_uri + netcdf_group.path.replace('/', '_')))
converter.convert_to_virtual_group(group_uri, output_key, output_ctx, input_netcdf_group=netcdf_group, copy_metadata=copy_metadata)
else:
group_uri = (output_uri + netcdf_group.path)
converter.convert_to_group(group_uri, output_key, output_ctx, input_netcdf_group=netcdf_group, copy_metadata=copy_metadata)
if recursive:
for subgroup in netcdf_group.groups.values():
recursive_convert(subgroup)
with open_netcdf_group(input_file=input_file, group_path=input_group_path) as dataset:
recursive_convert(dataset) | Converts a NetCDF input file to nested TileDB CF dataspaces.
See :class:`~tiledb.cf.NetCDF4ConverterEngine` for more
information on the backend converter engine used for the conversion.
Parameters:
input_file: The input NetCDF file to generate the converter engine from.
output_uri: The uniform resource identifier for the TileDB group to be created.
input_group_path: The path to the NetCDF group to copy data from. Use ``'/'``
for the root group.
recursive: If ``True``, recursively convert groups in a NetCDF file. Otherwise,
only convert group provided.
output_key: If not ``None``, encryption key to decrypt arrays.
output_ctx: If not ``None``, TileDB context wrapper for a TileDB storage
manager.
dim_dtype: The numpy dtype for the TileDB dimensions created from NetCDF
dimensions.
unlimited_dim_size: The size of the domain for TileDB dimensions created
from unlimited NetCDF dimensions.
dim_dtype: The numpy dtype for TileDB dimensions.
tiles_by_var: A map from the name of a NetCDF variable to the tiles of the
dimensions of the variable in the generated TileDB array.
tiles_by_dims: A map from the name of NetCDF dimensions defining a variable
to the tiles of those dimensions in the generated TileDB array.
coords_to_dims: If ``True``, convert the NetCDF coordinate variable into a
TileDB dimension for sparse arrays. Otherwise, convert the coordinate
dimension into a TileDB dimension and the coordinate variable into a
TileDB attribute.
collect_attrs: If ``True``, store all attributes with the same dimensions in
the same array. Otherwise, store each attribute in a scalar array.
unpack_vars: Unpack NetCDF variables with NetCDF attributes ``scale_factor``
or ``add_offset`` using the transformation ``scale_factor * value +
unpack``.
coords_filters: Default filters for all dimensions.
offsets_filters: Default filters for all offsets for variable attributes
and dimensions.
attrs_filters: Default filters for all attributes.
copy_metadata: If ``True`` copy NetCDF group and variable attributes to
TileDB metadata. If ``False`` do not copy metadata.
use_virtual_groups: If ``True``, create a virtual group using ``output_uri``
as the name for the group metadata array. All other arrays will be named
using the convention ``{uri}_{array_name}`` where ``array_name`` is the
name of the array. | tiledb/cf/netcdf_engine/api.py | from_netcdf | TileDB-Inc/TileDB-CF-Py | 12 | python | def from_netcdf(input_file: Union[(str, Path)], output_uri: str, input_group_path: str='/', recursive: bool=True, output_key: Optional[str]=None, output_ctx: Optional[tiledb.Ctx]=None, unlimited_dim_size: int=10000, dim_dtype: np.dtype=_DEFAULT_INDEX_DTYPE, tiles_by_var: Optional[Dict[(str, Dict[(str, Optional[Sequence[int]])])]]=None, tiles_by_dims: Optional[Dict[(str, Dict[(Sequence[str], Optional[Sequence[int]])])]]=None, coords_to_dims: bool=False, collect_attrs: bool=True, unpack_vars: bool=False, coords_filters: Optional[tiledb.FilterList]=None, offsets_filters: Optional[tiledb.FilterList]=None, attrs_filters: Optional[tiledb.FilterList]=None, copy_metadata: bool=True, use_virtual_groups: bool=False):
"Converts a NetCDF input file to nested TileDB CF dataspaces.\n\n See :class:`~tiledb.cf.NetCDF4ConverterEngine` for more\n information on the backend converter engine used for the conversion.\n\n Parameters:\n input_file: The input NetCDF file to generate the converter engine from.\n output_uri: The uniform resource identifier for the TileDB group to be created.\n input_group_path: The path to the NetCDF group to copy data from. Use ``'/'``\n for the root group.\n recursive: If ``True``, recursively convert groups in a NetCDF file. Otherwise,\n only convert group provided.\n output_key: If not ``None``, encryption key to decrypt arrays.\n output_ctx: If not ``None``, TileDB context wrapper for a TileDB storage\n manager.\n dim_dtype: The numpy dtype for the TileDB dimensions created from NetCDF\n dimensions.\n unlimited_dim_size: The size of the domain for TileDB dimensions created\n from unlimited NetCDF dimensions.\n dim_dtype: The numpy dtype for TileDB dimensions.\n tiles_by_var: A map from the name of a NetCDF variable to the tiles of the\n dimensions of the variable in the generated TileDB array.\n tiles_by_dims: A map from the name of NetCDF dimensions defining a variable\n to the tiles of those dimensions in the generated TileDB array.\n coords_to_dims: If ``True``, convert the NetCDF coordinate variable into a\n TileDB dimension for sparse arrays. Otherwise, convert the coordinate\n dimension into a TileDB dimension and the coordinate variable into a\n TileDB attribute.\n collect_attrs: If ``True``, store all attributes with the same dimensions in\n the same array. 
Otherwise, store each attribute in a scalar array.\n unpack_vars: Unpack NetCDF variables with NetCDF attributes ``scale_factor``\n or ``add_offset`` using the transformation ``scale_factor * value +\n unpack``.\n coords_filters: Default filters for all dimensions.\n offsets_filters: Default filters for all offsets for variable attributes\n and dimensions.\n attrs_filters: Default filters for all attributes.\n copy_metadata: If ``True`` copy NetCDF group and variable attributes to\n TileDB metadata. If ``False`` do not copy metadata.\n use_virtual_groups: If ``True``, create a virtual group using ``output_uri``\n as the name for the group metadata array. All other arrays will be named\n using the convention ``{uri}_{array_name}`` where ``array_name`` is the\n name of the array.\n "
from .converter import NetCDF4ConverterEngine, open_netcdf_group
output_uri = (output_uri if (not output_uri.endswith('/')) else output_uri[:(- 1)])
if (tiles_by_var is None):
tiles_by_var = {}
if (tiles_by_dims is None):
tiles_by_dims = {}
def recursive_convert(netcdf_group):
converter = NetCDF4ConverterEngine.from_group(netcdf_group, unlimited_dim_size, dim_dtype, tiles_by_var.get(netcdf_group.path), tiles_by_dims.get(netcdf_group.path), coords_to_dims=coords_to_dims, collect_attrs=collect_attrs, unpack_vars=unpack_vars, coords_filters=coords_filters, offsets_filters=offsets_filters, attrs_filters=attrs_filters)
if use_virtual_groups:
group_uri = (output_uri if (netcdf_group.path == '/') else (output_uri + netcdf_group.path.replace('/', '_')))
converter.convert_to_virtual_group(group_uri, output_key, output_ctx, input_netcdf_group=netcdf_group, copy_metadata=copy_metadata)
else:
group_uri = (output_uri + netcdf_group.path)
converter.convert_to_group(group_uri, output_key, output_ctx, input_netcdf_group=netcdf_group, copy_metadata=copy_metadata)
if recursive:
for subgroup in netcdf_group.groups.values():
recursive_convert(subgroup)
with open_netcdf_group(input_file=input_file, group_path=input_group_path) as dataset:
recursive_convert(dataset) | def from_netcdf(input_file: Union[(str, Path)], output_uri: str, input_group_path: str='/', recursive: bool=True, output_key: Optional[str]=None, output_ctx: Optional[tiledb.Ctx]=None, unlimited_dim_size: int=10000, dim_dtype: np.dtype=_DEFAULT_INDEX_DTYPE, tiles_by_var: Optional[Dict[(str, Dict[(str, Optional[Sequence[int]])])]]=None, tiles_by_dims: Optional[Dict[(str, Dict[(Sequence[str], Optional[Sequence[int]])])]]=None, coords_to_dims: bool=False, collect_attrs: bool=True, unpack_vars: bool=False, coords_filters: Optional[tiledb.FilterList]=None, offsets_filters: Optional[tiledb.FilterList]=None, attrs_filters: Optional[tiledb.FilterList]=None, copy_metadata: bool=True, use_virtual_groups: bool=False):
"Converts a NetCDF input file to nested TileDB CF dataspaces.\n\n See :class:`~tiledb.cf.NetCDF4ConverterEngine` for more\n information on the backend converter engine used for the conversion.\n\n Parameters:\n input_file: The input NetCDF file to generate the converter engine from.\n output_uri: The uniform resource identifier for the TileDB group to be created.\n input_group_path: The path to the NetCDF group to copy data from. Use ``'/'``\n for the root group.\n recursive: If ``True``, recursively convert groups in a NetCDF file. Otherwise,\n only convert group provided.\n output_key: If not ``None``, encryption key to decrypt arrays.\n output_ctx: If not ``None``, TileDB context wrapper for a TileDB storage\n manager.\n dim_dtype: The numpy dtype for the TileDB dimensions created from NetCDF\n dimensions.\n unlimited_dim_size: The size of the domain for TileDB dimensions created\n from unlimited NetCDF dimensions.\n dim_dtype: The numpy dtype for TileDB dimensions.\n tiles_by_var: A map from the name of a NetCDF variable to the tiles of the\n dimensions of the variable in the generated TileDB array.\n tiles_by_dims: A map from the name of NetCDF dimensions defining a variable\n to the tiles of those dimensions in the generated TileDB array.\n coords_to_dims: If ``True``, convert the NetCDF coordinate variable into a\n TileDB dimension for sparse arrays. Otherwise, convert the coordinate\n dimension into a TileDB dimension and the coordinate variable into a\n TileDB attribute.\n collect_attrs: If ``True``, store all attributes with the same dimensions in\n the same array. 
Otherwise, store each attribute in a scalar array.\n unpack_vars: Unpack NetCDF variables with NetCDF attributes ``scale_factor``\n or ``add_offset`` using the transformation ``scale_factor * value +\n unpack``.\n coords_filters: Default filters for all dimensions.\n offsets_filters: Default filters for all offsets for variable attributes\n and dimensions.\n attrs_filters: Default filters for all attributes.\n copy_metadata: If ``True`` copy NetCDF group and variable attributes to\n TileDB metadata. If ``False`` do not copy metadata.\n use_virtual_groups: If ``True``, create a virtual group using ``output_uri``\n as the name for the group metadata array. All other arrays will be named\n using the convention ``{uri}_{array_name}`` where ``array_name`` is the\n name of the array.\n "
from .converter import NetCDF4ConverterEngine, open_netcdf_group
output_uri = (output_uri if (not output_uri.endswith('/')) else output_uri[:(- 1)])
if (tiles_by_var is None):
tiles_by_var = {}
if (tiles_by_dims is None):
tiles_by_dims = {}
def recursive_convert(netcdf_group):
converter = NetCDF4ConverterEngine.from_group(netcdf_group, unlimited_dim_size, dim_dtype, tiles_by_var.get(netcdf_group.path), tiles_by_dims.get(netcdf_group.path), coords_to_dims=coords_to_dims, collect_attrs=collect_attrs, unpack_vars=unpack_vars, coords_filters=coords_filters, offsets_filters=offsets_filters, attrs_filters=attrs_filters)
if use_virtual_groups:
group_uri = (output_uri if (netcdf_group.path == '/') else (output_uri + netcdf_group.path.replace('/', '_')))
converter.convert_to_virtual_group(group_uri, output_key, output_ctx, input_netcdf_group=netcdf_group, copy_metadata=copy_metadata)
else:
group_uri = (output_uri + netcdf_group.path)
converter.convert_to_group(group_uri, output_key, output_ctx, input_netcdf_group=netcdf_group, copy_metadata=copy_metadata)
if recursive:
for subgroup in netcdf_group.groups.values():
recursive_convert(subgroup)
with open_netcdf_group(input_file=input_file, group_path=input_group_path) as dataset:
recursive_convert(dataset)<|docstring|>Converts a NetCDF input file to nested TileDB CF dataspaces.
See :class:`~tiledb.cf.NetCDF4ConverterEngine` for more
information on the backend converter engine used for the conversion.
Parameters:
input_file: The input NetCDF file to generate the converter engine from.
output_uri: The uniform resource identifier for the TileDB group to be created.
input_group_path: The path to the NetCDF group to copy data from. Use ``'/'``
for the root group.
recursive: If ``True``, recursively convert groups in a NetCDF file. Otherwise,
only convert group provided.
output_key: If not ``None``, encryption key to decrypt arrays.
output_ctx: If not ``None``, TileDB context wrapper for a TileDB storage
manager.
dim_dtype: The numpy dtype for the TileDB dimensions created from NetCDF
dimensions.
unlimited_dim_size: The size of the domain for TileDB dimensions created
from unlimited NetCDF dimensions.
dim_dtype: The numpy dtype for TileDB dimensions.
tiles_by_var: A map from the name of a NetCDF variable to the tiles of the
dimensions of the variable in the generated TileDB array.
tiles_by_dims: A map from the name of NetCDF dimensions defining a variable
to the tiles of those dimensions in the generated TileDB array.
coords_to_dims: If ``True``, convert the NetCDF coordinate variable into a
TileDB dimension for sparse arrays. Otherwise, convert the coordinate
dimension into a TileDB dimension and the coordinate variable into a
TileDB attribute.
collect_attrs: If ``True``, store all attributes with the same dimensions in
the same array. Otherwise, store each attribute in a scalar array.
unpack_vars: Unpack NetCDF variables with NetCDF attributes ``scale_factor``
or ``add_offset`` using the transformation ``scale_factor * value +
unpack``.
coords_filters: Default filters for all dimensions.
offsets_filters: Default filters for all offsets for variable attributes
and dimensions.
attrs_filters: Default filters for all attributes.
copy_metadata: If ``True`` copy NetCDF group and variable attributes to
TileDB metadata. If ``False`` do not copy metadata.
use_virtual_groups: If ``True``, create a virtual group using ``output_uri``
as the name for the group metadata array. All other arrays will be named
using the convention ``{uri}_{array_name}`` where ``array_name`` is the
name of the array.<|endoftext|> |
3caaec1d55e759d1aabcf2eeeb3632f8ffd22c7f13ff483e70939307c47d6686 | def 取当前图片(self):
'返回当此控件显示的非活动位图;查看SetInactiveBitmap 更多信息'
return self.GetInactiveBitmap() | 返回当此控件显示的非活动位图;查看SetInactiveBitmap 更多信息 | pyefun/wxefun/component/AnimationCtrl.py | 取当前图片 | liguoqing-byte/pyefun | 94 | python | def 取当前图片(self):
return self.GetInactiveBitmap() | def 取当前图片(self):
return self.GetInactiveBitmap()<|docstring|>返回当此控件显示的非活动位图;查看SetInactiveBitmap 更多信息<|endoftext|> |
0ade209f6b871b62b093f2e4c6fa76901473fcd129724baf4229c5abe01f091d | @组件_异常检测
def 载入动画_流(self, 文件):
'从给定的流中加载动画并调用SetAnimation'
return self.Load(文件) | 从给定的流中加载动画并调用SetAnimation | pyefun/wxefun/component/AnimationCtrl.py | 载入动画_流 | liguoqing-byte/pyefun | 94 | python | @组件_异常检测
def 载入动画_流(self, 文件):
return self.Load(文件) | @组件_异常检测
def 载入动画_流(self, 文件):
return self.Load(文件)<|docstring|>从给定的流中加载动画并调用SetAnimation<|endoftext|> |
ff87b4811ac6a7c84a5c70887a26d355a84a783b6709f5d2c4803bf2bdd9e687 | @组件_异常检测
def 载入动画_文件(self, 文件):
'从给定的文件加载动画并调用SetAnimation。'
return self.LoadFile(文件) | 从给定的文件加载动画并调用SetAnimation。 | pyefun/wxefun/component/AnimationCtrl.py | 载入动画_文件 | liguoqing-byte/pyefun | 94 | python | @组件_异常检测
def 载入动画_文件(self, 文件):
return self.LoadFile(文件) | @组件_异常检测
def 载入动画_文件(self, 文件):
return self.LoadFile(文件)<|docstring|>从给定的文件加载动画并调用SetAnimation。<|endoftext|> |
5c69ddeafb83589cf785f7441f80b1046e4bb75ed2bdd070adf44048fb346337 | @组件_异常检测
def 载入动画(self, 动画):
'设置动画在此控件中播放'
return self.SetAnimation(动画) | 设置动画在此控件中播放 | pyefun/wxefun/component/AnimationCtrl.py | 载入动画 | liguoqing-byte/pyefun | 94 | python | @组件_异常检测
def 载入动画(self, 动画):
return self.SetAnimation(动画) | @组件_异常检测
def 载入动画(self, 动画):
return self.SetAnimation(动画)<|docstring|>设置动画在此控件中播放<|endoftext|> |
4b5e5b9ef53b84cf92ebb6686e523a36b342931f4219c3c53f5b7a7d358eedbd | @组件_异常检测
def 置默认显示图片(self, 图片):
'设置位图在不播放动画时显示在控件上。'
return self.SetInactiveBitmap(图片) | 设置位图在不播放动画时显示在控件上。 | pyefun/wxefun/component/AnimationCtrl.py | 置默认显示图片 | liguoqing-byte/pyefun | 94 | python | @组件_异常检测
def 置默认显示图片(self, 图片):
return self.SetInactiveBitmap(图片) | @组件_异常检测
def 置默认显示图片(self, 图片):
return self.SetInactiveBitmap(图片)<|docstring|>设置位图在不播放动画时显示在控件上。<|endoftext|> |
202fc783d766386f8b95815a56b600985d648d2fcec9501d723887b0ed6ccc4b | def loadExtensions(vdb, trace):
'\n Actually load all known extensions here.\n '
plat = trace.getMeta('Platform').lower()
arch = trace.getMeta('Architecture').lower()
if (plat in __all__):
mod = __import__(('vdb.extensions.%s' % plat), 0, 0, 1)
mod.vdbExtension(vdb, trace)
if (arch in __all__):
mod = __import__(('vdb.extensions.%s' % arch), 0, 0, 1)
mod.vdbExtension(vdb, trace)
extdir = os.getenv('VDB_EXT_PATH')
if (extdir is None):
extdir = os.path.abspath(os.path.join('vdb', 'ext'))
for dirname in extdir.split(os.pathsep):
if (not os.path.isdir(dirname)):
vdb.vprint(('Invalid VDB_EXT_PATH dir: %s' % dirname))
continue
if (dirname not in sys.path):
sys.path.append(dirname)
for fname in os.listdir(dirname):
modpath = os.path.join(dirname, fname)
if os.path.isdir(modpath):
modpath = os.path.join(modpath, '__init__.py')
if (not os.path.exists(modpath)):
continue
if ((not fname.endswith('.py')) or (fname == '__init__.py')):
continue
try:
spec = importlib.util.spec_from_file_location(fname, modpath)
module = importlib.util.module_from_spec(spec)
module.vdb = vdb
module.__file__ = modpath
spec.loader.exec_module(module)
module.vdbExtension(vdb, trace)
vdb.addExtension(fname, module)
except Exception:
vdb.vprint(('VDB Extension Error: %s' % modpath))
vdb.vprint(traceback.format_exc()) | Actually load all known extensions here. | vdb/extensions/__init__.py | loadExtensions | TomSomerville/vivisect | 716 | python | def loadExtensions(vdb, trace):
'\n \n '
plat = trace.getMeta('Platform').lower()
arch = trace.getMeta('Architecture').lower()
if (plat in __all__):
mod = __import__(('vdb.extensions.%s' % plat), 0, 0, 1)
mod.vdbExtension(vdb, trace)
if (arch in __all__):
mod = __import__(('vdb.extensions.%s' % arch), 0, 0, 1)
mod.vdbExtension(vdb, trace)
extdir = os.getenv('VDB_EXT_PATH')
if (extdir is None):
extdir = os.path.abspath(os.path.join('vdb', 'ext'))
for dirname in extdir.split(os.pathsep):
if (not os.path.isdir(dirname)):
vdb.vprint(('Invalid VDB_EXT_PATH dir: %s' % dirname))
continue
if (dirname not in sys.path):
sys.path.append(dirname)
for fname in os.listdir(dirname):
modpath = os.path.join(dirname, fname)
if os.path.isdir(modpath):
modpath = os.path.join(modpath, '__init__.py')
if (not os.path.exists(modpath)):
continue
if ((not fname.endswith('.py')) or (fname == '__init__.py')):
continue
try:
spec = importlib.util.spec_from_file_location(fname, modpath)
module = importlib.util.module_from_spec(spec)
module.vdb = vdb
module.__file__ = modpath
spec.loader.exec_module(module)
module.vdbExtension(vdb, trace)
vdb.addExtension(fname, module)
except Exception:
vdb.vprint(('VDB Extension Error: %s' % modpath))
vdb.vprint(traceback.format_exc()) | def loadExtensions(vdb, trace):
'\n \n '
plat = trace.getMeta('Platform').lower()
arch = trace.getMeta('Architecture').lower()
if (plat in __all__):
mod = __import__(('vdb.extensions.%s' % plat), 0, 0, 1)
mod.vdbExtension(vdb, trace)
if (arch in __all__):
mod = __import__(('vdb.extensions.%s' % arch), 0, 0, 1)
mod.vdbExtension(vdb, trace)
extdir = os.getenv('VDB_EXT_PATH')
if (extdir is None):
extdir = os.path.abspath(os.path.join('vdb', 'ext'))
for dirname in extdir.split(os.pathsep):
if (not os.path.isdir(dirname)):
vdb.vprint(('Invalid VDB_EXT_PATH dir: %s' % dirname))
continue
if (dirname not in sys.path):
sys.path.append(dirname)
for fname in os.listdir(dirname):
modpath = os.path.join(dirname, fname)
if os.path.isdir(modpath):
modpath = os.path.join(modpath, '__init__.py')
if (not os.path.exists(modpath)):
continue
if ((not fname.endswith('.py')) or (fname == '__init__.py')):
continue
try:
spec = importlib.util.spec_from_file_location(fname, modpath)
module = importlib.util.module_from_spec(spec)
module.vdb = vdb
module.__file__ = modpath
spec.loader.exec_module(module)
module.vdbExtension(vdb, trace)
vdb.addExtension(fname, module)
except Exception:
vdb.vprint(('VDB Extension Error: %s' % modpath))
vdb.vprint(traceback.format_exc())<|docstring|>Actually load all known extensions here.<|endoftext|> |
5f78f64e0115cff7ced8d37b4a2cf201685bf7dfee31581addeb06bbaee880f7 | def Init(self, node):
'\n Called when Cinema 4D Initialize the TagData (used to define, default values)\n :param node: The instance of the TagData.\n :type node: c4d.GeListNode\n :return: True on success, otherwise False.\n '
data = DataContainer(node.GetDataInstance())
data.strength = 1.0
data.resultRotation = c4d.Vector(0, 0, 0)
self.previousFrame = 0
data.targetOffset = c4d.Vector(0, 0, 100)
data.startTime = 0.0
data.upVector = VECTOR_YPLUS
data.aimVector = VECTOR_ZPLUS
data.squashStretchStretchStrength = 0.0
data.squashStretchSquashStrength = 0.0
data.stiffness = 0.1
data.mass = 0.9
data.damping = 0.75
data.gravity = c4d.Vector(0, (- 981.0), 0)
self.Reset(node)
c4d.EventAdd()
return True | Called when Cinema 4D Initialize the TagData (used to define, default values)
:param node: The instance of the TagData.
:type node: c4d.GeListNode
:return: True on success, otherwise False. | tjiggle.py | Init | beesperester/cinema4d-jiggle | 1 | python | def Init(self, node):
'\n Called when Cinema 4D Initialize the TagData (used to define, default values)\n :param node: The instance of the TagData.\n :type node: c4d.GeListNode\n :return: True on success, otherwise False.\n '
data = DataContainer(node.GetDataInstance())
data.strength = 1.0
data.resultRotation = c4d.Vector(0, 0, 0)
self.previousFrame = 0
data.targetOffset = c4d.Vector(0, 0, 100)
data.startTime = 0.0
data.upVector = VECTOR_YPLUS
data.aimVector = VECTOR_ZPLUS
data.squashStretchStretchStrength = 0.0
data.squashStretchSquashStrength = 0.0
data.stiffness = 0.1
data.mass = 0.9
data.damping = 0.75
data.gravity = c4d.Vector(0, (- 981.0), 0)
self.Reset(node)
c4d.EventAdd()
return True | def Init(self, node):
'\n Called when Cinema 4D Initialize the TagData (used to define, default values)\n :param node: The instance of the TagData.\n :type node: c4d.GeListNode\n :return: True on success, otherwise False.\n '
data = DataContainer(node.GetDataInstance())
data.strength = 1.0
data.resultRotation = c4d.Vector(0, 0, 0)
self.previousFrame = 0
data.targetOffset = c4d.Vector(0, 0, 100)
data.startTime = 0.0
data.upVector = VECTOR_YPLUS
data.aimVector = VECTOR_ZPLUS
data.squashStretchStretchStrength = 0.0
data.squashStretchSquashStrength = 0.0
data.stiffness = 0.1
data.mass = 0.9
data.damping = 0.75
data.gravity = c4d.Vector(0, (- 981.0), 0)
self.Reset(node)
c4d.EventAdd()
return True<|docstring|>Called when Cinema 4D Initialize the TagData (used to define, default values)
:param node: The instance of the TagData.
:type node: c4d.GeListNode
:return: True on success, otherwise False.<|endoftext|> |
4ba0dfe3adc4044c3a739d16076fbc2b77c3e9838af2ce26f9be7c04d983fd14 | def GetHandleCount(self, op):
'\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :return:\n '
return 1 | :param op: The host object of the tag.
:type op: c4d.BaseObject
:return: | tjiggle.py | GetHandleCount | beesperester/cinema4d-jiggle | 1 | python | def GetHandleCount(self, op):
'\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :return:\n '
return 1 | def GetHandleCount(self, op):
'\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :return:\n '
return 1<|docstring|>:param op: The host object of the tag.
:type op: c4d.BaseObject
:return:<|endoftext|> |
05df916f30235a17aaffc319b9863164aab1a899286c29af066b7f7d5a3eda46 | def GetHandle(self, op, i, info):
'\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :param i: Index of handle\n :type i: int\n :param info: Info of handle\n :type info: c4d.HandleInfo\n :return:\n '
data = DataContainer(op.GetDataInstance())
info.position = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
info.type = c4d.HANDLECONSTRAINTTYPE_FREE | :param op: The host object of the tag.
:type op: c4d.BaseObject
:param i: Index of handle
:type i: int
:param info: Info of handle
:type info: c4d.HandleInfo
:return: | tjiggle.py | GetHandle | beesperester/cinema4d-jiggle | 1 | python | def GetHandle(self, op, i, info):
'\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :param i: Index of handle\n :type i: int\n :param info: Info of handle\n :type info: c4d.HandleInfo\n :return:\n '
data = DataContainer(op.GetDataInstance())
info.position = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
info.type = c4d.HANDLECONSTRAINTTYPE_FREE | def GetHandle(self, op, i, info):
'\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :param i: Index of handle\n :type i: int\n :param info: Info of handle\n :type info: c4d.HandleInfo\n :return:\n '
data = DataContainer(op.GetDataInstance())
info.position = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
info.type = c4d.HANDLECONSTRAINTTYPE_FREE<|docstring|>:param op: The host object of the tag.
:type op: c4d.BaseObject
:param i: Index of handle
:type i: int
:param info: Info of handle
:type info: c4d.HandleInfo
:return:<|endoftext|> |
bc62b4e28a32b7f48cf5cc1cdde8135f0b86219f50c051ac73c5d0dd96eac25f | def SetHandle(self, op, i, p, info):
'\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :param i: Index of handle\n :type i: int\n :param p: Handle Position\n :type p: c4d.Vector\n :param info: Info of handle\n :type info: c4d.HandleInfo\n :return:\n '
data = DataContainer(op.GetDataInstance())
data.targetOffset = (p * (~ data.originObject.GetMg())) | :param op: The host object of the tag.
:type op: c4d.BaseObject
:param i: Index of handle
:type i: int
:param p: Handle Position
:type p: c4d.Vector
:param info: Info of handle
:type info: c4d.HandleInfo
:return: | tjiggle.py | SetHandle | beesperester/cinema4d-jiggle | 1 | python | def SetHandle(self, op, i, p, info):
'\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :param i: Index of handle\n :type i: int\n :param p: Handle Position\n :type p: c4d.Vector\n :param info: Info of handle\n :type info: c4d.HandleInfo\n :return:\n '
data = DataContainer(op.GetDataInstance())
data.targetOffset = (p * (~ data.originObject.GetMg())) | def SetHandle(self, op, i, p, info):
'\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :param i: Index of handle\n :type i: int\n :param p: Handle Position\n :type p: c4d.Vector\n :param info: Info of handle\n :type info: c4d.HandleInfo\n :return:\n '
data = DataContainer(op.GetDataInstance())
data.targetOffset = (p * (~ data.originObject.GetMg()))<|docstring|>:param op: The host object of the tag.
:type op: c4d.BaseObject
:param i: Index of handle
:type i: int
:param p: Handle Position
:type p: c4d.Vector
:param info: Info of handle
:type info: c4d.HandleInfo
:return:<|endoftext|> |
72d915c43c7f141505614e6018bcc8a29f375ff333276b8a3bebe650846db407 | def Execute(self, tag, doc, op, bt, priority, flags):
"\n Called by Cinema 4D at each Scene Execution, this is the place where calculation should take place.\n :param tag: The instance of the TagData.\n :type tag: c4d.BaseTag\n :param doc: The host document of the tag's object.\n :type doc: c4d.documents.BaseDocument\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :param bt: The Thread that execute the this TagData.\n :type bt: c4d.threading.BaseThread\n :param priority: Information about the execution priority of this TagData.\n :type priority: EXECUTIONPRIORITY\n :param flags: Information about when this TagData is executed.\n :type flags: EXECUTIONFLAGS\n :return:\n "
data = DataContainer(tag.GetDataInstance())
fps = doc.GetFps()
currentFrame = float(Jiggle.GetFrame(doc.GetTime(), fps))
originMatrix = data.originObject.GetMg()
originPosition = originMatrix.off
projectedPosition = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
if (currentFrame > data.startTime):
if (currentFrame == (self.previousFrame + 1.0)):
self.Update(tag, doc, op)
else:
self.Reset(tag)
targetPosition = c4d.utils.MixVec(projectedPosition, self.position, data.strength)
aim = c4d.Vector((targetPosition - originPosition)).GetNormalized()
if (data.upVector == VECTOR_XPLUS):
up = originMatrix.MulV(c4d.Vector(1.0, 0, 0))
elif (data.upVector == VECTOR_XMINUS):
up = originMatrix.MulV(c4d.Vector((- 1.0), 0, 0))
elif (data.upVector == VECTOR_YPLUS):
up = originMatrix.MulV(c4d.Vector(0, 1.0, 0))
elif (data.upVector == VECTOR_YMINUS):
up = originMatrix.MulV(c4d.Vector(0, (- 1.0), 0))
elif (data.upVector == VECTOR_ZPLUS):
up = originMatrix.MulV(c4d.Vector(0, 0, 1.0))
elif (data.upVector == VECTOR_ZMINUS):
up = originMatrix.MulV(c4d.Vector(0, 0, (- 1.0)))
side = up.Cross(aim)
if data.squashStretchEnable:
distance = c4d.Vector((targetPosition - originPosition)).GetLength()
maxDistance = data.targetOffset.GetLength()
relativeDistance = (distance - maxDistance)
try:
squashStretchBias = (abs(relativeDistance) / maxDistance)
except ZeroDivisionError:
squashStretchBias = 0.0
if (relativeDistance > 0.0):
squashStretchBias = (squashStretchBias * data.squashStretchStretchStrength)
aim = (aim * (1.0 + squashStretchBias))
up = (up * (1.0 - squashStretchBias))
side = (side * (1.0 - squashStretchBias))
else:
squashStretchBias = (squashStretchBias * data.squashStretchSquashStrength)
aim = (aim * (1.0 - squashStretchBias))
up = (up * (1.0 + squashStretchBias))
side = (side * (1.0 + squashStretchBias))
if (data.aimVector == VECTOR_XPLUS):
jiggleMatrix = c4d.Matrix(originPosition, aim, up, side)
elif (data.aimVector == VECTOR_XMINUS):
jiggleMatrix = c4d.Matrix(originPosition, (- aim), up, side)
elif (data.aimVector == VECTOR_YPLUS):
jiggleMatrix = c4d.Matrix(originPosition, side, aim, up)
elif (data.aimVector == VECTOR_YMINUS):
jiggleMatrix = c4d.Matrix(originPosition, side, (- aim), up)
elif (data.aimVector == VECTOR_ZPLUS):
jiggleMatrix = c4d.Matrix(originPosition, side, up, aim)
elif (data.aimVector == VECTOR_ZMINUS):
jiggleMatrix = c4d.Matrix(originPosition, side, up, (- aim))
op.SetMg(jiggleMatrix)
self.previousFrame = currentFrame
return c4d.EXECUTIONRESULT_OK | Called by Cinema 4D at each Scene Execution, this is the place where calculation should take place.
:param tag: The instance of the TagData.
:type tag: c4d.BaseTag
:param doc: The host document of the tag's object.
:type doc: c4d.documents.BaseDocument
:param op: The host object of the tag.
:type op: c4d.BaseObject
:param bt: The Thread that execute the this TagData.
:type bt: c4d.threading.BaseThread
:param priority: Information about the execution priority of this TagData.
:type priority: EXECUTIONPRIORITY
:param flags: Information about when this TagData is executed.
:type flags: EXECUTIONFLAGS
:return: | tjiggle.py | Execute | beesperester/cinema4d-jiggle | 1 | python | def Execute(self, tag, doc, op, bt, priority, flags):
"\n Called by Cinema 4D at each Scene Execution, this is the place where calculation should take place.\n :param tag: The instance of the TagData.\n :type tag: c4d.BaseTag\n :param doc: The host document of the tag's object.\n :type doc: c4d.documents.BaseDocument\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :param bt: The Thread that execute the this TagData.\n :type bt: c4d.threading.BaseThread\n :param priority: Information about the execution priority of this TagData.\n :type priority: EXECUTIONPRIORITY\n :param flags: Information about when this TagData is executed.\n :type flags: EXECUTIONFLAGS\n :return:\n "
data = DataContainer(tag.GetDataInstance())
fps = doc.GetFps()
currentFrame = float(Jiggle.GetFrame(doc.GetTime(), fps))
originMatrix = data.originObject.GetMg()
originPosition = originMatrix.off
projectedPosition = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
if (currentFrame > data.startTime):
if (currentFrame == (self.previousFrame + 1.0)):
self.Update(tag, doc, op)
else:
self.Reset(tag)
targetPosition = c4d.utils.MixVec(projectedPosition, self.position, data.strength)
aim = c4d.Vector((targetPosition - originPosition)).GetNormalized()
if (data.upVector == VECTOR_XPLUS):
up = originMatrix.MulV(c4d.Vector(1.0, 0, 0))
elif (data.upVector == VECTOR_XMINUS):
up = originMatrix.MulV(c4d.Vector((- 1.0), 0, 0))
elif (data.upVector == VECTOR_YPLUS):
up = originMatrix.MulV(c4d.Vector(0, 1.0, 0))
elif (data.upVector == VECTOR_YMINUS):
up = originMatrix.MulV(c4d.Vector(0, (- 1.0), 0))
elif (data.upVector == VECTOR_ZPLUS):
up = originMatrix.MulV(c4d.Vector(0, 0, 1.0))
elif (data.upVector == VECTOR_ZMINUS):
up = originMatrix.MulV(c4d.Vector(0, 0, (- 1.0)))
side = up.Cross(aim)
if data.squashStretchEnable:
distance = c4d.Vector((targetPosition - originPosition)).GetLength()
maxDistance = data.targetOffset.GetLength()
relativeDistance = (distance - maxDistance)
try:
squashStretchBias = (abs(relativeDistance) / maxDistance)
except ZeroDivisionError:
squashStretchBias = 0.0
if (relativeDistance > 0.0):
squashStretchBias = (squashStretchBias * data.squashStretchStretchStrength)
aim = (aim * (1.0 + squashStretchBias))
up = (up * (1.0 - squashStretchBias))
side = (side * (1.0 - squashStretchBias))
else:
squashStretchBias = (squashStretchBias * data.squashStretchSquashStrength)
aim = (aim * (1.0 - squashStretchBias))
up = (up * (1.0 + squashStretchBias))
side = (side * (1.0 + squashStretchBias))
if (data.aimVector == VECTOR_XPLUS):
jiggleMatrix = c4d.Matrix(originPosition, aim, up, side)
elif (data.aimVector == VECTOR_XMINUS):
jiggleMatrix = c4d.Matrix(originPosition, (- aim), up, side)
elif (data.aimVector == VECTOR_YPLUS):
jiggleMatrix = c4d.Matrix(originPosition, side, aim, up)
elif (data.aimVector == VECTOR_YMINUS):
jiggleMatrix = c4d.Matrix(originPosition, side, (- aim), up)
elif (data.aimVector == VECTOR_ZPLUS):
jiggleMatrix = c4d.Matrix(originPosition, side, up, aim)
elif (data.aimVector == VECTOR_ZMINUS):
jiggleMatrix = c4d.Matrix(originPosition, side, up, (- aim))
op.SetMg(jiggleMatrix)
self.previousFrame = currentFrame
return c4d.EXECUTIONRESULT_OK | def Execute(self, tag, doc, op, bt, priority, flags):
"\n Called by Cinema 4D at each Scene Execution, this is the place where calculation should take place.\n :param tag: The instance of the TagData.\n :type tag: c4d.BaseTag\n :param doc: The host document of the tag's object.\n :type doc: c4d.documents.BaseDocument\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :param bt: The Thread that execute the this TagData.\n :type bt: c4d.threading.BaseThread\n :param priority: Information about the execution priority of this TagData.\n :type priority: EXECUTIONPRIORITY\n :param flags: Information about when this TagData is executed.\n :type flags: EXECUTIONFLAGS\n :return:\n "
data = DataContainer(tag.GetDataInstance())
fps = doc.GetFps()
currentFrame = float(Jiggle.GetFrame(doc.GetTime(), fps))
originMatrix = data.originObject.GetMg()
originPosition = originMatrix.off
projectedPosition = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
if (currentFrame > data.startTime):
if (currentFrame == (self.previousFrame + 1.0)):
self.Update(tag, doc, op)
else:
self.Reset(tag)
targetPosition = c4d.utils.MixVec(projectedPosition, self.position, data.strength)
aim = c4d.Vector((targetPosition - originPosition)).GetNormalized()
if (data.upVector == VECTOR_XPLUS):
up = originMatrix.MulV(c4d.Vector(1.0, 0, 0))
elif (data.upVector == VECTOR_XMINUS):
up = originMatrix.MulV(c4d.Vector((- 1.0), 0, 0))
elif (data.upVector == VECTOR_YPLUS):
up = originMatrix.MulV(c4d.Vector(0, 1.0, 0))
elif (data.upVector == VECTOR_YMINUS):
up = originMatrix.MulV(c4d.Vector(0, (- 1.0), 0))
elif (data.upVector == VECTOR_ZPLUS):
up = originMatrix.MulV(c4d.Vector(0, 0, 1.0))
elif (data.upVector == VECTOR_ZMINUS):
up = originMatrix.MulV(c4d.Vector(0, 0, (- 1.0)))
side = up.Cross(aim)
if data.squashStretchEnable:
distance = c4d.Vector((targetPosition - originPosition)).GetLength()
maxDistance = data.targetOffset.GetLength()
relativeDistance = (distance - maxDistance)
try:
squashStretchBias = (abs(relativeDistance) / maxDistance)
except ZeroDivisionError:
squashStretchBias = 0.0
if (relativeDistance > 0.0):
squashStretchBias = (squashStretchBias * data.squashStretchStretchStrength)
aim = (aim * (1.0 + squashStretchBias))
up = (up * (1.0 - squashStretchBias))
side = (side * (1.0 - squashStretchBias))
else:
squashStretchBias = (squashStretchBias * data.squashStretchSquashStrength)
aim = (aim * (1.0 - squashStretchBias))
up = (up * (1.0 + squashStretchBias))
side = (side * (1.0 + squashStretchBias))
if (data.aimVector == VECTOR_XPLUS):
jiggleMatrix = c4d.Matrix(originPosition, aim, up, side)
elif (data.aimVector == VECTOR_XMINUS):
jiggleMatrix = c4d.Matrix(originPosition, (- aim), up, side)
elif (data.aimVector == VECTOR_YPLUS):
jiggleMatrix = c4d.Matrix(originPosition, side, aim, up)
elif (data.aimVector == VECTOR_YMINUS):
jiggleMatrix = c4d.Matrix(originPosition, side, (- aim), up)
elif (data.aimVector == VECTOR_ZPLUS):
jiggleMatrix = c4d.Matrix(originPosition, side, up, aim)
elif (data.aimVector == VECTOR_ZMINUS):
jiggleMatrix = c4d.Matrix(originPosition, side, up, (- aim))
op.SetMg(jiggleMatrix)
self.previousFrame = currentFrame
return c4d.EXECUTIONRESULT_OK<|docstring|>Called by Cinema 4D at each Scene Execution, this is the place where calculation should take place.
:param tag: The instance of the TagData.
:type tag: c4d.BaseTag
:param doc: The host document of the tag's object.
:type doc: c4d.documents.BaseDocument
:param op: The host object of the tag.
:type op: c4d.BaseObject
:param bt: The Thread that execute the this TagData.
:type bt: c4d.threading.BaseThread
:param priority: Information about the execution priority of this TagData.
:type priority: EXECUTIONPRIORITY
:param flags: Information about when this TagData is executed.
:type flags: EXECUTIONFLAGS
:return:<|endoftext|> |
7945a2565a9dd3ac9de91d2cdc10d6891a25d4dc6638836c5f7b6f588991d669 | def Reset(self, tag):
'\n Update loop.\n :param tag: The instance of the TagData.\n :type tag: c4d.BaseTag\n :return:\n '
data = DataContainer(tag.GetDataInstance())
self.force = c4d.Vector(0, 0, 0)
self.acceleration = c4d.Vector(0, 0, 0)
self.velocity = c4d.Vector(0, 0, 0)
self.position = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset) | Update loop.
:param tag: The instance of the TagData.
:type tag: c4d.BaseTag
:return: | tjiggle.py | Reset | beesperester/cinema4d-jiggle | 1 | python | def Reset(self, tag):
'\n Update loop.\n :param tag: The instance of the TagData.\n :type tag: c4d.BaseTag\n :return:\n '
data = DataContainer(tag.GetDataInstance())
self.force = c4d.Vector(0, 0, 0)
self.acceleration = c4d.Vector(0, 0, 0)
self.velocity = c4d.Vector(0, 0, 0)
self.position = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset) | def Reset(self, tag):
'\n Update loop.\n :param tag: The instance of the TagData.\n :type tag: c4d.BaseTag\n :return:\n '
data = DataContainer(tag.GetDataInstance())
self.force = c4d.Vector(0, 0, 0)
self.acceleration = c4d.Vector(0, 0, 0)
self.velocity = c4d.Vector(0, 0, 0)
self.position = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)<|docstring|>Update loop.
:param tag: The instance of the TagData.
:type tag: c4d.BaseTag
:return:<|endoftext|> |
54ed1f5d105c93fc77e949f8bc730dc1f7a52884977dc80d960d072e4d11810e | def Update(self, tag, doc, op):
"\n Update loop.\n :param tag: The instance of the TagData.\n :type tag: c4d.BaseTag\n :param doc: The host document of the tag's object.\n :type doc: c4d.documents.BaseDocument\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :return:\n "
data = DataContainer(tag.GetDataInstance())
targetPosition = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
direction = (targetPosition - self.position)
self.force = ((direction * data.stiffness) + ((data.gravity / 10.0) / float(doc.GetFps())))
self.acceleration = (self.force / data.mass)
self.velocity = (self.velocity + (self.acceleration * (1.0 - data.damping)))
self.position = ((self.position + self.velocity) + self.force) | Update loop.
:param tag: The instance of the TagData.
:type tag: c4d.BaseTag
:param doc: The host document of the tag's object.
:type doc: c4d.documents.BaseDocument
:param op: The host object of the tag.
:type op: c4d.BaseObject
:return: | tjiggle.py | Update | beesperester/cinema4d-jiggle | 1 | python | def Update(self, tag, doc, op):
"\n Update loop.\n :param tag: The instance of the TagData.\n :type tag: c4d.BaseTag\n :param doc: The host document of the tag's object.\n :type doc: c4d.documents.BaseDocument\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :return:\n "
data = DataContainer(tag.GetDataInstance())
targetPosition = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
direction = (targetPosition - self.position)
self.force = ((direction * data.stiffness) + ((data.gravity / 10.0) / float(doc.GetFps())))
self.acceleration = (self.force / data.mass)
self.velocity = (self.velocity + (self.acceleration * (1.0 - data.damping)))
self.position = ((self.position + self.velocity) + self.force) | def Update(self, tag, doc, op):
"\n Update loop.\n :param tag: The instance of the TagData.\n :type tag: c4d.BaseTag\n :param doc: The host document of the tag's object.\n :type doc: c4d.documents.BaseDocument\n :param op: The host object of the tag.\n :type op: c4d.BaseObject\n :return:\n "
data = DataContainer(tag.GetDataInstance())
targetPosition = Jiggle.CalculateTargetPosition(data.originObject, data.targetOffset)
direction = (targetPosition - self.position)
self.force = ((direction * data.stiffness) + ((data.gravity / 10.0) / float(doc.GetFps())))
self.acceleration = (self.force / data.mass)
self.velocity = (self.velocity + (self.acceleration * (1.0 - data.damping)))
self.position = ((self.position + self.velocity) + self.force)<|docstring|>Update loop.
:param tag: The instance of the TagData.
:type tag: c4d.BaseTag
:param doc: The host document of the tag's object.
:type doc: c4d.documents.BaseDocument
:param op: The host object of the tag.
:type op: c4d.BaseObject
:return:<|endoftext|> |
bb93fa6c22ec0ca0cee03f72f7132386ca7883603e7d659ce1052301ab70270f | def load_data_multiple_runs(folder, runs, spinup_yr=1765, full=True, full_inst=False):
'Input: \n - folder must be a pathlib.Path object\n - runs is string array of runnames in this folder\n - spinup_yr [optional; if other than 1765] is an int if all simulations have equal spinup; otherwise int array\n N.B. needed since file_path = folder + runname + spinup_yr \n - full [optional] if you want (no) full_ave.nc file (e.g. not generated for runs with output every time step)\n - full_inst [optional] if you want full_inst.nc file as well (for special runs diagnosing convection or seasonal cycle)\n \n Output:\n - [datas, data_fulls (optional; default), data_full_inst(optional)] \n contains 1 to 3 dictionaries with runs; depending on chosen parameters\n\n Explanation of output:\n 1) data = data from timeseries_ave.nc output file\n 2) data_full = data from full_ave.nc output file \n 3) data_full_inst = data from full_inst.nc output file\n\n For all 3: the year axis is changed from simulation years to years they represent in C.E.\n \n Author: Jeemijn Scheen, example@example.com'
from xarray import open_dataset
from numpy import ndarray
datas = {}
if full:
data_fulls = {}
if full_inst:
data_fulls_inst = {}
subtract_yrs = spinup_yr
for (nr, runname) in enumerate(runs):
if (spinup_yr == 0):
spinup_yr_str = '0000'
else:
if isinstance(spinup_yr, (list, tuple, ndarray)):
spinup_yr = spinup_yr[nr]
spinup_yr_str = str(spinup_yr)
file = ((runname + '.000') + spinup_yr_str)
datas[runname] = open_dataset((folder / (file + '_timeseries_ave.nc')), decode_times=False)
datas[runname]['time'] -= subtract_yrs
if full:
data_fulls[runname] = open_dataset((folder / (file + '_full_ave.nc')), decode_times=False)
data_fulls[runname]['time'] -= subtract_yrs
if full_inst:
data_fulls_inst[runname] = open_dataset((folder / (file + '_full_inst.nc')), decode_times=False, chunks={'yearstep_oc': 20})
data_fulls_inst[runname]['time'] -= subtract_yrs
res = [datas]
if full:
res.append(data_fulls)
if full_inst:
res.append(data_fulls_inst)
return res | Input:
- folder must be a pathlib.Path object
- runs is string array of runnames in this folder
- spinup_yr [optional; if other than 1765] is an int if all simulations have equal spinup; otherwise int array
N.B. needed since file_path = folder + runname + spinup_yr
- full [optional] if you want (no) full_ave.nc file (e.g. not generated for runs with output every time step)
- full_inst [optional] if you want full_inst.nc file as well (for special runs diagnosing convection or seasonal cycle)
Output:
- [datas, data_fulls (optional; default), data_full_inst(optional)]
contains 1 to 3 dictionaries with runs; depending on chosen parameters
Explanation of output:
1) data = data from timeseries_ave.nc output file
2) data_full = data from full_ave.nc output file
3) data_full_inst = data from full_inst.nc output file
For all 3: the year axis is changed from simulation years to years they represent in C.E.
Author: Jeemijn Scheen, example@example.com | functions.py | load_data_multiple_runs | jeemijn/LIA | 0 | python | def load_data_multiple_runs(folder, runs, spinup_yr=1765, full=True, full_inst=False):
'Input: \n - folder must be a pathlib.Path object\n - runs is string array of runnames in this folder\n - spinup_yr [optional; if other than 1765] is an int if all simulations have equal spinup; otherwise int array\n N.B. needed since file_path = folder + runname + spinup_yr \n - full [optional] if you want (no) full_ave.nc file (e.g. not generated for runs with output every time step)\n - full_inst [optional] if you want full_inst.nc file as well (for special runs diagnosing convection or seasonal cycle)\n \n Output:\n - [datas, data_fulls (optional; default), data_full_inst(optional)] \n contains 1 to 3 dictionaries with runs; depending on chosen parameters\n\n Explanation of output:\n 1) data = data from timeseries_ave.nc output file\n 2) data_full = data from full_ave.nc output file \n 3) data_full_inst = data from full_inst.nc output file\n\n For all 3: the year axis is changed from simulation years to years they represent in C.E.\n \n Author: Jeemijn Scheen, example@example.com'
from xarray import open_dataset
from numpy import ndarray
datas = {}
if full:
data_fulls = {}
if full_inst:
data_fulls_inst = {}
subtract_yrs = spinup_yr
for (nr, runname) in enumerate(runs):
if (spinup_yr == 0):
spinup_yr_str = '0000'
else:
if isinstance(spinup_yr, (list, tuple, ndarray)):
spinup_yr = spinup_yr[nr]
spinup_yr_str = str(spinup_yr)
file = ((runname + '.000') + spinup_yr_str)
datas[runname] = open_dataset((folder / (file + '_timeseries_ave.nc')), decode_times=False)
datas[runname]['time'] -= subtract_yrs
if full:
data_fulls[runname] = open_dataset((folder / (file + '_full_ave.nc')), decode_times=False)
data_fulls[runname]['time'] -= subtract_yrs
if full_inst:
data_fulls_inst[runname] = open_dataset((folder / (file + '_full_inst.nc')), decode_times=False, chunks={'yearstep_oc': 20})
data_fulls_inst[runname]['time'] -= subtract_yrs
res = [datas]
if full:
res.append(data_fulls)
if full_inst:
res.append(data_fulls_inst)
return res | def load_data_multiple_runs(folder, runs, spinup_yr=1765, full=True, full_inst=False):
'Input: \n - folder must be a pathlib.Path object\n - runs is string array of runnames in this folder\n - spinup_yr [optional; if other than 1765] is an int if all simulations have equal spinup; otherwise int array\n N.B. needed since file_path = folder + runname + spinup_yr \n - full [optional] if you want (no) full_ave.nc file (e.g. not generated for runs with output every time step)\n - full_inst [optional] if you want full_inst.nc file as well (for special runs diagnosing convection or seasonal cycle)\n \n Output:\n - [datas, data_fulls (optional; default), data_full_inst(optional)] \n contains 1 to 3 dictionaries with runs; depending on chosen parameters\n\n Explanation of output:\n 1) data = data from timeseries_ave.nc output file\n 2) data_full = data from full_ave.nc output file \n 3) data_full_inst = data from full_inst.nc output file\n\n For all 3: the year axis is changed from simulation years to years they represent in C.E.\n \n Author: Jeemijn Scheen, example@example.com'
from xarray import open_dataset
from numpy import ndarray
datas = {}
if full:
data_fulls = {}
if full_inst:
data_fulls_inst = {}
subtract_yrs = spinup_yr
for (nr, runname) in enumerate(runs):
if (spinup_yr == 0):
spinup_yr_str = '0000'
else:
if isinstance(spinup_yr, (list, tuple, ndarray)):
spinup_yr = spinup_yr[nr]
spinup_yr_str = str(spinup_yr)
file = ((runname + '.000') + spinup_yr_str)
datas[runname] = open_dataset((folder / (file + '_timeseries_ave.nc')), decode_times=False)
datas[runname]['time'] -= subtract_yrs
if full:
data_fulls[runname] = open_dataset((folder / (file + '_full_ave.nc')), decode_times=False)
data_fulls[runname]['time'] -= subtract_yrs
if full_inst:
data_fulls_inst[runname] = open_dataset((folder / (file + '_full_inst.nc')), decode_times=False, chunks={'yearstep_oc': 20})
data_fulls_inst[runname]['time'] -= subtract_yrs
res = [datas]
if full:
res.append(data_fulls)
if full_inst:
res.append(data_fulls_inst)
return res<|docstring|>Input:
- folder must be a pathlib.Path object
- runs is string array of runnames in this folder
- spinup_yr [optional; if other than 1765] is an int if all simulations have equal spinup; otherwise int array
N.B. needed since file_path = folder + runname + spinup_yr
- full [optional] if you want (no) full_ave.nc file (e.g. not generated for runs with output every time step)
- full_inst [optional] if you want full_inst.nc file as well (for special runs diagnosing convection or seasonal cycle)
Output:
- [datas, data_fulls (optional; default), data_full_inst(optional)]
contains 1 to 3 dictionaries with runs; depending on chosen parameters
Explanation of output:
1) data = data from timeseries_ave.nc output file
2) data_full = data from full_ave.nc output file
3) data_full_inst = data from full_inst.nc output file
For all 3: the year axis is changed from simulation years to years they represent in C.E.
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
6b1eb43ab0961700501bd117a1289591feffd9eae67eb6da4cf8722a0e4f9080 | def area_mean(obj, obj_with_data_var, keep_lat=False, keep_lon=False, basin=''):
"Takes horizontal area-weighted average of a certain data_var. \n Note: another averaging method is implemented in vol_mean(): more intuitive; same result\n although this function area_mean rounds differently in the last digits (* data_var_no0 / data_var_no0 below)\n SO IT IS RECOMMENDED TO USE VOL_MEAN INSTEAD\n \n - obj must be a DataSet with data variable 'area' and coordinates 'lat_t', 'lon_t'\n - obj_with_data_var must contain the data_var wanted e.g. data_full.TEMP\n - basin can be set; otherwise the result will be too small by a fixed factor. \n options: 'pac' and 'atl' (mask 2 and 1, resp.) and 'pacso' and 'atlso' (masks 2 and 1, resp.)\n - if keep_lat is True then latitude is kept as a variable and the area_weight is only done over longitude. \n - if keep_lon is True then area_weight is only done over latitude.\n \n Author: Jeemijn Scheen, example@example.com"
if (keep_lat and keep_lon):
raise Exception('not possible to average when both keep_lat and keep_lon.')
weighted_data = (obj_with_data_var * obj.area)
if ('z_t' in obj_with_data_var.dims):
mask = obj.mask
masks = obj.masks
else:
mask = obj.mask.isel(z_t=0)
masks = obj.masks.isel(z_t=0)
if (basin == 'pac'):
data_var_no0 = obj_with_data_var.where((mask == 2)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == 'atl'):
data_var_no0 = obj_with_data_var.where((mask == 1)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == 'so'):
data_var_no0 = obj_with_data_var.where((mask == 4)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == 'pacso'):
data_var_no0 = obj_with_data_var.where((masks == 2)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == 'atlso'):
data_var_no0 = obj_with_data_var.where((masks == 1)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == ''):
data_var_no0 = obj_with_data_var.where((obj_with_data_var != 0.0), 1.0)
else:
raise Exception("basin should be empty '' or one out of: 'pac', 'atl', 'pacso', 'atlso', 'so'.")
area = ((obj.area * data_var_no0) / data_var_no0)
if keep_lat:
weights = area.sum(dim='lon_t')
return (weighted_data.sum(dim='lon_t') / weights.where((weights != 0)))
elif keep_lon:
weights = area.sum(dim='lat_t')
return (weighted_data.sum(dim='lat_t') / weights.where((weights != 0)))
else:
weights = area.sum(dim='lat_t').sum(dim='lon_t')
return (weighted_data.sum(dim='lon_t').sum(dim='lat_t') / weights.where((weights != 0))) | Takes horizontal area-weighted average of a certain data_var.
Note: another averaging method is implemented in vol_mean(): more intuitive; same result
although this function area_mean rounds differently in the last digits (* data_var_no0 / data_var_no0 below)
SO IT IS RECOMMENDED TO USE VOL_MEAN INSTEAD
- obj must be a DataSet with data variable 'area' and coordinates 'lat_t', 'lon_t'
- obj_with_data_var must contain the data_var wanted e.g. data_full.TEMP
- basin can be set; otherwise the result will be too small by a fixed factor.
options: 'pac' and 'atl' (mask 2 and 1, resp.) and 'pacso' and 'atlso' (masks 2 and 1, resp.)
- if keep_lat is True then latitude is kept as a variable and the area_weight is only done over longitude.
- if keep_lon is True then area_weight is only done over latitude.
Author: Jeemijn Scheen, example@example.com | functions.py | area_mean | jeemijn/LIA | 0 | python | def area_mean(obj, obj_with_data_var, keep_lat=False, keep_lon=False, basin=):
"Takes horizontal area-weighted average of a certain data_var. \n Note: another averaging method is implemented in vol_mean(): more intuitive; same result\n although this function area_mean rounds differently in the last digits (* data_var_no0 / data_var_no0 below)\n SO IT IS RECOMMENDED TO USE VOL_MEAN INSTEAD\n \n - obj must be a DataSet with data variable 'area' and coordinates 'lat_t', 'lon_t'\n - obj_with_data_var must contain the data_var wanted e.g. data_full.TEMP\n - basin can be set; otherwise the result will be too small by a fixed factor. \n options: 'pac' and 'atl' (mask 2 and 1, resp.) and 'pacso' and 'atlso' (masks 2 and 1, resp.)\n - if keep_lat is True then latitude is kept as a variable and the area_weight is only done over longitude. \n - if keep_lon is True then area_weight is only done over latitude.\n \n Author: Jeemijn Scheen, example@example.com"
if (keep_lat and keep_lon):
raise Exception('not possible to average when both keep_lat and keep_lon.')
weighted_data = (obj_with_data_var * obj.area)
if ('z_t' in obj_with_data_var.dims):
mask = obj.mask
masks = obj.masks
else:
mask = obj.mask.isel(z_t=0)
masks = obj.masks.isel(z_t=0)
if (basin == 'pac'):
data_var_no0 = obj_with_data_var.where((mask == 2)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == 'atl'):
data_var_no0 = obj_with_data_var.where((mask == 1)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == 'so'):
data_var_no0 = obj_with_data_var.where((mask == 4)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == 'pacso'):
data_var_no0 = obj_with_data_var.where((masks == 2)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == 'atlso'):
data_var_no0 = obj_with_data_var.where((masks == 1)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == ):
data_var_no0 = obj_with_data_var.where((obj_with_data_var != 0.0), 1.0)
else:
raise Exception("basin should be empty or one out of: 'pac', 'atl', 'pacso', 'atlso', 'so'.")
area = ((obj.area * data_var_no0) / data_var_no0)
if keep_lat:
weights = area.sum(dim='lon_t')
return (weighted_data.sum(dim='lon_t') / weights.where((weights != 0)))
elif keep_lon:
weights = area.sum(dim='lat_t')
return (weighted_data.sum(dim='lat_t') / weights.where((weights != 0)))
else:
weights = area.sum(dim='lat_t').sum(dim='lon_t')
return (weighted_data.sum(dim='lon_t').sum(dim='lat_t') / weights.where((weights != 0))) | def area_mean(obj, obj_with_data_var, keep_lat=False, keep_lon=False, basin=):
"Takes horizontal area-weighted average of a certain data_var. \n Note: another averaging method is implemented in vol_mean(): more intuitive; same result\n although this function area_mean rounds differently in the last digits (* data_var_no0 / data_var_no0 below)\n SO IT IS RECOMMENDED TO USE VOL_MEAN INSTEAD\n \n - obj must be a DataSet with data variable 'area' and coordinates 'lat_t', 'lon_t'\n - obj_with_data_var must contain the data_var wanted e.g. data_full.TEMP\n - basin can be set; otherwise the result will be too small by a fixed factor. \n options: 'pac' and 'atl' (mask 2 and 1, resp.) and 'pacso' and 'atlso' (masks 2 and 1, resp.)\n - if keep_lat is True then latitude is kept as a variable and the area_weight is only done over longitude. \n - if keep_lon is True then area_weight is only done over latitude.\n \n Author: Jeemijn Scheen, example@example.com"
if (keep_lat and keep_lon):
raise Exception('not possible to average when both keep_lat and keep_lon.')
weighted_data = (obj_with_data_var * obj.area)
if ('z_t' in obj_with_data_var.dims):
mask = obj.mask
masks = obj.masks
else:
mask = obj.mask.isel(z_t=0)
masks = obj.masks.isel(z_t=0)
if (basin == 'pac'):
data_var_no0 = obj_with_data_var.where((mask == 2)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == 'atl'):
data_var_no0 = obj_with_data_var.where((mask == 1)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == 'so'):
data_var_no0 = obj_with_data_var.where((mask == 4)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == 'pacso'):
data_var_no0 = obj_with_data_var.where((masks == 2)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == 'atlso'):
data_var_no0 = obj_with_data_var.where((masks == 1)).where((obj_with_data_var != 0.0), 1.0)
elif (basin == ):
data_var_no0 = obj_with_data_var.where((obj_with_data_var != 0.0), 1.0)
else:
raise Exception("basin should be empty or one out of: 'pac', 'atl', 'pacso', 'atlso', 'so'.")
area = ((obj.area * data_var_no0) / data_var_no0)
if keep_lat:
weights = area.sum(dim='lon_t')
return (weighted_data.sum(dim='lon_t') / weights.where((weights != 0)))
elif keep_lon:
weights = area.sum(dim='lat_t')
return (weighted_data.sum(dim='lat_t') / weights.where((weights != 0)))
else:
weights = area.sum(dim='lat_t').sum(dim='lon_t')
return (weighted_data.sum(dim='lon_t').sum(dim='lat_t') / weights.where((weights != 0)))<|docstring|>Takes horizontal area-weighted average of a certain data_var.
Note: another averaging method is implemented in vol_mean(): more intuitive; same result
although this function area_mean rounds differently in the last digits (* data_var_no0 / data_var_no0 below)
SO IT IS RECOMMENDED TO USE VOL_MEAN INSTEAD
- obj must be a DataSet with data variable 'area' and coordinates 'lat_t', 'lon_t'
- obj_with_data_var must contain the data_var wanted e.g. data_full.TEMP
- basin can be set; otherwise the result will be too small by a fixed factor.
options: 'pac' and 'atl' (mask 2 and 1, resp.) and 'pacso' and 'atlso' (masks 2 and 1, resp.)
- if keep_lat is True then latitude is kept as a variable and the area_weight is only done over longitude.
- if keep_lon is True then area_weight is only done over latitude.
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
24f4736aed71c3d62b8a977b1961c184015b52c6c8c9fdc6546ede7a2d7af59f | def vol_mean(data_obj, vol, keep_z=False, keep_latlon=False):
'Takes volume-weighted average of a certain data_var in horizontal and/or vertical direction. \n If the data_var has a time coord, then this time coord is always kept (output is array).\n If the data_var has a z coord, then this is kept only if keep_z is True (otherwise averaged over z as well).\n Input:\n - data_obj must be the data_var wanted (e.g. data_full.TEMP) with coords: \n lat_t & lon_t & optionally time or z_t\n - vol must contain the grid-cell volumes i.e. data_full.boxvol\n - keep_z indicates whether to keep the z_t dimension [default False]\n - keep_latlon indicates whether to keep the lat and lon dimension [default False]\n NB keep_z and keep_latlon cannot both be true.\n Output:\n - average over lat and lon (if keep_latlon is False) and over z (if keep_z is False).\n Default output: scalar\n If obj has a time coord: 1D array in time\n If keep_z is True: 1D array in z \n If keep_latlon is True: 2D array in lat,lon\n If both keep_z and time: 2D array in time and z\n If both keep_latlon and time: 3D array in time, lat and lon\n \n Author: Jeemijn Scheen, example@example.com'
from numpy import tile, isnan, average, sort
from xarray import DataArray
obj = data_obj.copy(deep=True)
coords = obj.dims
if (keep_z and keep_latlon):
raise Exception('with keep_z and keep_latlon both True, there is no average to compute.')
if (('z_t' not in coords) and ('z_t' in vol.dims)):
weights = vol.isel(z_t=0).values
else:
weights = vol.values
if ('time' in coords):
if ('z_t' in coords):
weights = tile(weights, (len(obj.time), 1, 1, 1))
else:
weights = tile(weights, (len(obj.time), 1, 1))
try:
weights[isnan(obj.values)] = 0
obj.values[isnan(obj.values)] = 0
except:
raise Exception(((('the shape of weights ' + str(weights.shape)) + ' is not equal to that of data_var ') + str(obj.shape)))
axes = []
if (keep_latlon is False):
axes.append(coords.index('lat_t'))
axes.append(coords.index('lon_t'))
if (('z_t' in coords) and (keep_z is False)):
axes.append(coords.index('z_t'))
axes = tuple(sort(axes))
res = average(obj, axis=axes, weights=weights)
if (len(coords) == len(axes)):
return res
elif (len(res.shape) == 1):
if ('time' in coords):
return DataArray(res, coords=[obj.time], dims=['time'])
elif keep_z:
return DataArray(res, coords=[obj.z_t], dims=['z_t'])
elif (len(res.shape) == 2):
if ('time' in coords):
return DataArray(res, coords=[obj.time, obj.z_t], dims=['time', 'z_t'])
elif keep_latlon:
return DataArray(res, coords=[obj.lat_t, obj.lon_t], dims=['lat_t', 'lon_t'])
elif (len(res.shape) == 3):
return DataArray(res, coords=[obj.time, obj.lat_t, obj.lon_t], dims=['time', 'lat_t', 'lon_t'])
else:
raise Exception('something went wrong') | Takes volume-weighted average of a certain data_var in horizontal and/or vertical direction.
If the data_var has a time coord, then this time coord is always kept (output is array).
If the data_var has a z coord, then this is kept only if keep_z is True (otherwise averaged over z as well).
Input:
- data_obj must be the data_var wanted (e.g. data_full.TEMP) with coords:
lat_t & lon_t & optionally time or z_t
- vol must contain the grid-cell volumes i.e. data_full.boxvol
- keep_z indicates whether to keep the z_t dimension [default False]
- keep_latlon indicates whether to keep the lat and lon dimension [default False]
NB keep_z and keep_latlon cannot both be true.
Output:
- average over lat and lon (if keep_latlon is False) and over z (if keep_z is False).
Default output: scalar
If obj has a time coord: 1D array in time
If keep_z is True: 1D array in z
If keep_latlon is True: 2D array in lat,lon
If both keep_z and time: 2D array in time and z
If both keep_latlon and time: 3D array in time, lat and lon
Author: Jeemijn Scheen, example@example.com | functions.py | vol_mean | jeemijn/LIA | 0 | python | def vol_mean(data_obj, vol, keep_z=False, keep_latlon=False):
'Takes volume-weighted average of a certain data_var in horizontal and/or vertical direction. \n If the data_var has a time coord, then this time coord is always kept (output is array).\n If the data_var has a z coord, then this is kept only if keep_z is True (otherwise averaged over z as well).\n Input:\n - data_obj must be the data_var wanted (e.g. data_full.TEMP) with coords: \n lat_t & lon_t & optionally time or z_t\n - vol must contain the grid-cell volumes i.e. data_full.boxvol\n - keep_z indicates whether to keep the z_t dimension [default False]\n - keep_latlon indicates whether to keep the lat and lon dimension [default False]\n NB keep_z and keep_latlon cannot both be true.\n Output:\n - average over lat and lon (if keep_latlon is False) and over z (if keep_z is False).\n Default output: scalar\n If obj has a time coord: 1D array in time\n If keep_z is True: 1D array in z \n If keep_latlon is True: 2D array in lat,lon\n If both keep_z and time: 2D array in time and z\n If both keep_latlon and time: 3D array in time, lat and lon\n \n Author: Jeemijn Scheen, example@example.com'
from numpy import tile, isnan, average, sort
from xarray import DataArray
obj = data_obj.copy(deep=True)
coords = obj.dims
if (keep_z and keep_latlon):
raise Exception('with keep_z and keep_latlon both True, there is no average to compute.')
if (('z_t' not in coords) and ('z_t' in vol.dims)):
weights = vol.isel(z_t=0).values
else:
weights = vol.values
if ('time' in coords):
if ('z_t' in coords):
weights = tile(weights, (len(obj.time), 1, 1, 1))
else:
weights = tile(weights, (len(obj.time), 1, 1))
try:
weights[isnan(obj.values)] = 0
obj.values[isnan(obj.values)] = 0
except:
raise Exception(((('the shape of weights ' + str(weights.shape)) + ' is not equal to that of data_var ') + str(obj.shape)))
axes = []
if (keep_latlon is False):
axes.append(coords.index('lat_t'))
axes.append(coords.index('lon_t'))
if (('z_t' in coords) and (keep_z is False)):
axes.append(coords.index('z_t'))
axes = tuple(sort(axes))
res = average(obj, axis=axes, weights=weights)
if (len(coords) == len(axes)):
return res
elif (len(res.shape) == 1):
if ('time' in coords):
return DataArray(res, coords=[obj.time], dims=['time'])
elif keep_z:
return DataArray(res, coords=[obj.z_t], dims=['z_t'])
elif (len(res.shape) == 2):
if ('time' in coords):
return DataArray(res, coords=[obj.time, obj.z_t], dims=['time', 'z_t'])
elif keep_latlon:
return DataArray(res, coords=[obj.lat_t, obj.lon_t], dims=['lat_t', 'lon_t'])
elif (len(res.shape) == 3):
return DataArray(res, coords=[obj.time, obj.lat_t, obj.lon_t], dims=['time', 'lat_t', 'lon_t'])
else:
raise Exception('something went wrong') | def vol_mean(data_obj, vol, keep_z=False, keep_latlon=False):
'Takes volume-weighted average of a certain data_var in horizontal and/or vertical direction. \n If the data_var has a time coord, then this time coord is always kept (output is array).\n If the data_var has a z coord, then this is kept only if keep_z is True (otherwise averaged over z as well).\n Input:\n - data_obj must be the data_var wanted (e.g. data_full.TEMP) with coords: \n lat_t & lon_t & optionally time or z_t\n - vol must contain the grid-cell volumes i.e. data_full.boxvol\n - keep_z indicates whether to keep the z_t dimension [default False]\n - keep_latlon indicates whether to keep the lat and lon dimension [default False]\n NB keep_z and keep_latlon cannot both be true.\n Output:\n - average over lat and lon (if keep_latlon is False) and over z (if keep_z is False).\n Default output: scalar\n If obj has a time coord: 1D array in time\n If keep_z is True: 1D array in z \n If keep_latlon is True: 2D array in lat,lon\n If both keep_z and time: 2D array in time and z\n If both keep_latlon and time: 3D array in time, lat and lon\n \n Author: Jeemijn Scheen, example@example.com'
from numpy import tile, isnan, average, sort
from xarray import DataArray
obj = data_obj.copy(deep=True)
coords = obj.dims
if (keep_z and keep_latlon):
raise Exception('with keep_z and keep_latlon both True, there is no average to compute.')
if (('z_t' not in coords) and ('z_t' in vol.dims)):
weights = vol.isel(z_t=0).values
else:
weights = vol.values
if ('time' in coords):
if ('z_t' in coords):
weights = tile(weights, (len(obj.time), 1, 1, 1))
else:
weights = tile(weights, (len(obj.time), 1, 1))
try:
weights[isnan(obj.values)] = 0
obj.values[isnan(obj.values)] = 0
except:
raise Exception(((('the shape of weights ' + str(weights.shape)) + ' is not equal to that of data_var ') + str(obj.shape)))
axes = []
if (keep_latlon is False):
axes.append(coords.index('lat_t'))
axes.append(coords.index('lon_t'))
if (('z_t' in coords) and (keep_z is False)):
axes.append(coords.index('z_t'))
axes = tuple(sort(axes))
res = average(obj, axis=axes, weights=weights)
if (len(coords) == len(axes)):
return res
elif (len(res.shape) == 1):
if ('time' in coords):
return DataArray(res, coords=[obj.time], dims=['time'])
elif keep_z:
return DataArray(res, coords=[obj.z_t], dims=['z_t'])
elif (len(res.shape) == 2):
if ('time' in coords):
return DataArray(res, coords=[obj.time, obj.z_t], dims=['time', 'z_t'])
elif keep_latlon:
return DataArray(res, coords=[obj.lat_t, obj.lon_t], dims=['lat_t', 'lon_t'])
elif (len(res.shape) == 3):
return DataArray(res, coords=[obj.time, obj.lat_t, obj.lon_t], dims=['time', 'lat_t', 'lon_t'])
else:
raise Exception('something went wrong')<|docstring|>Takes volume-weighted average of a certain data_var in horizontal and/or vertical direction.
If the data_var has a time coord, then this time coord is always kept (output is array).
If the data_var has a z coord, then this is kept only if keep_z is True (otherwise averaged over z as well).
Input:
- data_obj must be the data_var wanted (e.g. data_full.TEMP) with coords:
lat_t & lon_t & optionally time or z_t
- vol must contain the grid-cell volumes i.e. data_full.boxvol
- keep_z indicates whether to keep the z_t dimension [default False]
- keep_latlon indicates whether to keep the lat and lon dimension [default False]
NB keep_z and keep_latlon cannot both be true.
Output:
- average over lat and lon (if keep_latlon is False) and over z (if keep_z is False).
Default output: scalar
If obj has a time coord: 1D array in time
If keep_z is True: 1D array in z
If keep_latlon is True: 2D array in lat,lon
If both keep_z and time: 2D array in time and z
If both keep_latlon and time: 3D array in time, lat and lon
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
9e46ac25827356d6aa8296abc076ae5e73ff6d127d29e31a0e00e50547df6521 | def area_mean_dye_regions(obj, boxvol, region=''):
"Takes area averaged over a certain dye region, regarding mask with 8 dye tracers as defined below.\n Input:\n - obj is object to average with [time, lat, lon] coords e.g. sst\n - boxvol is .boxvol object (not sliced)\n - dye is a string out of 'NADW' 'NAIW' 'SAIW' 'NPIW' 'SPIW' 'SO' 'Arctic' 'Tropics'\n Note that this function is confusing because input is the dye name of a water mass (NADW) but what is actually used\n is a certain (surface) area corresponding to that like North Atlantic. \n Output:\n - area average over requested dye region, keeping the time coordinate\n Author: Jeemijn Scheen, example@example.com"
from xarray import where
from numpy import nan
vol = boxvol.isel(z_t=0)
if (region == ''):
raise Exception('Enter a dye region')
elif (region == 'NADW'):
return vol_mean(obj[(:, 33:38, 20:32)], vol[(33:38, 20:32)])
elif (region == 'SO'):
return vol_mean(obj[(:, 0:9, :)], vol[(0:9, :)])
elif (region == 'NAIW'):
return vol_mean(obj[(:, 29:33, 19:35)], vol[(29:33, 19:35)])
elif (region == 'SAIW'):
return vol_mean(obj[(:, 9:13, 21:33)], vol[(9:13, 21:33)])
elif (region == 'NPIW'):
return vol_mean(obj[(:, 29:37, 2:14)], vol[(29:37, 2:14)])
elif (region == 'SPIW'):
return vol_mean(obj[(:, 9:13, 5:20)], vol[(9:13, 5:20)])
elif ((region == 'Arctic') or (region == 'arctic')):
mask = ((((obj.lat_t > 70) & (obj.lat_t < 75)) & (obj.lon_t > 290)) & (obj.lon_t < 375))
obj_nordic = where(mask, nan, obj)
obj_nordic = obj_nordic.transpose('time', 'lat_t', 'lon_t')
vol_nordic = where(mask, nan, vol)
obj_nordic = obj_nordic[(:, 37:40, :)]
vol_nordic = vol_nordic[(37:40, :)]
return vol_mean(obj_nordic, vol_nordic)
elif ((region == 'Tropics') or (region == 'tropics')):
mask = ((((obj.lat_t > (- 48)) & (obj.lat_t < (- 30))) & (obj.lon_t > 150)) & (obj.lon_t < 380))
obj_trop = where(mask, nan, obj)
obj_trop = obj_trop.transpose('time', 'lat_t', 'lon_t')
vol_trop = where(mask, nan, vol)
obj_trop = obj_trop[(:, 9:29, :)]
vol_trop = vol_trop[(9:29, :)]
return vol_mean(obj_trop, vol_trop)
else:
raise Exception('Enter a valid dye region') | Takes area averaged over a certain dye region, regarding mask with 8 dye tracers as defined below.
Input:
- obj is object to average with [time, lat, lon] coords e.g. sst
- boxvol is .boxvol object (not sliced)
- dye is a string out of 'NADW' 'NAIW' 'SAIW' 'NPIW' 'SPIW' 'SO' 'Arctic' 'Tropics'
Note that this function is confusing because input is the dye name of a water mass (NADW) but what is actually used
is a certain (surface) area corresponding to that like North Atlantic.
Output:
- area average over requested dye region, keeping the time coordinate
Author: Jeemijn Scheen, example@example.com | functions.py | area_mean_dye_regions | jeemijn/LIA | 0 | python | def area_mean_dye_regions(obj, boxvol, region=):
"Takes area averaged over a certain dye region, regarding mask with 8 dye tracers as defined below.\n Input:\n - obj is object to average with [time, lat, lon] coords e.g. sst\n - boxvol is .boxvol object (not sliced)\n - dye is a string out of 'NADW' 'NAIW' 'SAIW' 'NPIW' 'SPIW' 'SO' 'Arctic' 'Tropics'\n Note that this function is confusing because input is the dye name of a water mass (NADW) but what is actually used\n is a certain (surface) area corresponding to that like North Atlantic. \n Output:\n - area average over requested dye region, keeping the time coordinate\n Author: Jeemijn Scheen, example@example.com"
from xarray import where
from numpy import nan
vol = boxvol.isel(z_t=0)
if (region == ):
raise Exception('Enter a dye region')
elif (region == 'NADW'):
return vol_mean(obj[(:, 33:38, 20:32)], vol[(33:38, 20:32)])
elif (region == 'SO'):
return vol_mean(obj[(:, 0:9, :)], vol[(0:9, :)])
elif (region == 'NAIW'):
return vol_mean(obj[(:, 29:33, 19:35)], vol[(29:33, 19:35)])
elif (region == 'SAIW'):
return vol_mean(obj[(:, 9:13, 21:33)], vol[(9:13, 21:33)])
elif (region == 'NPIW'):
return vol_mean(obj[(:, 29:37, 2:14)], vol[(29:37, 2:14)])
elif (region == 'SPIW'):
return vol_mean(obj[(:, 9:13, 5:20)], vol[(9:13, 5:20)])
elif ((region == 'Arctic') or (region == 'arctic')):
mask = ((((obj.lat_t > 70) & (obj.lat_t < 75)) & (obj.lon_t > 290)) & (obj.lon_t < 375))
obj_nordic = where(mask, nan, obj)
obj_nordic = obj_nordic.transpose('time', 'lat_t', 'lon_t')
vol_nordic = where(mask, nan, vol)
obj_nordic = obj_nordic[(:, 37:40, :)]
vol_nordic = vol_nordic[(37:40, :)]
return vol_mean(obj_nordic, vol_nordic)
elif ((region == 'Tropics') or (region == 'tropics')):
mask = ((((obj.lat_t > (- 48)) & (obj.lat_t < (- 30))) & (obj.lon_t > 150)) & (obj.lon_t < 380))
obj_trop = where(mask, nan, obj)
obj_trop = obj_trop.transpose('time', 'lat_t', 'lon_t')
vol_trop = where(mask, nan, vol)
obj_trop = obj_trop[(:, 9:29, :)]
vol_trop = vol_trop[(9:29, :)]
return vol_mean(obj_trop, vol_trop)
else:
raise Exception('Enter a valid dye region') | def area_mean_dye_regions(obj, boxvol, region=):
"Takes area averaged over a certain dye region, regarding mask with 8 dye tracers as defined below.\n Input:\n - obj is object to average with [time, lat, lon] coords e.g. sst\n - boxvol is .boxvol object (not sliced)\n - dye is a string out of 'NADW' 'NAIW' 'SAIW' 'NPIW' 'SPIW' 'SO' 'Arctic' 'Tropics'\n Note that this function is confusing because input is the dye name of a water mass (NADW) but what is actually used\n is a certain (surface) area corresponding to that like North Atlantic. \n Output:\n - area average over requested dye region, keeping the time coordinate\n Author: Jeemijn Scheen, example@example.com"
from xarray import where
from numpy import nan
vol = boxvol.isel(z_t=0)
if (region == ):
raise Exception('Enter a dye region')
elif (region == 'NADW'):
return vol_mean(obj[(:, 33:38, 20:32)], vol[(33:38, 20:32)])
elif (region == 'SO'):
return vol_mean(obj[(:, 0:9, :)], vol[(0:9, :)])
elif (region == 'NAIW'):
return vol_mean(obj[(:, 29:33, 19:35)], vol[(29:33, 19:35)])
elif (region == 'SAIW'):
return vol_mean(obj[(:, 9:13, 21:33)], vol[(9:13, 21:33)])
elif (region == 'NPIW'):
return vol_mean(obj[(:, 29:37, 2:14)], vol[(29:37, 2:14)])
elif (region == 'SPIW'):
return vol_mean(obj[(:, 9:13, 5:20)], vol[(9:13, 5:20)])
elif ((region == 'Arctic') or (region == 'arctic')):
mask = ((((obj.lat_t > 70) & (obj.lat_t < 75)) & (obj.lon_t > 290)) & (obj.lon_t < 375))
obj_nordic = where(mask, nan, obj)
obj_nordic = obj_nordic.transpose('time', 'lat_t', 'lon_t')
vol_nordic = where(mask, nan, vol)
obj_nordic = obj_nordic[(:, 37:40, :)]
vol_nordic = vol_nordic[(37:40, :)]
return vol_mean(obj_nordic, vol_nordic)
elif ((region == 'Tropics') or (region == 'tropics')):
mask = ((((obj.lat_t > (- 48)) & (obj.lat_t < (- 30))) & (obj.lon_t > 150)) & (obj.lon_t < 380))
obj_trop = where(mask, nan, obj)
obj_trop = obj_trop.transpose('time', 'lat_t', 'lon_t')
vol_trop = where(mask, nan, vol)
obj_trop = obj_trop[(:, 9:29, :)]
vol_trop = vol_trop[(9:29, :)]
return vol_mean(obj_trop, vol_trop)
else:
raise Exception('Enter a valid dye region')<|docstring|>Takes area averaged over a certain dye region, regarding mask with 8 dye tracers as defined below.
Input:
- obj is object to average with [time, lat, lon] coords e.g. sst
- boxvol is .boxvol object (not sliced)
- dye is a string out of 'NADW' 'NAIW' 'SAIW' 'NPIW' 'SPIW' 'SO' 'Arctic' 'Tropics'
Note that this function is confusing because input is the dye name of a water mass (NADW) but what is actually used
is a certain (surface) area corresponding to that like North Atlantic.
Output:
- area average over requested dye region, keeping the time coordinate
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
e4bb08ecb2aec537d1c5ffcdb0c38e89337780d71bf40e8829cc663132b14fde | def temp_basin(run_t, run_f, anoms=True):
'Prepares temperature anomaly data per basin. This can be used for Hoevmiller plot or leads and lags plot.\n Input:\n - run_t is data_full of transient run\n - run_f is data_full of fixed run\n - anoms determines whether returned values are anomalies (and the unit of output)\n \n Output:\n - if anoms [default]: \n temperature anomaly in centi-Kelvin w.r.t. year 0 per basin and per simulation (transient or fixed) \n in this order:\n [pac_t, pac_f, atl_t, atl_f, so_t, so_f]\n - if not anoms:\n temperature in centi-Celsius per basin and per simulation (transient or fixed) in the same order\n \n NB the pac and atl mask exclude the southern ocean.\n \n Author: Jeemijn Scheen, example@example.com'
vol = run_t.boxvol
pac_t = vol_mean(run_t.TEMP.where((run_t.mask == 2)), vol, keep_z=True)
pac_f = vol_mean(run_f.TEMP.where((run_t.mask == 2)), vol, keep_z=True)
atl_t = vol_mean(run_t.TEMP.where((run_t.mask == 1)), vol, keep_z=True)
atl_f = vol_mean(run_f.TEMP.where((run_t.mask == 1)), vol, keep_z=True)
so_t = vol_mean(run_t.TEMP.where((run_t.mask == 4)), vol, keep_z=True)
so_f = vol_mean(run_f.TEMP.where((run_t.mask == 4)), vol, keep_z=True)
if (anoms == False):
return [(100 * x) for x in [pac_t, pac_f, atl_t, atl_f, so_t, so_f]]
else:
temp0pac = pac_t[0]
temp0atl = atl_t[0]
temp0so = so_t[0]
pac_t = ((pac_t - temp0pac) * 100)
pac_f = ((pac_f - temp0pac) * 100)
atl_t = ((atl_t - temp0atl) * 100)
atl_f = ((atl_f - temp0atl) * 100)
so_t = ((so_t - temp0so) * 100)
so_f = ((so_f - temp0so) * 100)
return [pac_t, pac_f, atl_t, atl_f, so_t, so_f] | Prepares temperature anomaly data per basin. This can be used for Hoevmiller plot or leads and lags plot.
Input:
- run_t is data_full of transient run
- run_f is data_full of fixed run
- anoms determines whether returned values are anomalies (and the unit of output)
Output:
- if anoms [default]:
temperature anomaly in centi-Kelvin w.r.t. year 0 per basin and per simulation (transient or fixed)
in this order:
[pac_t, pac_f, atl_t, atl_f, so_t, so_f]
- if not anoms:
temperature in centi-Celsius per basin and per simulation (transient or fixed) in the same order
NB the pac and atl mask exclude the southern ocean.
Author: Jeemijn Scheen, example@example.com | functions.py | temp_basin | jeemijn/LIA | 0 | python | def temp_basin(run_t, run_f, anoms=True):
'Prepares temperature anomaly data per basin. This can be used for Hoevmiller plot or leads and lags plot.\n Input:\n - run_t is data_full of transient run\n - run_f is data_full of fixed run\n - anoms determines whether returned values are anomalies (and the unit of output)\n \n Output:\n - if anoms [default]: \n temperature anomaly in centi-Kelvin w.r.t. year 0 per basin and per simulation (transient or fixed) \n in this order:\n [pac_t, pac_f, atl_t, atl_f, so_t, so_f]\n - if not anoms:\n temperature in centi-Celsius per basin and per simulation (transient or fixed) in the same order\n \n NB the pac and atl mask exclude the southern ocean.\n \n Author: Jeemijn Scheen, example@example.com'
vol = run_t.boxvol
pac_t = vol_mean(run_t.TEMP.where((run_t.mask == 2)), vol, keep_z=True)
pac_f = vol_mean(run_f.TEMP.where((run_t.mask == 2)), vol, keep_z=True)
atl_t = vol_mean(run_t.TEMP.where((run_t.mask == 1)), vol, keep_z=True)
atl_f = vol_mean(run_f.TEMP.where((run_t.mask == 1)), vol, keep_z=True)
so_t = vol_mean(run_t.TEMP.where((run_t.mask == 4)), vol, keep_z=True)
so_f = vol_mean(run_f.TEMP.where((run_t.mask == 4)), vol, keep_z=True)
if (anoms == False):
return [(100 * x) for x in [pac_t, pac_f, atl_t, atl_f, so_t, so_f]]
else:
temp0pac = pac_t[0]
temp0atl = atl_t[0]
temp0so = so_t[0]
pac_t = ((pac_t - temp0pac) * 100)
pac_f = ((pac_f - temp0pac) * 100)
atl_t = ((atl_t - temp0atl) * 100)
atl_f = ((atl_f - temp0atl) * 100)
so_t = ((so_t - temp0so) * 100)
so_f = ((so_f - temp0so) * 100)
return [pac_t, pac_f, atl_t, atl_f, so_t, so_f] | def temp_basin(run_t, run_f, anoms=True):
'Prepares temperature anomaly data per basin. This can be used for Hoevmiller plot or leads and lags plot.\n Input:\n - run_t is data_full of transient run\n - run_f is data_full of fixed run\n - anoms determines whether returned values are anomalies (and the unit of output)\n \n Output:\n - if anoms [default]: \n temperature anomaly in centi-Kelvin w.r.t. year 0 per basin and per simulation (transient or fixed) \n in this order:\n [pac_t, pac_f, atl_t, atl_f, so_t, so_f]\n - if not anoms:\n temperature in centi-Celsius per basin and per simulation (transient or fixed) in the same order\n \n NB the pac and atl mask exclude the southern ocean.\n \n Author: Jeemijn Scheen, example@example.com'
vol = run_t.boxvol
pac_t = vol_mean(run_t.TEMP.where((run_t.mask == 2)), vol, keep_z=True)
pac_f = vol_mean(run_f.TEMP.where((run_t.mask == 2)), vol, keep_z=True)
atl_t = vol_mean(run_t.TEMP.where((run_t.mask == 1)), vol, keep_z=True)
atl_f = vol_mean(run_f.TEMP.where((run_t.mask == 1)), vol, keep_z=True)
so_t = vol_mean(run_t.TEMP.where((run_t.mask == 4)), vol, keep_z=True)
so_f = vol_mean(run_f.TEMP.where((run_t.mask == 4)), vol, keep_z=True)
if (anoms == False):
return [(100 * x) for x in [pac_t, pac_f, atl_t, atl_f, so_t, so_f]]
else:
temp0pac = pac_t[0]
temp0atl = atl_t[0]
temp0so = so_t[0]
pac_t = ((pac_t - temp0pac) * 100)
pac_f = ((pac_f - temp0pac) * 100)
atl_t = ((atl_t - temp0atl) * 100)
atl_f = ((atl_f - temp0atl) * 100)
so_t = ((so_t - temp0so) * 100)
so_f = ((so_f - temp0so) * 100)
return [pac_t, pac_f, atl_t, atl_f, so_t, so_f]<|docstring|>Prepares temperature anomaly data per basin. This can be used for Hoevmiller plot or leads and lags plot.
Input:
- run_t is data_full of transient run
- run_f is data_full of fixed run
- anoms determines whether returned values are anomalies (and the unit of output)
Output:
- if anoms [default]:
temperature anomaly in centi-Kelvin w.r.t. year 0 per basin and per simulation (transient or fixed)
in this order:
[pac_t, pac_f, atl_t, atl_f, so_t, so_f]
- if not anoms:
temperature in centi-Celsius per basin and per simulation (transient or fixed) in the same order
NB the pac and atl mask exclude the southern ocean.
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
64cc537bf42955d501320236b92593cc1210fb99b28227cf7bc47730bff795ee | def find_min(obj, in_obj=None):
'Gives minimum of a dataarray in a convenient way. \n Input: an xarray dataarray called obj with a coordinate named time\n Output: [t,y] where y is the value of the minimum of the array and this occurs at time t\n Optional: in_obj = something.time is a time coordinate that has a larger stepsize (i.e. from data_full)\n Then the output is given as: [t, y, t_rounded] where t_rounded is the time closest to the minimum in coarser grid\n Author: Jeemijn Scheen, example@example.com'
minm = obj.where((obj == obj.min()), drop=True)
y = minm.item()
t = minm.time.item()
if (in_obj is not None):
t_rounded = in_obj.where((abs((in_obj - t)) == abs((in_obj - t)).min()), drop=True)
if (len(t_rounded) > 1):
t_rounded = t_rounded[1]
return [t, y, t_rounded.item()]
return [t, y] | Gives minimum of a dataarray in a convenient way.
Input: an xarray dataarray called obj with a coordinate named time
Output: [t,y] where y is the value of the minimum of the array and this occurs at time t
Optional: in_obj = something.time is a time coordinate that has a larger stepsize (i.e. from data_full)
Then the output is given as: [t, y, t_rounded] where t_rounded is the time closest to the minimum in coarser grid
Author: Jeemijn Scheen, example@example.com | functions.py | find_min | jeemijn/LIA | 0 | python | def find_min(obj, in_obj=None):
'Gives minimum of a dataarray in a convenient way. \n Input: an xarray dataarray called obj with a coordinate named time\n Output: [t,y] where y is the value of the minimum of the array and this occurs at time t\n Optional: in_obj = something.time is a time coordinate that has a larger stepsize (i.e. from data_full)\n Then the output is given as: [t, y, t_rounded] where t_rounded is the time closest to the minimum in coarser grid\n Author: Jeemijn Scheen, example@example.com'
minm = obj.where((obj == obj.min()), drop=True)
y = minm.item()
t = minm.time.item()
if (in_obj is not None):
t_rounded = in_obj.where((abs((in_obj - t)) == abs((in_obj - t)).min()), drop=True)
if (len(t_rounded) > 1):
t_rounded = t_rounded[1]
return [t, y, t_rounded.item()]
return [t, y] | def find_min(obj, in_obj=None):
'Gives minimum of a dataarray in a convenient way. \n Input: an xarray dataarray called obj with a coordinate named time\n Output: [t,y] where y is the value of the minimum of the array and this occurs at time t\n Optional: in_obj = something.time is a time coordinate that has a larger stepsize (i.e. from data_full)\n Then the output is given as: [t, y, t_rounded] where t_rounded is the time closest to the minimum in coarser grid\n Author: Jeemijn Scheen, example@example.com'
minm = obj.where((obj == obj.min()), drop=True)
y = minm.item()
t = minm.time.item()
if (in_obj is not None):
t_rounded = in_obj.where((abs((in_obj - t)) == abs((in_obj - t)).min()), drop=True)
if (len(t_rounded) > 1):
t_rounded = t_rounded[1]
return [t, y, t_rounded.item()]
return [t, y]<|docstring|>Gives minimum of a dataarray in a convenient way.
Input: an xarray dataarray called obj with a coordinate named time
Output: [t,y] where y is the value of the minimum of the array and this occurs at time t
Optional: in_obj = something.time is a time coordinate that has a larger stepsize (i.e. from data_full)
Then the output is given as: [t, y, t_rounded] where t_rounded is the time closest to the minimum in coarser grid
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
dfb420c4bd2dd9acd2eb2f9c159be8392e7336ce966de60dd5969639b57b8d15 | def find_ridges(obj, only_min=True, max_guess=600.0, min_guess=1750.0, fast=True):
'Finds the ridges of minimal and maximal values within e.g. temperature time series. \n That is, where the warming changes to a cooling or vice versa. The ridges can then be plot in a contour plot.\n \n If only_min, then a global minimum is searched for (e.g. for LIA-Industrial_warming simulations);\n else both 1 minimum and 1 maximum are searched for (e.g. for MCA-LIA-Industrial_warming simulations).\n \n Input: \n - obj must be an xarray DataArray with z_t and time coords, here: temp_diff_per_depth (values from TEMP)\n - only_min [default True] see above\n NB in this case the search is highly simplified since we can take the global minimum\n - max_guess [optional] is year C.E. of first maximum in forcing; used as a guess of first maximum at surface \n NB max_guess is not used if only_min\n - min_guess [optional] is year C.E. of first minimum in forcing; used as a guess of first minimum at surface\n - fast can be set if only_min. In this case the ridges are found faster, but less precise; namely, rounded to the \n frequency of the output (e.g. 5 years) instead of interpolating with a parabola in between.\n For contour plots with an output frequency of 5 years there is no visual difference (so use fast), \n but when using the integer number of delays it is better without rounding (without fast).\n\n Output:\n - if only_min: ridge_min\n - else: [ridge_min, ridge_max],\n where each ridge is an array over depth steps containing the year of min/max temp value at this depth step.\n \n NB IF YOU GET AN ERROR "too many values to unpack" then somewhere in calling this function or its subfunctions you did \n [a,b] = call... instead of a = call...\n \n Author: Jeemijn Scheen, example@example.com'
from numpy import zeros, sort, array, where
delta_t = (obj.time[2] - obj.time[1]).item()
ridge_min = zeros(len(obj.z_t))
ridge_max = zeros(len(obj.z_t))
if only_min:
if fast:
for (n, z) in enumerate(obj.z_t):
minm_rough = find_min(obj.sel(z_t=z))
ridge_min[n] = minm_rough[0]
else:
for (n, z) in enumerate(obj.z_t):
minm_rough = find_min(obj.sel(z_t=z))
t_min = find_local_min(obj.sel(z_t=z), min_guess=minm_rough[0])
ridge_min[n] = t_min
else:
max_init = max_guess
min_init = min_guess
surf = obj.sel(z_t=obj.z_t[0])
[t_max_arr, t_min_arr] = find_local_min_max(surf)
off = 2.0
min_guess = (sort([(t - min_guess) for t in t_min_arr if (t > (min_init - (off * delta_t)))])[0] + min_guess)
max_guess = (sort([(t - max_guess) for t in t_max_arr if (t > (max_init - (off * delta_t)))])[0] + max_guess)
ridge_min[0] = min_guess
ridge_max[0] = max_guess
for z in range(1, len(obj.z_t)):
d_slice = obj.sel(z_t=obj.z_t[z])
[t_max_arr, t_min_arr] = find_local_min_max(d_slice)
t_min_arr = array(t_min_arr)
t_max_arr = array(t_max_arr)
t_min_arr = t_min_arr[(t_min_arr > (min_init - (off * delta_t)))]
t_max_arr = t_max_arr[(t_max_arr > (max_init - (off * delta_t)))]
min_guess = t_min_arr[where((abs((t_min_arr - min_guess)) == abs((t_min_arr - min_guess)).min()))].item()
max_guess = t_max_arr[where((abs((t_max_arr - max_guess)) == abs((t_max_arr - max_guess)).min()))].item()
ridge_min[z] = min_guess
ridge_max[z] = max_guess
if only_min:
return ridge_min
else:
return [ridge_min, ridge_max] | Finds the ridges of minimal and maximal values within e.g. temperature time series.
That is, where the warming changes to a cooling or vice versa. The ridges can then be plot in a contour plot.
If only_min, then a global minimum is searched for (e.g. for LIA-Industrial_warming simulations);
else both 1 minimum and 1 maximum are searched for (e.g. for MCA-LIA-Industrial_warming simulations).
Input:
- obj must be an xarray DataArray with z_t and time coords, here: temp_diff_per_depth (values from TEMP)
- only_min [default True] see above
NB in this case the search is highly simplified since we can take the global minimum
- max_guess [optional] is year C.E. of first maximum in forcing; used as a guess of first maximum at surface
NB max_guess is not used if only_min
- min_guess [optional] is year C.E. of first minimum in forcing; used as a guess of first minimum at surface
- fast can be set if only_min. In this case the ridges are found faster, but less precise; namely, rounded to the
frequency of the output (e.g. 5 years) instead of interpolating with a parabola in between.
For contour plots with an output frequency of 5 years there is no visual difference (so use fast),
but when using the integer number of delays it is better without rounding (without fast).
Output:
- if only_min: ridge_min
- else: [ridge_min, ridge_max],
where each ridge is an array over depth steps containing the year of min/max temp value at this depth step.
NB IF YOU GET AN ERROR "too many values to unpack" then somewhere in calling this function or its subfunctions you did
[a,b] = call... instead of a = call...
Author: Jeemijn Scheen, example@example.com | functions.py | find_ridges | jeemijn/LIA | 0 | python | def find_ridges(obj, only_min=True, max_guess=600.0, min_guess=1750.0, fast=True):
'Finds the ridges of minimal and maximal values within e.g. temperature time series. \n That is, where the warming changes to a cooling or vice versa. The ridges can then be plot in a contour plot.\n \n If only_min, then a global minimum is searched for (e.g. for LIA-Industrial_warming simulations);\n else both 1 minimum and 1 maximum are searched for (e.g. for MCA-LIA-Industrial_warming simulations).\n \n Input: \n - obj must be an xarray DataArray with z_t and time coords, here: temp_diff_per_depth (values from TEMP)\n - only_min [default True] see above\n NB in this case the search is highly simplified since we can take the global minimum\n - max_guess [optional] is year C.E. of first maximum in forcing; used as a guess of first maximum at surface \n NB max_guess is not used if only_min\n - min_guess [optional] is year C.E. of first minimum in forcing; used as a guess of first minimum at surface\n - fast can be set if only_min. In this case the ridges are found faster, but less precise; namely, rounded to the \n frequency of the output (e.g. 5 years) instead of interpolating with a parabola in between.\n For contour plots with an output frequency of 5 years there is no visual difference (so use fast), \n but when using the integer number of delays it is better without rounding (without fast).\n\n Output:\n - if only_min: ridge_min\n - else: [ridge_min, ridge_max],\n where each ridge is an array over depth steps containing the year of min/max temp value at this depth step.\n \n NB IF YOU GET AN ERROR "too many values to unpack" then somewhere in calling this function or its subfunctions you did \n [a,b] = call... instead of a = call...\n \n Author: Jeemijn Scheen, example@example.com'
from numpy import zeros, sort, array, where
delta_t = (obj.time[2] - obj.time[1]).item()
ridge_min = zeros(len(obj.z_t))
ridge_max = zeros(len(obj.z_t))
if only_min:
if fast:
for (n, z) in enumerate(obj.z_t):
minm_rough = find_min(obj.sel(z_t=z))
ridge_min[n] = minm_rough[0]
else:
for (n, z) in enumerate(obj.z_t):
minm_rough = find_min(obj.sel(z_t=z))
t_min = find_local_min(obj.sel(z_t=z), min_guess=minm_rough[0])
ridge_min[n] = t_min
else:
max_init = max_guess
min_init = min_guess
surf = obj.sel(z_t=obj.z_t[0])
[t_max_arr, t_min_arr] = find_local_min_max(surf)
off = 2.0
min_guess = (sort([(t - min_guess) for t in t_min_arr if (t > (min_init - (off * delta_t)))])[0] + min_guess)
max_guess = (sort([(t - max_guess) for t in t_max_arr if (t > (max_init - (off * delta_t)))])[0] + max_guess)
ridge_min[0] = min_guess
ridge_max[0] = max_guess
for z in range(1, len(obj.z_t)):
d_slice = obj.sel(z_t=obj.z_t[z])
[t_max_arr, t_min_arr] = find_local_min_max(d_slice)
t_min_arr = array(t_min_arr)
t_max_arr = array(t_max_arr)
t_min_arr = t_min_arr[(t_min_arr > (min_init - (off * delta_t)))]
t_max_arr = t_max_arr[(t_max_arr > (max_init - (off * delta_t)))]
min_guess = t_min_arr[where((abs((t_min_arr - min_guess)) == abs((t_min_arr - min_guess)).min()))].item()
max_guess = t_max_arr[where((abs((t_max_arr - max_guess)) == abs((t_max_arr - max_guess)).min()))].item()
ridge_min[z] = min_guess
ridge_max[z] = max_guess
if only_min:
return ridge_min
else:
return [ridge_min, ridge_max] | def find_ridges(obj, only_min=True, max_guess=600.0, min_guess=1750.0, fast=True):
'Finds the ridges of minimal and maximal values within e.g. temperature time series. \n That is, where the warming changes to a cooling or vice versa. The ridges can then be plot in a contour plot.\n \n If only_min, then a global minimum is searched for (e.g. for LIA-Industrial_warming simulations);\n else both 1 minimum and 1 maximum are searched for (e.g. for MCA-LIA-Industrial_warming simulations).\n \n Input: \n - obj must be an xarray DataArray with z_t and time coords, here: temp_diff_per_depth (values from TEMP)\n - only_min [default True] see above\n NB in this case the search is highly simplified since we can take the global minimum\n - max_guess [optional] is year C.E. of first maximum in forcing; used as a guess of first maximum at surface \n NB max_guess is not used if only_min\n - min_guess [optional] is year C.E. of first minimum in forcing; used as a guess of first minimum at surface\n - fast can be set if only_min. In this case the ridges are found faster, but less precise; namely, rounded to the \n frequency of the output (e.g. 5 years) instead of interpolating with a parabola in between.\n For contour plots with an output frequency of 5 years there is no visual difference (so use fast), \n but when using the integer number of delays it is better without rounding (without fast).\n\n Output:\n - if only_min: ridge_min\n - else: [ridge_min, ridge_max],\n where each ridge is an array over depth steps containing the year of min/max temp value at this depth step.\n \n NB IF YOU GET AN ERROR "too many values to unpack" then somewhere in calling this function or its subfunctions you did \n [a,b] = call... instead of a = call...\n \n Author: Jeemijn Scheen, example@example.com'
from numpy import zeros, sort, array, where
delta_t = (obj.time[2] - obj.time[1]).item()
ridge_min = zeros(len(obj.z_t))
ridge_max = zeros(len(obj.z_t))
if only_min:
if fast:
for (n, z) in enumerate(obj.z_t):
minm_rough = find_min(obj.sel(z_t=z))
ridge_min[n] = minm_rough[0]
else:
for (n, z) in enumerate(obj.z_t):
minm_rough = find_min(obj.sel(z_t=z))
t_min = find_local_min(obj.sel(z_t=z), min_guess=minm_rough[0])
ridge_min[n] = t_min
else:
max_init = max_guess
min_init = min_guess
surf = obj.sel(z_t=obj.z_t[0])
[t_max_arr, t_min_arr] = find_local_min_max(surf)
off = 2.0
min_guess = (sort([(t - min_guess) for t in t_min_arr if (t > (min_init - (off * delta_t)))])[0] + min_guess)
max_guess = (sort([(t - max_guess) for t in t_max_arr if (t > (max_init - (off * delta_t)))])[0] + max_guess)
ridge_min[0] = min_guess
ridge_max[0] = max_guess
for z in range(1, len(obj.z_t)):
d_slice = obj.sel(z_t=obj.z_t[z])
[t_max_arr, t_min_arr] = find_local_min_max(d_slice)
t_min_arr = array(t_min_arr)
t_max_arr = array(t_max_arr)
t_min_arr = t_min_arr[(t_min_arr > (min_init - (off * delta_t)))]
t_max_arr = t_max_arr[(t_max_arr > (max_init - (off * delta_t)))]
min_guess = t_min_arr[where((abs((t_min_arr - min_guess)) == abs((t_min_arr - min_guess)).min()))].item()
max_guess = t_max_arr[where((abs((t_max_arr - max_guess)) == abs((t_max_arr - max_guess)).min()))].item()
ridge_min[z] = min_guess
ridge_max[z] = max_guess
if only_min:
return ridge_min
else:
return [ridge_min, ridge_max]<|docstring|>Finds the ridges of minimal and maximal values within e.g. temperature time series.
That is, where the warming changes to a cooling or vice versa. The ridges can then be plot in a contour plot.
If only_min, then a global minimum is searched for (e.g. for LIA-Industrial_warming simulations);
else both 1 minimum and 1 maximum are searched for (e.g. for MCA-LIA-Industrial_warming simulations).
Input:
- obj must be an xarray DataArray with z_t and time coords, here: temp_diff_per_depth (values from TEMP)
- only_min [default True] see above
NB in this case the search is highly simplified since we can take the global minimum
- max_guess [optional] is year C.E. of first maximum in forcing; used as a guess of first maximum at surface
NB max_guess is not used if only_min
- min_guess [optional] is year C.E. of first minimum in forcing; used as a guess of first minimum at surface
- fast can be set if only_min. In this case the ridges are found faster, but less precise; namely, rounded to the
frequency of the output (e.g. 5 years) instead of interpolating with a parabola in between.
For contour plots with an output frequency of 5 years there is no visual difference (so use fast),
but when using the integer number of delays it is better without rounding (without fast).
Output:
- if only_min: ridge_min
- else: [ridge_min, ridge_max],
where each ridge is an array over depth steps containing the year of min/max temp value at this depth step.
NB IF YOU GET AN ERROR "too many values to unpack" then somewhere in calling this function or its subfunctions you did
[a,b] = call... instead of a = call...
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
9b7c181256870346fdbcf79f9793c0e5356cc3ca2cec19cb7823037a37112bc4 | def find_local_min(obj, min_guess=1750.0):
"This function is a wrapper around find_local_min_max(),\n combined with the magic of guess_min in find_ridges. It is only used for searching 1 min (no max).\n \n It is just a copy of part of find_ridges, but now it is also available if you are only interested \n in one depth slice (since find_ridges takes only variables with z_t coordinate).\n \n Usage:\n - trick to always find your global minimum: use global minm 'f.find_min(obj)[0]' \n (is rounded to output frequency) as value for min_guess.\n \n Author: Jeemijn Scheen, example@example.com"
from numpy import sort
t_min_arr = find_local_min_max(obj=obj, only_min=True)
delta_t = (obj.time[2] - obj.time[1]).item()
off = 2.0
min_init = min_guess
min_guess = (sort([(t - min_guess) for t in t_min_arr if (t > (min_init - (off * delta_t)))])[0] + min_guess)
return min_guess | This function is a wrapper around find_local_min_max(),
combined with the magic of guess_min in find_ridges. It is only used for searching 1 min (no max).
It is just a copy of part of find_ridges, but now it is also available if you are only interested
in one depth slice (since find_ridges takes only variables with z_t coordinate).
Usage:
- trick to always find your global minimum: use global minm 'f.find_min(obj)[0]'
(is rounded to output frequency) as value for min_guess.
Author: Jeemijn Scheen, example@example.com | functions.py | find_local_min | jeemijn/LIA | 0 | python | def find_local_min(obj, min_guess=1750.0):
"This function is a wrapper around find_local_min_max(),\n combined with the magic of guess_min in find_ridges. It is only used for searching 1 min (no max).\n \n It is just a copy of part of find_ridges, but now it is also available if you are only interested \n in one depth slice (since find_ridges takes only variables with z_t coordinate).\n \n Usage:\n - trick to always find your global minimum: use global minm 'f.find_min(obj)[0]' \n (is rounded to output frequency) as value for min_guess.\n \n Author: Jeemijn Scheen, example@example.com"
from numpy import sort
t_min_arr = find_local_min_max(obj=obj, only_min=True)
delta_t = (obj.time[2] - obj.time[1]).item()
off = 2.0
min_init = min_guess
min_guess = (sort([(t - min_guess) for t in t_min_arr if (t > (min_init - (off * delta_t)))])[0] + min_guess)
return min_guess | def find_local_min(obj, min_guess=1750.0):
"This function is a wrapper around find_local_min_max(),\n combined with the magic of guess_min in find_ridges. It is only used for searching 1 min (no max).\n \n It is just a copy of part of find_ridges, but now it is also available if you are only interested \n in one depth slice (since find_ridges takes only variables with z_t coordinate).\n \n Usage:\n - trick to always find your global minimum: use global minm 'f.find_min(obj)[0]' \n (is rounded to output frequency) as value for min_guess.\n \n Author: Jeemijn Scheen, example@example.com"
from numpy import sort
t_min_arr = find_local_min_max(obj=obj, only_min=True)
delta_t = (obj.time[2] - obj.time[1]).item()
off = 2.0
min_init = min_guess
min_guess = (sort([(t - min_guess) for t in t_min_arr if (t > (min_init - (off * delta_t)))])[0] + min_guess)
return min_guess<|docstring|>This function is a wrapper around find_local_min_max(),
combined with the magic of guess_min in find_ridges. It is only used for searching 1 min (no max).
It is just a copy of part of find_ridges, but now it is also available if you are only interested
in one depth slice (since find_ridges takes only variables with z_t coordinate).
Usage:
- trick to always find your global minimum: use global minm 'f.find_min(obj)[0]'
(is rounded to output frequency) as value for min_guess.
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
c7b5452bdf2e58a4361d3bf83d1f1bffbe6c975d1ea7e3cccfe5c77c49b86036 | def find_local_min_max(obj, only_min=False):
'Finds 2 local extrema (1 min and 1 max) in obj by fitting a parabola through each 5 consecutive data points.\n Input:\n - obj is e.g. a temperature time series at a fixed depth (a depth slice)\n - only_min [optional] if not looking for a max, only for 1 min \n Output: [t_min, t_max] as time indices\n Author: Jeemijn Scheen, example@example.com'
from numpy import asarray, round, polyfit, argmax, split, diff, average, insert, abs, sign, zeros, nan, isnan, unique
if ('z_t' in obj.dims):
raise Exception('obj must be independent of z_t; only dependent on time coordinate')
if ('time' not in obj.dims):
raise Exception('obj must be dependent on time coordinate')
delta_t = (obj.time[2] - obj.time[1]).item()
signs = sign(diff(obj))
extr = []
a_coefs = []
for (n, this_temp) in enumerate(obj):
if ((n == 0) or (n == 1) or (n == (len(obj) - 2)) or (n == (len(obj) - 1))):
continue
x_arr_steps = [(n + i) for i in range((- 2), 2)]
discard = (len(unique(signs[x_arr_steps])) == 1)
if discard:
continue
else:
x_arr = [obj.time[(n + i)].item() for i in range((- 2), 3)]
y_arr = [obj.sel(time=t) for t in x_arr]
this_time = x_arr[2]
coefs = polyfit(x_arr, y_arr, deg=2)
extr_time = ((- coefs[1]) / (2.0 * coefs[0]))
if ((extr_time > x_arr[(- 1)]) or (extr_time < x_arr[0])):
continue
else:
extr.append(extr_time)
a_coefs.append(coefs[0])
extr_min = asarray([val for (n, val) in enumerate(extr) if (a_coefs[n] > 0.0)])
extr_max = asarray([val for (n, val) in enumerate(extr) if (a_coefs[n] <= 0.0)])
sep = 1.5
split_arr_min = [(i + 1) for (i, val) in enumerate(diff(extr_min)) if (val > (sep * delta_t))]
split_arr_max = [(i + 1) for (i, val) in enumerate(diff(extr_max)) if (val > (sep * delta_t))]
extr_min = split(extr_min, split_arr_min)
extr_max = split(extr_max, split_arr_max)
for (i, subarr) in enumerate(extr_min):
extr_min[i] = average(subarr)
if (not only_min):
for (i, subarr) in enumerate(extr_max):
extr_max[i] = average(subarr)
if only_min:
return extr_min
else:
return [extr_max, extr_min] | Finds 2 local extrema (1 min and 1 max) in obj by fitting a parabola through each 5 consecutive data points.
Input:
- obj is e.g. a temperature time series at a fixed depth (a depth slice)
- only_min [optional] if not looking for a max, only for 1 min
Output: [t_min, t_max] as time indices
Author: Jeemijn Scheen, example@example.com | functions.py | find_local_min_max | jeemijn/LIA | 0 | python | def find_local_min_max(obj, only_min=False):
'Finds 2 local extrema (1 min and 1 max) in obj by fitting a parabola through each 5 consecutive data points.\n Input:\n - obj is e.g. a temperature time series at a fixed depth (a depth slice)\n - only_min [optional] if not looking for a max, only for 1 min \n Output: [t_min, t_max] as time indices\n Author: Jeemijn Scheen, example@example.com'
from numpy import asarray, round, polyfit, argmax, split, diff, average, insert, abs, sign, zeros, nan, isnan, unique
if ('z_t' in obj.dims):
raise Exception('obj must be independent of z_t; only dependent on time coordinate')
if ('time' not in obj.dims):
raise Exception('obj must be dependent on time coordinate')
delta_t = (obj.time[2] - obj.time[1]).item()
signs = sign(diff(obj))
extr = []
a_coefs = []
for (n, this_temp) in enumerate(obj):
if ((n == 0) or (n == 1) or (n == (len(obj) - 2)) or (n == (len(obj) - 1))):
continue
x_arr_steps = [(n + i) for i in range((- 2), 2)]
discard = (len(unique(signs[x_arr_steps])) == 1)
if discard:
continue
else:
x_arr = [obj.time[(n + i)].item() for i in range((- 2), 3)]
y_arr = [obj.sel(time=t) for t in x_arr]
this_time = x_arr[2]
coefs = polyfit(x_arr, y_arr, deg=2)
extr_time = ((- coefs[1]) / (2.0 * coefs[0]))
if ((extr_time > x_arr[(- 1)]) or (extr_time < x_arr[0])):
continue
else:
extr.append(extr_time)
a_coefs.append(coefs[0])
extr_min = asarray([val for (n, val) in enumerate(extr) if (a_coefs[n] > 0.0)])
extr_max = asarray([val for (n, val) in enumerate(extr) if (a_coefs[n] <= 0.0)])
sep = 1.5
split_arr_min = [(i + 1) for (i, val) in enumerate(diff(extr_min)) if (val > (sep * delta_t))]
split_arr_max = [(i + 1) for (i, val) in enumerate(diff(extr_max)) if (val > (sep * delta_t))]
extr_min = split(extr_min, split_arr_min)
extr_max = split(extr_max, split_arr_max)
for (i, subarr) in enumerate(extr_min):
extr_min[i] = average(subarr)
if (not only_min):
for (i, subarr) in enumerate(extr_max):
extr_max[i] = average(subarr)
if only_min:
return extr_min
else:
return [extr_max, extr_min] | def find_local_min_max(obj, only_min=False):
'Finds 2 local extrema (1 min and 1 max) in obj by fitting a parabola through each 5 consecutive data points.\n Input:\n - obj is e.g. a temperature time series at a fixed depth (a depth slice)\n - only_min [optional] if not looking for a max, only for 1 min \n Output: [t_min, t_max] as time indices\n Author: Jeemijn Scheen, example@example.com'
from numpy import asarray, round, polyfit, argmax, split, diff, average, insert, abs, sign, zeros, nan, isnan, unique
if ('z_t' in obj.dims):
raise Exception('obj must be independent of z_t; only dependent on time coordinate')
if ('time' not in obj.dims):
raise Exception('obj must be dependent on time coordinate')
delta_t = (obj.time[2] - obj.time[1]).item()
signs = sign(diff(obj))
extr = []
a_coefs = []
for (n, this_temp) in enumerate(obj):
if ((n == 0) or (n == 1) or (n == (len(obj) - 2)) or (n == (len(obj) - 1))):
continue
x_arr_steps = [(n + i) for i in range((- 2), 2)]
discard = (len(unique(signs[x_arr_steps])) == 1)
if discard:
continue
else:
x_arr = [obj.time[(n + i)].item() for i in range((- 2), 3)]
y_arr = [obj.sel(time=t) for t in x_arr]
this_time = x_arr[2]
coefs = polyfit(x_arr, y_arr, deg=2)
extr_time = ((- coefs[1]) / (2.0 * coefs[0]))
if ((extr_time > x_arr[(- 1)]) or (extr_time < x_arr[0])):
continue
else:
extr.append(extr_time)
a_coefs.append(coefs[0])
extr_min = asarray([val for (n, val) in enumerate(extr) if (a_coefs[n] > 0.0)])
extr_max = asarray([val for (n, val) in enumerate(extr) if (a_coefs[n] <= 0.0)])
sep = 1.5
split_arr_min = [(i + 1) for (i, val) in enumerate(diff(extr_min)) if (val > (sep * delta_t))]
split_arr_max = [(i + 1) for (i, val) in enumerate(diff(extr_max)) if (val > (sep * delta_t))]
extr_min = split(extr_min, split_arr_min)
extr_max = split(extr_max, split_arr_max)
for (i, subarr) in enumerate(extr_min):
extr_min[i] = average(subarr)
if (not only_min):
for (i, subarr) in enumerate(extr_max):
extr_max[i] = average(subarr)
if only_min:
return extr_min
else:
return [extr_max, extr_min]<|docstring|>Finds 2 local extrema (1 min and 1 max) in obj by fitting a parabola through each 5 consecutive data points.
Input:
- obj is e.g. a temperature time series at a fixed depth (a depth slice)
- only_min [optional] if not looking for a max, only for 1 min
Output: [t_min, t_max] as time indices
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
d6abaf218a1043d6670f5ffedd16acce1e48cc17fb0c797bc512e398261c1b7e | def calc_leads_lags(obj_t, obj_f, d=26):
'Calculate leads and lags between transient and fixed simulation at a fixed depth of 3 km. \n This function is for the case of a global temperature minm (no maxm) e.g. LIA and industrial warming. \n Input:\n - obj_t is transient simulation (a variable e.g. TEMP depending on z_t and time)\n - obj_f idem for fixed simulation\n - d can be set to another depth than the default of d=26 (3km). Useful: 14, 21, 29 are 1, 2, 4 km, respectively.\n Output:\n In this order [time_min_t, time_min_f, val_min_t, val_min_f, lead] for transient (_t) resp. fixed (_f) :\n - time of global minimum (time_min) \n - value of global minimum (val_min) [this is the amplitude, but with a minus sign] \n - lead of transient w.r.t. fixed in yr (lead)\n Example usage:\n [time_min_t, time_min_f, val_min_t, val_min_f, lead] = calc_leads_lags(obj_t, obj_f) \n Author: Jeemijn Scheen, example@example.com'
if ('z_t' in obj_t.dims):
obj_t = obj_t.isel(z_t=d)
if ('z_t' in obj_f.dims):
obj_f = obj_f.isel(z_t=d)
[time_min_t_rough, val_min_t] = find_min(obj_t)
[time_min_f_rough, val_min_f] = find_min(obj_f)
time_min_t = find_local_min(obj_t, min_guess=time_min_t_rough)
time_min_f = find_local_min(obj_f, min_guess=time_min_f_rough)
lead = int(round((time_min_f - time_min_t)))
return [time_min_t, time_min_f, val_min_t, val_min_f, lead] | Calculate leads and lags between transient and fixed simulation at a fixed depth of 3 km.
This function is for the case of a global temperature minm (no maxm) e.g. LIA and industrial warming.
Input:
- obj_t is transient simulation (a variable e.g. TEMP depending on z_t and time)
- obj_f idem for fixed simulation
- d can be set to another depth than the default of d=26 (3km). Useful: 14, 21, 29 are 1, 2, 4 km, respectively.
Output:
In this order [time_min_t, time_min_f, val_min_t, val_min_f, lead] for transient (_t) resp. fixed (_f) :
- time of global minimum (time_min)
- value of global minimum (val_min) [this is the amplitude, but with a minus sign]
- lead of transient w.r.t. fixed in yr (lead)
Example usage:
[time_min_t, time_min_f, val_min_t, val_min_f, lead] = calc_leads_lags(obj_t, obj_f)
Author: Jeemijn Scheen, example@example.com | functions.py | calc_leads_lags | jeemijn/LIA | 0 | python | def calc_leads_lags(obj_t, obj_f, d=26):
'Calculate leads and lags between transient and fixed simulation at a fixed depth of 3 km. \n This function is for the case of a global temperature minm (no maxm) e.g. LIA and industrial warming. \n Input:\n - obj_t is transient simulation (a variable e.g. TEMP depending on z_t and time)\n - obj_f idem for fixed simulation\n - d can be set to another depth than the default of d=26 (3km). Useful: 14, 21, 29 are 1, 2, 4 km, respectively.\n Output:\n In this order [time_min_t, time_min_f, val_min_t, val_min_f, lead] for transient (_t) resp. fixed (_f) :\n - time of global minimum (time_min) \n - value of global minimum (val_min) [this is the amplitude, but with a minus sign] \n - lead of transient w.r.t. fixed in yr (lead)\n Example usage:\n [time_min_t, time_min_f, val_min_t, val_min_f, lead] = calc_leads_lags(obj_t, obj_f) \n Author: Jeemijn Scheen, example@example.com'
if ('z_t' in obj_t.dims):
obj_t = obj_t.isel(z_t=d)
if ('z_t' in obj_f.dims):
obj_f = obj_f.isel(z_t=d)
[time_min_t_rough, val_min_t] = find_min(obj_t)
[time_min_f_rough, val_min_f] = find_min(obj_f)
time_min_t = find_local_min(obj_t, min_guess=time_min_t_rough)
time_min_f = find_local_min(obj_f, min_guess=time_min_f_rough)
lead = int(round((time_min_f - time_min_t)))
return [time_min_t, time_min_f, val_min_t, val_min_f, lead] | def calc_leads_lags(obj_t, obj_f, d=26):
'Calculate leads and lags between transient and fixed simulation at a fixed depth of 3 km. \n This function is for the case of a global temperature minm (no maxm) e.g. LIA and industrial warming. \n Input:\n - obj_t is transient simulation (a variable e.g. TEMP depending on z_t and time)\n - obj_f idem for fixed simulation\n - d can be set to another depth than the default of d=26 (3km). Useful: 14, 21, 29 are 1, 2, 4 km, respectively.\n Output:\n In this order [time_min_t, time_min_f, val_min_t, val_min_f, lead] for transient (_t) resp. fixed (_f) :\n - time of global minimum (time_min) \n - value of global minimum (val_min) [this is the amplitude, but with a minus sign] \n - lead of transient w.r.t. fixed in yr (lead)\n Example usage:\n [time_min_t, time_min_f, val_min_t, val_min_f, lead] = calc_leads_lags(obj_t, obj_f) \n Author: Jeemijn Scheen, example@example.com'
if ('z_t' in obj_t.dims):
obj_t = obj_t.isel(z_t=d)
if ('z_t' in obj_f.dims):
obj_f = obj_f.isel(z_t=d)
[time_min_t_rough, val_min_t] = find_min(obj_t)
[time_min_f_rough, val_min_f] = find_min(obj_f)
time_min_t = find_local_min(obj_t, min_guess=time_min_t_rough)
time_min_f = find_local_min(obj_f, min_guess=time_min_f_rough)
lead = int(round((time_min_f - time_min_t)))
return [time_min_t, time_min_f, val_min_t, val_min_f, lead]<|docstring|>Calculate leads and lags between transient and fixed simulation at a fixed depth of 3 km.
This function is for the case of a global temperature minm (no maxm) e.g. LIA and industrial warming.
Input:
- obj_t is transient simulation (a variable e.g. TEMP depending on z_t and time)
- obj_f idem for fixed simulation
- d can be set to another depth than the default of d=26 (3km). Useful: 14, 21, 29 are 1, 2, 4 km, respectively.
Output:
In this order [time_min_t, time_min_f, val_min_t, val_min_f, lead] for transient (_t) resp. fixed (_f) :
- time of global minimum (time_min)
- value of global minimum (val_min) [this is the amplitude, but with a minus sign]
- lead of transient w.r.t. fixed in yr (lead)
Example usage:
[time_min_t, time_min_f, val_min_t, val_min_f, lead] = calc_leads_lags(obj_t, obj_f)
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
54de0d67a51ffa2472f6d9551b5337eaa650dac9a522bea15d2990d9ef8fa92c | def plot_leads_lags(obj_t, obj_f, ax, color='blue', labels=['transient', 'fixed'], align='lower', indic=True, d=26):
"Plots leads and lags between transient and fixed simulation at a fixed depth of 3 km.\n Input:\n - obj_t is transient simulation (a variable e.g. TEMP depending on z_t and time)\n - obj_f idem for fixed simulation\n - ax object of the subplot\n - color of graph line can be set\n - labels array can be given in. Legend is not plotted automatically but must be called\n - align is 'lower' (text and arrow appear below graph) or 'upper' (above graph)\n - if indic then indications and arrows of lead or lag are plotted along [default]\n - d can be set to another depth than the default of d=26 (3.1 km). Useful: 14, 21, 29 are ca. 1, 2, 4 km, resp.\n Output:\n - a plot is made on this axis, including an arrow and indication of the lead or lag in yr\n - axis object is returned\n Example usage:\n ax[1,1] = plot_leads_lags(obj_t, obj_f, ax=ax[1,1]) \n Author: Jeemijn Scheen, example@example.com"
if (color == 'blue'):
col_pos = 'forestgreen'
col_neg = 'deeppink'
else:
col_pos = 'g'
col_neg = 'purple'
pad = 5
if (align not in ('lower', 'upper')):
raise Exception("Align should be 'upper' or 'lower'.")
if (len(labels) is not 2):
raise Exception('Labels should have length 2.')
return
obj_t = obj_t.isel(z_t=d)
obj_f = obj_f.isel(z_t=d)
ax.plot(obj_t.time, obj_t, color, linestyle='solid', label=labels[0])
ax.plot(obj_f.time, obj_f, color, linestyle='dashed', label=labels[1])
if indic:
[time_min_t, time_min_f, val_min_t, val_min_f, lead] = calc_leads_lags(obj_t, obj_f, d=d)
if (lead >= 0):
lead_str = ('+' + str(lead))
lag_str = str((- lead))
col_ar = col_pos
else:
lead_str = str(lead)
lag_str = ('+' + str((- lead)))
col_ar = col_neg
ax.scatter([time_min_t], [val_min_t], color=col_ar, marker='x')
ax.scatter([time_min_f], [val_min_f], color=col_ar, marker='x')
if (align == 'lower'):
arrow_y = (val_min_t - pad)
text_y = (arrow_y - (1.5 * pad))
elif (align == 'upper'):
arrow_y = (0 + pad)
text_y = (arrow_y + pad)
ax.annotate('', xytext=(time_min_f, arrow_y), xy=(time_min_t, arrow_y), arrowprops=dict(arrowstyle='->', color=col_ar, linewidth=2))
ax.text(min(time_min_t, time_min_f), text_y, (lag_str + ' yr'), fontsize=16, color=col_ar, ha='left')
return ax | Plots leads and lags between transient and fixed simulation at a fixed depth of 3 km.
Input:
- obj_t is transient simulation (a variable e.g. TEMP depending on z_t and time)
- obj_f idem for fixed simulation
- ax object of the subplot
- color of graph line can be set
- labels array can be given in. Legend is not plotted automatically but must be called
- align is 'lower' (text and arrow appear below graph) or 'upper' (above graph)
- if indic then indications and arrows of lead or lag are plotted along [default]
- d can be set to another depth than the default of d=26 (3.1 km). Useful: 14, 21, 29 are ca. 1, 2, 4 km, resp.
Output:
- a plot is made on this axis, including an arrow and indication of the lead or lag in yr
- axis object is returned
Example usage:
ax[1,1] = plot_leads_lags(obj_t, obj_f, ax=ax[1,1])
Author: Jeemijn Scheen, example@example.com | functions.py | plot_leads_lags | jeemijn/LIA | 0 | python | def plot_leads_lags(obj_t, obj_f, ax, color='blue', labels=['transient', 'fixed'], align='lower', indic=True, d=26):
"Plots leads and lags between transient and fixed simulation at a fixed depth of 3 km.\n Input:\n - obj_t is transient simulation (a variable e.g. TEMP depending on z_t and time)\n - obj_f idem for fixed simulation\n - ax object of the subplot\n - color of graph line can be set\n - labels array can be given in. Legend is not plotted automatically but must be called\n - align is 'lower' (text and arrow appear below graph) or 'upper' (above graph)\n - if indic then indications and arrows of lead or lag are plotted along [default]\n - d can be set to another depth than the default of d=26 (3.1 km). Useful: 14, 21, 29 are ca. 1, 2, 4 km, resp.\n Output:\n - a plot is made on this axis, including an arrow and indication of the lead or lag in yr\n - axis object is returned\n Example usage:\n ax[1,1] = plot_leads_lags(obj_t, obj_f, ax=ax[1,1]) \n Author: Jeemijn Scheen, example@example.com"
if (color == 'blue'):
col_pos = 'forestgreen'
col_neg = 'deeppink'
else:
col_pos = 'g'
col_neg = 'purple'
pad = 5
if (align not in ('lower', 'upper')):
raise Exception("Align should be 'upper' or 'lower'.")
if (len(labels) is not 2):
raise Exception('Labels should have length 2.')
return
obj_t = obj_t.isel(z_t=d)
obj_f = obj_f.isel(z_t=d)
ax.plot(obj_t.time, obj_t, color, linestyle='solid', label=labels[0])
ax.plot(obj_f.time, obj_f, color, linestyle='dashed', label=labels[1])
if indic:
[time_min_t, time_min_f, val_min_t, val_min_f, lead] = calc_leads_lags(obj_t, obj_f, d=d)
if (lead >= 0):
lead_str = ('+' + str(lead))
lag_str = str((- lead))
col_ar = col_pos
else:
lead_str = str(lead)
lag_str = ('+' + str((- lead)))
col_ar = col_neg
ax.scatter([time_min_t], [val_min_t], color=col_ar, marker='x')
ax.scatter([time_min_f], [val_min_f], color=col_ar, marker='x')
if (align == 'lower'):
arrow_y = (val_min_t - pad)
text_y = (arrow_y - (1.5 * pad))
elif (align == 'upper'):
arrow_y = (0 + pad)
text_y = (arrow_y + pad)
ax.annotate(, xytext=(time_min_f, arrow_y), xy=(time_min_t, arrow_y), arrowprops=dict(arrowstyle='->', color=col_ar, linewidth=2))
ax.text(min(time_min_t, time_min_f), text_y, (lag_str + ' yr'), fontsize=16, color=col_ar, ha='left')
return ax | def plot_leads_lags(obj_t, obj_f, ax, color='blue', labels=['transient', 'fixed'], align='lower', indic=True, d=26):
"Plots leads and lags between transient and fixed simulation at a fixed depth of 3 km.\n Input:\n - obj_t is transient simulation (a variable e.g. TEMP depending on z_t and time)\n - obj_f idem for fixed simulation\n - ax object of the subplot\n - color of graph line can be set\n - labels array can be given in. Legend is not plotted automatically but must be called\n - align is 'lower' (text and arrow appear below graph) or 'upper' (above graph)\n - if indic then indications and arrows of lead or lag are plotted along [default]\n - d can be set to another depth than the default of d=26 (3.1 km). Useful: 14, 21, 29 are ca. 1, 2, 4 km, resp.\n Output:\n - a plot is made on this axis, including an arrow and indication of the lead or lag in yr\n - axis object is returned\n Example usage:\n ax[1,1] = plot_leads_lags(obj_t, obj_f, ax=ax[1,1]) \n Author: Jeemijn Scheen, example@example.com"
if (color == 'blue'):
col_pos = 'forestgreen'
col_neg = 'deeppink'
else:
col_pos = 'g'
col_neg = 'purple'
pad = 5
if (align not in ('lower', 'upper')):
raise Exception("Align should be 'upper' or 'lower'.")
if (len(labels) is not 2):
raise Exception('Labels should have length 2.')
return
obj_t = obj_t.isel(z_t=d)
obj_f = obj_f.isel(z_t=d)
ax.plot(obj_t.time, obj_t, color, linestyle='solid', label=labels[0])
ax.plot(obj_f.time, obj_f, color, linestyle='dashed', label=labels[1])
if indic:
[time_min_t, time_min_f, val_min_t, val_min_f, lead] = calc_leads_lags(obj_t, obj_f, d=d)
if (lead >= 0):
lead_str = ('+' + str(lead))
lag_str = str((- lead))
col_ar = col_pos
else:
lead_str = str(lead)
lag_str = ('+' + str((- lead)))
col_ar = col_neg
ax.scatter([time_min_t], [val_min_t], color=col_ar, marker='x')
ax.scatter([time_min_f], [val_min_f], color=col_ar, marker='x')
if (align == 'lower'):
arrow_y = (val_min_t - pad)
text_y = (arrow_y - (1.5 * pad))
elif (align == 'upper'):
arrow_y = (0 + pad)
text_y = (arrow_y + pad)
ax.annotate(, xytext=(time_min_f, arrow_y), xy=(time_min_t, arrow_y), arrowprops=dict(arrowstyle='->', color=col_ar, linewidth=2))
ax.text(min(time_min_t, time_min_f), text_y, (lag_str + ' yr'), fontsize=16, color=col_ar, ha='left')
return ax<|docstring|>Plots leads and lags between transient and fixed simulation at a fixed depth of 3 km.
Input:
- obj_t is transient simulation (a variable e.g. TEMP depending on z_t and time)
- obj_f idem for fixed simulation
- ax object of the subplot
- color of graph line can be set
- labels array can be given in. Legend is not plotted automatically but must be called
- align is 'lower' (text and arrow appear below graph) or 'upper' (above graph)
- if indic then indications and arrows of lead or lag are plotted along [default]
- d can be set to another depth than the default of d=26 (3.1 km). Useful: 14, 21, 29 are ca. 1, 2, 4 km, resp.
Output:
- a plot is made on this axis, including an arrow and indication of the lead or lag in yr
- axis object is returned
Example usage:
ax[1,1] = plot_leads_lags(obj_t, obj_f, ax=ax[1,1])
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
b0d957754e674f7c232096006ac79340bad77bad12ef31dba4add10e4be7bd58 | def plot_surface(fig, ax, x, y, z, title='', grid='T', cbar=True, cbar_label='', cmap=None, vmin=None, vmax=None, ticklabels=False):
"Makes a (lat,lon) plot using pcolor. \n Input:\n - fig and ax must be given\n - x and y are either obj.lon_u and obj.lat_u (if var on T grid) or obj.lon_t and obj.lat_t (if var on U grid)\n - z must be a lat x lon array \n Optional input:\n - title\n - grid can be 'T' [default] (if z values on lon_t x lat_t) or 'U' (if on lon_u x lat_u)\n - cbar determines whether a cbar is plotted [default True]\n - cbar_label can be set\n - cmap gives colormap to use\n - vmin, vmax give min and max of colorbar\n - ticklabels prints the tick labels of lat/lon\n Output:\n - [ax, cbar] axis and cbar object are given back.\n Example usage:\n ax[0] = plot_surface(fig, ax[0], obj.lon_u, obj.lat_u, obj.TEMP.isel(z_t=0, time=0).values) "
if (grid == 'T'):
if ((len(x) != 42) or (len(y) != 41) or (z.shape != (40, 41))):
raise Exception("x,y must be on u-grid and z on T-grid (if var is not on T-grid, then set: grid = 'U')")
if (cmap is None):
cpf = ax.pcolor(x, y, extend(z), vmin=vmin, vmax=vmax)
else:
cpf = ax.pcolor(x, y, extend(z), cmap=cmap, vmin=vmin, vmax=vmax)
elif (grid == 'U'):
if ((len(x) != 41) or (len(y) != 40) or (z.shape != (41, 42))):
raise Exception("x,y must be on T-grid and z on U-grid (if var is not on U-grid, then set: grid = 'T')")
return
if (cmap is None):
cpf = ax.pcolor(x, y, z, vmin=vmin, vmax=vmax)
else:
cpf = ax.pcolor(x, y, extend(z), cmap=cmap, vmin=vmin, vmax=vmax)
else:
raise Exception("Set grid to 'T' or 'U'.")
ax.set_title(title)
if (not ticklabels):
ax.tick_params(labelbottom=False, labelleft=False)
if cbar:
cbar = fig.colorbar(cpf, ax=ax, label=cbar_label)
return [ax, cbar]
else:
return [ax, cpf] | Makes a (lat,lon) plot using pcolor.
Input:
- fig and ax must be given
- x and y are either obj.lon_u and obj.lat_u (if var on T grid) or obj.lon_t and obj.lat_t (if var on U grid)
- z must be a lat x lon array
Optional input:
- title
- grid can be 'T' [default] (if z values on lon_t x lat_t) or 'U' (if on lon_u x lat_u)
- cbar determines whether a cbar is plotted [default True]
- cbar_label can be set
- cmap gives colormap to use
- vmin, vmax give min and max of colorbar
- ticklabels prints the tick labels of lat/lon
Output:
- [ax, cbar] axis and cbar object are given back.
Example usage:
ax[0] = plot_surface(fig, ax[0], obj.lon_u, obj.lat_u, obj.TEMP.isel(z_t=0, time=0).values) | functions.py | plot_surface | jeemijn/LIA | 0 | python | def plot_surface(fig, ax, x, y, z, title=, grid='T', cbar=True, cbar_label=, cmap=None, vmin=None, vmax=None, ticklabels=False):
"Makes a (lat,lon) plot using pcolor. \n Input:\n - fig and ax must be given\n - x and y are either obj.lon_u and obj.lat_u (if var on T grid) or obj.lon_t and obj.lat_t (if var on U grid)\n - z must be a lat x lon array \n Optional input:\n - title\n - grid can be 'T' [default] (if z values on lon_t x lat_t) or 'U' (if on lon_u x lat_u)\n - cbar determines whether a cbar is plotted [default True]\n - cbar_label can be set\n - cmap gives colormap to use\n - vmin, vmax give min and max of colorbar\n - ticklabels prints the tick labels of lat/lon\n Output:\n - [ax, cbar] axis and cbar object are given back.\n Example usage:\n ax[0] = plot_surface(fig, ax[0], obj.lon_u, obj.lat_u, obj.TEMP.isel(z_t=0, time=0).values) "
if (grid == 'T'):
if ((len(x) != 42) or (len(y) != 41) or (z.shape != (40, 41))):
raise Exception("x,y must be on u-grid and z on T-grid (if var is not on T-grid, then set: grid = 'U')")
if (cmap is None):
cpf = ax.pcolor(x, y, extend(z), vmin=vmin, vmax=vmax)
else:
cpf = ax.pcolor(x, y, extend(z), cmap=cmap, vmin=vmin, vmax=vmax)
elif (grid == 'U'):
if ((len(x) != 41) or (len(y) != 40) or (z.shape != (41, 42))):
raise Exception("x,y must be on T-grid and z on U-grid (if var is not on U-grid, then set: grid = 'T')")
return
if (cmap is None):
cpf = ax.pcolor(x, y, z, vmin=vmin, vmax=vmax)
else:
cpf = ax.pcolor(x, y, extend(z), cmap=cmap, vmin=vmin, vmax=vmax)
else:
raise Exception("Set grid to 'T' or 'U'.")
ax.set_title(title)
if (not ticklabels):
ax.tick_params(labelbottom=False, labelleft=False)
if cbar:
cbar = fig.colorbar(cpf, ax=ax, label=cbar_label)
return [ax, cbar]
else:
return [ax, cpf] | def plot_surface(fig, ax, x, y, z, title=, grid='T', cbar=True, cbar_label=, cmap=None, vmin=None, vmax=None, ticklabels=False):
"Makes a (lat,lon) plot using pcolor. \n Input:\n - fig and ax must be given\n - x and y are either obj.lon_u and obj.lat_u (if var on T grid) or obj.lon_t and obj.lat_t (if var on U grid)\n - z must be a lat x lon array \n Optional input:\n - title\n - grid can be 'T' [default] (if z values on lon_t x lat_t) or 'U' (if on lon_u x lat_u)\n - cbar determines whether a cbar is plotted [default True]\n - cbar_label can be set\n - cmap gives colormap to use\n - vmin, vmax give min and max of colorbar\n - ticklabels prints the tick labels of lat/lon\n Output:\n - [ax, cbar] axis and cbar object are given back.\n Example usage:\n ax[0] = plot_surface(fig, ax[0], obj.lon_u, obj.lat_u, obj.TEMP.isel(z_t=0, time=0).values) "
if (grid == 'T'):
if ((len(x) != 42) or (len(y) != 41) or (z.shape != (40, 41))):
raise Exception("x,y must be on u-grid and z on T-grid (if var is not on T-grid, then set: grid = 'U')")
if (cmap is None):
cpf = ax.pcolor(x, y, extend(z), vmin=vmin, vmax=vmax)
else:
cpf = ax.pcolor(x, y, extend(z), cmap=cmap, vmin=vmin, vmax=vmax)
elif (grid == 'U'):
if ((len(x) != 41) or (len(y) != 40) or (z.shape != (41, 42))):
raise Exception("x,y must be on T-grid and z on U-grid (if var is not on U-grid, then set: grid = 'T')")
return
if (cmap is None):
cpf = ax.pcolor(x, y, z, vmin=vmin, vmax=vmax)
else:
cpf = ax.pcolor(x, y, extend(z), cmap=cmap, vmin=vmin, vmax=vmax)
else:
raise Exception("Set grid to 'T' or 'U'.")
ax.set_title(title)
if (not ticklabels):
ax.tick_params(labelbottom=False, labelleft=False)
if cbar:
cbar = fig.colorbar(cpf, ax=ax, label=cbar_label)
return [ax, cbar]
else:
return [ax, cpf]<|docstring|>Makes a (lat,lon) plot using pcolor.
Input:
- fig and ax must be given
- x and y are either obj.lon_u and obj.lat_u (if var on T grid) or obj.lon_t and obj.lat_t (if var on U grid)
- z must be a lat x lon array
Optional input:
- title
- grid can be 'T' [default] (if z values on lon_t x lat_t) or 'U' (if on lon_u x lat_u)
- cbar determines whether a cbar is plotted [default True]
- cbar_label can be set
- cmap gives colormap to use
- vmin, vmax give min and max of colorbar
- ticklabels prints the tick labels of lat/lon
Output:
- [ax, cbar] axis and cbar object are given back.
Example usage:
ax[0] = plot_surface(fig, ax[0], obj.lon_u, obj.lat_u, obj.TEMP.isel(z_t=0, time=0).values)<|endoftext|> |
25b0e2b19e699725be7adfe8b6dc94468941bc37503e7ff811414a4d715a7fb2 | def make_colormap(seq):
'Return a LinearSegmentedColormap\n Input:\n - seq is a sequence of floats and RGB-tuples. The floats should be increasing\n and in the interval (0,1). \n Explanation:\n - For discrete boundaries, located at the floats: mention every colour once.\n - For fluent gradient boundaries: define the same colour on both sides of the float.\n Source: \n https://stackoverflow.com/questions/16834861/create-own-colormap-using-matplotlib-and-plot-color-scale\n '
from matplotlib import colors as colors
seq = (([((None,) * 3), 0.0] + list(seq)) + [1.0, ((None,) * 3)])
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate(seq):
if isinstance(item, float):
(r1, g1, b1) = seq[(i - 1)]
(r2, g2, b2) = seq[(i + 1)]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return colors.LinearSegmentedColormap('CustomMap', cdict) | Return a LinearSegmentedColormap
Input:
- seq is a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
Explanation:
- For discrete boundaries, located at the floats: mention every colour once.
- For fluent gradient boundaries: define the same colour on both sides of the float.
Source:
https://stackoverflow.com/questions/16834861/create-own-colormap-using-matplotlib-and-plot-color-scale | functions.py | make_colormap | jeemijn/LIA | 0 | python | def make_colormap(seq):
'Return a LinearSegmentedColormap\n Input:\n - seq is a sequence of floats and RGB-tuples. The floats should be increasing\n and in the interval (0,1). \n Explanation:\n - For discrete boundaries, located at the floats: mention every colour once.\n - For fluent gradient boundaries: define the same colour on both sides of the float.\n Source: \n https://stackoverflow.com/questions/16834861/create-own-colormap-using-matplotlib-and-plot-color-scale\n '
from matplotlib import colors as colors
seq = (([((None,) * 3), 0.0] + list(seq)) + [1.0, ((None,) * 3)])
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate(seq):
if isinstance(item, float):
(r1, g1, b1) = seq[(i - 1)]
(r2, g2, b2) = seq[(i + 1)]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return colors.LinearSegmentedColormap('CustomMap', cdict) | def make_colormap(seq):
'Return a LinearSegmentedColormap\n Input:\n - seq is a sequence of floats and RGB-tuples. The floats should be increasing\n and in the interval (0,1). \n Explanation:\n - For discrete boundaries, located at the floats: mention every colour once.\n - For fluent gradient boundaries: define the same colour on both sides of the float.\n Source: \n https://stackoverflow.com/questions/16834861/create-own-colormap-using-matplotlib-and-plot-color-scale\n '
from matplotlib import colors as colors
seq = (([((None,) * 3), 0.0] + list(seq)) + [1.0, ((None,) * 3)])
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate(seq):
if isinstance(item, float):
(r1, g1, b1) = seq[(i - 1)]
(r2, g2, b2) = seq[(i + 1)]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return colors.LinearSegmentedColormap('CustomMap', cdict)<|docstring|>Return a LinearSegmentedColormap
Input:
- seq is a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
Explanation:
- For discrete boundaries, located at the floats: mention every colour once.
- For fluent gradient boundaries: define the same colour on both sides of the float.
Source:
https://stackoverflow.com/questions/16834861/create-own-colormap-using-matplotlib-and-plot-color-scale<|endoftext|> |
5413a8c07bc26054d336525e76b921249d8535825a1016561ee32f32c5527a58 | def extend(var):
'\n Adds one element of rank-2 matrices to be plotted by pcolor\n Author: Raphael Roth, example@example.com\n '
from numpy import ma as ma
from numpy import ones, nan
[a, b] = var.shape
field = ma.masked_invalid((ones(((a + 1), (b + 1))) * nan))
field[(0:a, 0:b)] = var
return field | Adds one element of rank-2 matrices to be plotted by pcolor
Author: Raphael Roth, example@example.com | functions.py | extend | jeemijn/LIA | 0 | python | def extend(var):
'\n Adds one element of rank-2 matrices to be plotted by pcolor\n Author: Raphael Roth, example@example.com\n '
from numpy import ma as ma
from numpy import ones, nan
[a, b] = var.shape
field = ma.masked_invalid((ones(((a + 1), (b + 1))) * nan))
field[(0:a, 0:b)] = var
return field | def extend(var):
'\n Adds one element of rank-2 matrices to be plotted by pcolor\n Author: Raphael Roth, example@example.com\n '
from numpy import ma as ma
from numpy import ones, nan
[a, b] = var.shape
field = ma.masked_invalid((ones(((a + 1), (b + 1))) * nan))
field[(0:a, 0:b)] = var
return field<|docstring|>Adds one element of rank-2 matrices to be plotted by pcolor
Author: Raphael Roth, example@example.com<|endoftext|> |
def create_land_mask(obj, data_full):
    """Create a land mask suitable for a (lat,z)-plot or (lon,lat)-plot.

    Input:
    - obj: xarray object from whose nan values the land mask is created,
      e.g. any variable masked to the Atlantic basin.  obj must have either
      (lat_t, z_t) or (lat_u, z_w) or (lon_t, lat_t) coordinates.
    - data_full: xarray data set that contains the respective coords on the
      u, v, w grid (kept for interface compatibility; not used here).

    Output:
    - [mask, cmap_land]
      NB cmap_land is independent of the land mask itself but just needed
      for plotting.

    Usage example for a (lat,z)-plot:
        X, Y = np.meshgrid(data_full.lat_u.values, data_full.z_w.values)
        # if obj has (z_t, lat_t) coords:
        ax[i].pcolormesh(X, Y, extend(mask), cmap=cmap_land, vmin=-0.5, vmax=0.5)
        # if obj has (z_w, lat_u) coords: identical but without extend()

    Author: Jeemijn Scheen, example@example.com
    """
    from numpy import isnan, ma
    from matplotlib import pyplot as plt

    # The mask is time-independent; drop the time dimension if present.
    if 'time' in obj.dims:
        obj = obj.isel(time=0)

    has_valid_coords = (('lat_t' in obj.dims and 'z_t' in obj.dims)
                        or ('lat_u' in obj.dims and 'z_w' in obj.dims)
                        or ('lon_t' in obj.dims and 'lat_t' in obj.dims))
    if not has_valid_coords:
        raise Exception('obj should have either (z_t,lat_t) or (z_w,lat_u) or (lon_t,lat_t) coord')
    # Land must be encoded as nan, otherwise there is nothing to mask.
    if not isnan(obj).any():
        raise Exception('obj has no nan values. Change 0 values to nan values first (if appropriate).')

    # mask is True over land (nan cells).  Mask out the ocean (False) cells
    # so that pcolormesh only draws the land cells.
    mask = isnan(obj)
    mask = ma.masked_where(~mask, mask)

    # Grey colormap for land; use a copy so the globally registered 'Greys'
    # colormap is not mutated (modifying a registered colormap is an error
    # in matplotlib >= 3.7).
    cmap_land = plt.cm.Greys.copy()
    cmap_land.set_bad(color='white')
    return [mask, cmap_land]
def plot_hovmoeller(obj, fig, ax, zoom=None, levels=None, hi=None, lo=None,
                    levelarr=-1, ridges=True, cbar=True, title=''):
    """Make a Hovmoeller diagram:
    a contour plot with x=time, y=depth and colors=temperature (or another
    variable on the T-grid).

    Input:
    - obj: xarray DataArray containing coords time and z_t (values e.g. TEMP)
    - fig: figure object (needed to plot the colorbar), e.g. from
      fig, ax = plt.subplots(1, 2)
    - ax: axis object to draw on; use e.g. ax or ax[2] if multiple subplots

    Optional inputs:
    - zoom: fixed nr of years (e.g. if 1200, only the first 1200 years are
      plotted)
    - hi and lo: max and min of contours, respectively
    - either levels or levelarr defines the contours:
      - if neither is given, automatic contour levels are used and the
        colorbar will not be nicely centered
      - levelarr (if supplied) overwrites levels
      - levels is the number of uniformly spaced levels between lo and hi
      - levelarr = [a, b, c] with 3x explicit levels (stepsize may vary;
        0 should be in the middle)
    - ridges: plot the ridge where cooling/warming starts.  Can behave
      badly if changes are small.
    - cbar: whether to plot a colorbar
    - title: title of the plot

    Output:
    - makes the plot on this axis object
    - returns:
      - if not cbar: cpf (the contourf mappable, for making a colorbar)
      - if ridges: ridge values
      - if both of the above: [cpf, ridge values]

    Author: Jeemijn Scheen, example@example.com
    """
    from numpy import meshgrid, arange, concatenate

    if obj.shape[0] < 2:
        raise Exception('Object needs to have at least 2 timesteps.')
    elif obj.shape[1] < 2:
        raise Exception('Object needs to have at least 2 depth steps.')

    if zoom is not None:
        # keep only the first `zoom` years
        obj = obj.sel(time=slice(obj.time[0], zoom))

    xlist = obj.time.values
    ylist = obj.z_t.values
    Z = obj.values.transpose()  # contour expects (depth, time)
    X, Y = meshgrid(xlist, ylist)

    if levelarr != -1:
        # three explicit groups of contour levels (0 should be in the middle)
        [a, b, c] = levelarr
        level_arr = concatenate((a, b, c)).tolist()
        cpf = ax.contourf(X, Y, Z, level_arr, cmap='coolwarm', extend='both')
        cp1 = ax.contour(X, Y, Z, a, colors='k', linestyles='-', linewidths=0.5)
        cp2 = ax.contour(X, Y, Z, b, colors='k', linestyles='-', linewidths=0.5)
        cp3 = ax.contour(X, Y, Z, c, colors='k', linestyles='-', linewidths=0.5)
        cp0 = ax.contour(X, Y, Z, [0.0], colors='k', linestyles='-', linewidths=0.5)
        # label the outer groups and the zero line; the middle group (cp2)
        # is drawn but intentionally left unlabelled
        for cp in (cp1, cp3, cp0):
            ax.clabel(cp, inline=True, fontsize=12, colors='k',
                      use_clabeltext=True, fmt='%1.0f')
    elif levels is not None:
        # `levels` uniformly spaced contours between lo and hi
        step = (hi - lo) / levels
        level_arr = arange(lo, hi + step, step)
        cpf = ax.contourf(X, Y, Z, level_arr, cmap='coolwarm', extend='both')
        cp = ax.contour(X, Y, Z, level_arr, colors='k', linestyles='-',
                        linewidths=0.5)
        ax.clabel(cp, inline=True, fontsize=12, colors='k',
                  use_clabeltext=True, fmt='%1.0f')
    else:
        # automatic contour levels; colorbar will not be centered around 0
        cpf = ax.contourf(X, Y, Z, cmap='coolwarm')
        cp = ax.contour(X, Y, Z, colors='k', linestyles='-', linewidths=0.5)
        ax.clabel(cp, inline=True, fontsize=12, colors='k',
                  use_clabeltext=True, fmt='%1.0f')

    ax.invert_yaxis()  # depth increases downward
    ax.set_xlabel('simulation year')
    ax.set_ylabel('Depth [km]')
    ax.set_title(title)

    if ridges:
        # ridge = depth-dependent time where cooling/warming starts;
        # find_ridges() is defined elsewhere in this file
        ridge_min = find_ridges(obj, only_min=True, min_guess=1750.0, fast=True)
        ax.plot(ridge_min, obj.z_t.values, 'r')
    if cbar:
        fig.colorbar(cpf, ax=ax, label='temp. anomaly [cK]')
        if ridges:
            return ridge_min
        # with cbar=True and ridges=False nothing is returned (None),
        # matching the documented contract
    elif ridges:
        return [cpf, ridge_min]
    else:
        return cpf
def plot_contour(obj, fig, ax, var='OPSI', levels=None, hi=None, lo=None,
                 cbar=True, title='', cmap=None, add_perc=False, extend=None):
    """Make a contour plot with x=lat, y=depth; 3 variables are possible:
    1) var = 'OPSI': colors = OPSI (overturning psi, stream function)
    2) var = 'TEMP': a lat-lon plot is made, e.g. for a temperature,
       which must be in cK
    3) var = 'CONC': idem as OPSI but plots a concentration, so no dotted
       streamline contours etc.

    Input (required):
    - obj: xarray DataArray containing coords lat and z_t (values e.g.
      OPSI+GM_OPSI)
    - fig: figure object (needed to plot the colorbar), e.g. from
      fig, ax = plt.subplots(1, 2)
    - ax: axis object; use e.g. ax or ax[2] if multiple subplots

    Input (optional):
    - var: see the 3 options above
    - levels, hi and lo: nr of contours, highest and lowest value.
      NB if levels is None, automatic contour levels are used and the
      colorbar will not be nicely centered.
      NB if var='CONC', colorbar ticks are hardcoded to maximal 6 ticks.
    - cbar: whether to plot a colorbar
    - title: title of the plot
    - cmap: colormap (inspiration: Oranges, Blues, Purples, PuOr_r,
      viridis) [default: coolwarm]
    - add_perc: add a '%' after each colorbar tick label (=unit for dye
      concentrations)
    - extend: tell the colorbar to extend: 'both', 'neither', 'upper' or
      'lower' (default: automatic).  NOTE: this parameter shadows the
      module-level extend() helper inside this function.

    Output:
    - plot is made on the axis object
    - if cbar: cbar object is returned (allows you to change the cbar ticks)
    - else: cpf object is returned (allows you to make a cbar)

    Author: Jeemijn Scheen, example@example.com
    """
    from numpy import meshgrid, arange, concatenate, floor, asarray, unique, sort
    from matplotlib import ticker

    if obj.shape[0] < 2:
        raise Exception('object needs to have at least 2 depth steps.')
    elif obj.shape[1] < 2:
        raise Exception('object needs to have at least 2 steps on the x axis of contour plot.')
    if cmap is None:
        cmap = 'coolwarm'

    # pick coordinate axes and colorbar unit for the chosen variable
    if var == 'OPSI':
        xlist = obj.lat_u.values
        ylist = obj.z_w.values
        unit = 'Sv'
    elif var == 'TEMP':
        xlist = obj.lon_t.values
        ylist = obj.lat_t.values
        unit = '[cK]'
    elif var == 'CONC':
        xlist = obj.lat_t.values
        ylist = obj.z_t.values
        unit = ''
    else:
        raise Exception('var must be equal to OPSI or TEMP or CONC')

    Z = obj.values
    X, Y = meshgrid(xlist, ylist)

    if levels is not None:
        step = (hi - lo) / float(levels)
        level_arr = arange(lo, hi + step, step)
        # snap near-zero levels to exactly 0 so the zero contour is found
        level_arr[abs(level_arr) < 0.0001] = 0.0
        # fewer decimals in contour labels for large value ranges
        if asarray(level_arr).max() >= 10.0:
            fmt = '%1.0f'
        else:
            fmt = '%1.1f'
        if var == 'OPSI':
            # dashed for negative cells, solid for positive,
            # thick line for the zero streamline
            cp_neg = ax.contour(X, Y, Z, level_arr[level_arr < 0], colors='k',
                                linestyles='dashed', linewidths=0.5)
            cp_pos = ax.contour(X, Y, Z, level_arr[level_arr > 0], colors='k',
                                linestyles='-', linewidths=0.5)
            cp0 = ax.contour(X, Y, Z, level_arr[abs(level_arr) < 0.0001],
                             colors='k', linestyles='-', linewidths=1.5)
            for cp in [cp_neg, cp_pos, cp0]:
                ax.clabel(cp, inline=True, fontsize=12, colors='k',
                          use_clabeltext=True, fmt=fmt)
        elif var == 'TEMP':
            # label only small levels (|x| <= 10) plus multiples of 10
            this_level_arr = concatenate((level_arr[abs(level_arr) <= 10.0],
                                          level_arr[(level_arr % 10) == 0]))
            this_level_arr = unique(sort(this_level_arr))
            cp = ax.contour(X, Y, Z, this_level_arr, colors='k',
                            linestyles='-', linewidths=0.5)
            # keep one decimal only for half-degree levels
            fmt_dict = {x: (str(x) if (x - floor(x)) == 0.5
                            else str(int(round(x))))
                        for x in this_level_arr}
            ax.clabel(cp, inline=True, fontsize=12, colors='k',
                      use_clabeltext=True, fmt=fmt_dict)
        elif var == 'CONC':
            cp_non0 = ax.contour(X, Y, Z, level_arr[abs(level_arr) > 0.0001],
                                 colors='k', linestyles='-', linewidths=0.5)
            cp0 = ax.contour(X, Y, Z, level_arr[abs(level_arr) < 0.0001],
                             colors='k', linestyles='-', linewidths=1.5)
            for cp in [cp_non0, cp0]:
                ax.clabel(cp, inline=True, fontsize=12, colors='k',
                          use_clabeltext=True, fmt=fmt)
        # filled contours: drawn under the map (zorder 0) for TEMP,
        # over it otherwise
        if var == 'TEMP':
            zorder = 0
        else:
            zorder = 1
        if extend is None:
            cpf = ax.contourf(X, Y, Z, level_arr, cmap=cmap, zorder=zorder)
        else:
            cpf = ax.contourf(X, Y, Z, level_arr, cmap=cmap, extend=extend,
                              zorder=zorder)
    else:
        # automatic contour levels (colorbar not centered around 0);
        # this branch must not touch level_arr, which is undefined here
        cpf = ax.contourf(X, Y, Z, cmap=cmap)
        cp = ax.contour(X, Y, Z, colors='k', linestyles='-', linewidths=0.5)
        ax.clabel(cp, inline=True, fontsize=12, colors='k',
                  use_clabeltext=True)

    if cbar:
        if var == 'TEMP':
            cbar_obj = fig.colorbar(cpf, ax=ax, label=unit,
                                    orientation='horizontal', pad=0.15)
        else:
            cbar_obj = fig.colorbar(cpf, ax=ax, label=unit)
        if var == 'CONC':
            # at most 6 colorbar ticks for concentrations
            tick_locator = ticker.MaxNLocator(nbins=6)
            cbar_obj.locator = tick_locator
            cbar_obj.update_ticks()
        if hi is not None:
            # relabel the colorbar ticks; needs hi, so skip when automatic
            # levels are used (hi is None)
            perc = '%' if add_perc else ''
            cticks = cbar_obj.get_ticks()

            def _fmt_tick(x):
                # exact '0' near zero; fewer decimals for larger ranges
                if abs(x) < 0.0001:
                    return '0' + perc
                if hi >= 10.0:
                    return str(int(round(x))) + perc
                if hi > 5.0:
                    return str(round(x, 1)) + perc
                return str(round(x, 2)) + perc

            cbar_obj.ax.set_yticklabels([_fmt_tick(x) for x in cticks])

    ax.set_title(title)
    if var != 'TEMP':
        ax.set_xlabel('Latitude')
        ax.set_ylabel('Depth [km]')
        ax.set_ylim(0, 5)
        ax.invert_yaxis()  # depth increases downward
        ax.set_yticks(range(0, 6, 1))
    if cbar:
        return cbar_obj
    else:
        return cpf
return cpf | def plot_contour(obj, fig, ax, var='OPSI', levels=None, hi=None, lo=None, cbar=True, title=, cmap=None, add_perc=False, extend=None):
"Makes a contour plot with x=lat, y=depth and for colors 3 variables are possible:\n 1) var = 'OPSI' then colors = OPSI (overturning psi, stream function)\n 2) var = 'TEMP' then a lat-lon plot is made e.g. for a temperature, which must be in cK.\n 3) var = 'CONC' idem as OPSI but plots a concentration so no dotted streamline contours etc\n\n Input (required):\n - obj needs to be set to an xarray DataArray containing coords lat and z_t (values eg OPSI+GM_OPSI)\n - fig needs to be given (in order to be able to plot colorbar); from e.g. fig,ax=plt.subplots(1,2)\n - ax needs to be set to an axis object; use e.g. ax or ax[2] if multiple subplots\n \n Input (optional):\n - var: see 3 options above\n - levels, hi and lo can be set to nr of contours, highest and lowest value, respectively.\n NB if levels = None, automatic contour levels are used and the colorbar will not be nicely centered.\n NB if var='CONC', colorbar ticks are hardcoded to maximal 6 ticks\n - cbar can be plotted or not\n - title of the plot can be set\n - cmap sets colormap (inspiration: Oranges, Blues, Purples, PuOr_r, viridis) [default: coolwarm]\n - add_perc adds a '%' after each colorbar tick label (=unit for dye concentrations)\n - extend tells the colorbar to extend: 'both', 'neither', 'upper' or 'lower' (default: automatic)\n \n Output:\n - plot is made on axis object\n - if cbar: cbar object is returned (allows you to change the cbar ticks)\n - else: cpf object is returned (allows you to make a cbar)\n \n Author: Jeemijn Scheen, example@example.com"
from numpy import meshgrid, arange, ceil, concatenate, floor, asarray, unique, sort
from matplotlib import ticker, colors
if (obj.shape[0] < 2):
raise Exception('object needs to have at least 2 depth steps.')
elif (obj.shape[1] < 2):
raise Exception('object needs to have at least 2 steps on the x axis of contour plot.')
if (cmap is None):
cmap = 'coolwarm'
if (var == 'OPSI'):
xlist = obj.lat_u.values
ylist = obj.z_w.values
unit = 'Sv'
elif (var == 'TEMP'):
xlist = obj.lon_t.values
ylist = obj.lat_t.values
unit = '[cK]'
elif (var == 'CONC'):
xlist = obj.lat_t.values
ylist = obj.z_t.values
unit =
else:
raise Exception('var must be equal to OPSI or TEMP or CONC')
Z = obj.values
(X, Y) = meshgrid(xlist, ylist)
if (levels != None):
step = ((hi - lo) / float(levels))
level_arr = arange(lo, (hi + step), step)
level_arr[(abs(level_arr) < 0.0001)] = 0.0
if (asarray(level_arr).max() >= 10.0):
fmt = '%1.0f'
else:
fmt = '%1.1f'
if (var == 'OPSI'):
cp_neg = ax.contour(X, Y, Z, level_arr[(level_arr < 0)], colors='k', linestyles='dashed', linewidths=0.5)
cp_pos = ax.contour(X, Y, Z, level_arr[(level_arr > 0)], colors='k', linestyles='-', linewidths=0.5)
cp0 = ax.contour(X, Y, Z, level_arr[(abs(level_arr) < 0.0001)], colors='k', linestyles='-', linewidths=1.5)
for cp in [cp_neg, cp_pos, cp0]:
ax.clabel(cp, inline=True, fontsize=12, colors='k', use_clabeltext=True, fmt=fmt)
elif (var == 'TEMP'):
this_level_arr = concatenate((level_arr[(abs(level_arr) <= 10.0)], level_arr[((level_arr % 10) == 0)]))
this_level_arr = unique(sort(this_level_arr))
cp = ax.contour(X, Y, Z, this_level_arr, colors='k', linestyles='-', linewidths=0.5)
fmt_dict = {x: (str(x) if ((x - floor(x)) == 0.5) else str(int(round(x)))) for x in this_level_arr}
ax.clabel(cp, inline=True, fontsize=12, colors='k', use_clabeltext=True, fmt=fmt_dict)
elif (var == 'CONC'):
cp_non0 = ax.contour(X, Y, Z, level_arr[(abs(level_arr) > 0.0001)], colors='k', linestyles='-', linewidths=0.5)
cp0 = ax.contour(X, Y, Z, level_arr[(abs(level_arr) < 0.0001)], colors='k', linestyles='-', linewidths=1.5)
for cp in [cp_non0, cp0]:
ax.clabel(cp, inline=True, fontsize=12, colors='k', use_clabeltext=True, fmt=fmt)
else:
cpf = ax.contourf(X, Y, Z, cmap=cmap)
cp = ax.contour(X, Y, Z, colors='k', linestyles='-', linewidths=0.5)
ax.clabel(cp, inline=True, fontsize=12, colors='k', use_clabeltext=True)
if (var == 'TEMP'):
zorder = 0
else:
zorder = 1
if (extend is None):
cpf = ax.contourf(X, Y, Z, level_arr, cmap=cmap, zorder=zorder)
else:
cpf = ax.contourf(X, Y, Z, level_arr, cmap=cmap, extend=extend, zorder=zorder)
if cbar:
if (var == 'TEMP'):
cbar_obj = fig.colorbar(cpf, ax=ax, label=unit, orientation='horizontal', pad=0.15)
else:
cbar_obj = fig.colorbar(cpf, ax=ax, label=unit)
if (var == 'CONC'):
tick_locator = ticker.MaxNLocator(nbins=6)
cbar_obj.locator = tick_locator
cbar_obj.update_ticks()
if add_perc:
perc = '%'
else:
perc =
cticks = cbar_obj.get_ticks()
ctick_labels = [(('0' + perc) if (abs(x) < 0.0001) else ((str(int(round(x))) + perc) if (hi >= 10.0) else ((str(round(x, 1)) + perc) if (hi > 5.0) else (str(round(x, 2)) + perc)))) for x in cticks]
cbar_obj.ax.set_yticklabels(ctick_labels)
ax.set_title(title)
if (var != 'TEMP'):
ax.set_xlabel('Latitude')
ax.set_ylabel('Depth [km]')
ax.set_ylim(0, 5)
ax.invert_yaxis()
ax.set_yticks(range(0, 6, 1))
if cbar:
return cbar_obj
else:
return cpf<|docstring|>Makes a contour plot with x=lat, y=depth and for colors 3 variables are possible:
1) var = 'OPSI' then colors = OPSI (overturning psi, stream function)
2) var = 'TEMP' then a lat-lon plot is made e.g. for a temperature, which must be in cK.
3) var = 'CONC' idem as OPSI but plots a concentration so no dotted streamline contours etc
Input (required):
- obj needs to be set to an xarray DataArray containing coords lat and z_t (values eg OPSI+GM_OPSI)
- fig needs to be given (in order to be able to plot colorbar); from e.g. fig,ax=plt.subplots(1,2)
- ax needs to be set to an axis object; use e.g. ax or ax[2] if multiple subplots
Input (optional):
- var: see 3 options above
- levels, hi and lo can be set to nr of contours, highest and lowest value, respectively.
NB if levels = None, automatic contour levels are used and the colorbar will not be nicely centered.
NB if var='CONC', colorbar ticks are hardcoded to maximal 6 ticks
- cbar can be plotted or not
- title of the plot can be set
- cmap sets colormap (inspiration: Oranges, Blues, Purples, PuOr_r, viridis) [default: coolwarm]
- add_perc adds a '%' after each colorbar tick label (=unit for dye concentrations)
- extend tells the colorbar to extend: 'both', 'neither', 'upper' or 'lower' (default: automatic)
Output:
- plot is made on axis object
- if cbar: cbar object is returned (allows you to change the cbar ticks)
- else: cpf object is returned (allows you to make a cbar)
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
7db0b0ef62ce1a2ffcbc0b03f11da14de3b2acc7fd0cc94bc107b25a0b9f03d5 | def plot_overturning(data, data_full, times, time_avg=False, atl=True, pac=False, sozoom=False, levels=None, lo=None, hi=None, land=True, all_anoms=False):
'Plots figure of overturning stream function panels at certain time steps and basins.\n Columns:\n - a column for every t in times array\n Rows: \n - if atl: overturning as measured only in Atlantic basin\n - if pac: overturning as measured only in Pacific basin\n - if sozoom: Southern Ocean sector of global overturning\n - global overturning (always plotted)\n \n Input:\n - data and data_full xarray datasets with depth in kilometers\n - times array with time indices, e.g., 50 stands for data_full.time[50]\n - time_avg [default False] plots a 30 year average around the selected time steps instead of the 1 annual value \n NB for t=0 a 15 year average on the future side is taken\n - atl, pac and/or sozoom basins (rows; see above)\n - levels, lo and hi set the number of colour levels and min resp. max boundaries\n - land [optional] prints black land on top\n - all_anoms [optional] plots all values as anomalies w.r.t. t1 except the first column (t1)\n NB anomaly plots have a hardcoded colorbar between -2 and 2 Sv\n \n Output:\n - returns [fig, ax]: a figure and axis handle\n \n Author: Jeemijn Scheen, example@example.com'
so_bnd = (- 80)
vmin = 0.8
vmax = 1.5
from matplotlib.pyplot import subplots, suptitle, tight_layout
from numpy import zeros, ceil, sum, nan, meshgrid
row_nr = (1 + sum([atl, pac, sozoom]))
col_nr = len(times)
if all_anoms:
hi_anom = 2.0
lo_anom = (- 2.0)
levels_anom = 10
opsi_all_t = (data_full.OPSI + data_full.GMOPSI)
opsi_a_all_t = (data_full.OPSIA + data_full.GMOPSIA)
opsi_p_all_t = (data_full.OPSIP + data_full.GMOPSIP)
if land:
opsi_all_t = opsi_all_t.where((opsi_all_t != 0.0), nan)
opsi_a_all_t = opsi_a_all_t.where((opsi_a_all_t != 0.0), nan)
opsi_p_all_t = opsi_p_all_t.where((opsi_p_all_t != 0.0), nan)
[mask_gl, cmap_land_gl] = create_land_mask(opsi_all_t, data_full)
[mask_atl, cmap_land_atl] = create_land_mask(opsi_a_all_t, data_full)
[mask_pac, cmap_land_pac] = create_land_mask(opsi_p_all_t, data_full)
(X, Y) = meshgrid(data_full.lat_u.values, data_full.z_w.values)
(fig, ax) = subplots(nrows=row_nr, ncols=col_nr, figsize=(14, (3 * row_nr)))
for i in range(0, row_nr):
for j in range(0, col_nr):
ax[(i, j)].set_xticks(range((- 75), 80, 25))
this_row = 0
if atl:
opsi = {}
for (n, t) in enumerate(times):
if (row_nr == 1):
this_ax = ax[n]
else:
this_ax = ax[(this_row, n)]
if time_avg:
opsi[n] = opsi_a_all_t.sel(time=slice((t - 16), (t + 16))).mean(dim='time')
else:
opsi[n] = opsi_a_all_t.sel(time=t)
this_title = 'Atlantic overturning'
this_ax.set_xlim([(- 50), 90])
if land:
this_ax.pcolormesh(X, Y, mask_atl, cmap=cmap_land_atl, vmin=vmin, vmax=vmax)
if (all_anoms and (n != 0)):
opsi_diff = (opsi[n] - opsi[0])
plot_contour(opsi_diff, fig, ax=this_ax, levels=levels_anom, lo=lo_anom, hi=hi_anom, var='OPSI', extend='both', title=(this_title + ' anomaly'))
else:
plot_contour(opsi[n], fig, ax=this_ax, levels=levels, lo=lo, hi=hi, var='OPSI', extend='both', title=this_title)
this_row += 1
if pac:
opsi = {}
for (n, t) in enumerate(times):
if (row_nr == 1):
this_ax = ax[n]
else:
this_ax = ax[(this_row, n)]
if time_avg:
opsi[n] = opsi_p_all_t.sel(time=slice((t - 16), (t + 16))).mean(dim='time')
else:
opsi[n] = opsi_p_all_t.sel(time=t)
this_title = 'Indo-Pacific overturning'
this_ax.set_xlim([(- 50), 90])
if land:
this_ax.pcolormesh(X, Y, mask_pac, cmap=cmap_land_pac, vmin=vmin, vmax=vmax)
if (all_anoms and (n != 0)):
opsi_diff = (opsi[n] - opsi[0])
plot_contour(opsi_diff, fig, ax=this_ax, levels=levels_anom, lo=lo_anom, hi=hi_anom, var='OPSI', extend='both', title=(this_title + ' anomaly'))
else:
plot_contour(opsi[n], fig, ax=this_ax, levels=levels, lo=lo, hi=hi, var='OPSI', extend='both', title=this_title)
this_row += 1
if sozoom:
opsi = {}
for (n, t) in enumerate(times):
if (row_nr == 1):
this_ax = ax[n]
else:
this_ax = ax[(this_row, n)]
if time_avg:
opsi[n] = opsi_all_t.sel(lat_u=slice(so_bnd, (- 50)), time=slice((t - 16), (t + 16))).mean(dim='time')
else:
opsi[n] = opsi_all_t.sel(lat_u=slice(so_bnd, (- 50)), time=t)
this_title = 'Southern Ocean overturning'
this_ax.set_xticks(range((- 90), (- 45), 10))
if land:
this_ax.pcolormesh(X, Y, mask_gl, cmap=cmap_land_gl, vmin=vmin, vmax=vmax)
this_ax.set_xlim([so_bnd, (- 50)])
if (all_anoms and (n != 0)):
opsi_diff = (opsi[n] - opsi[0])
plot_contour(opsi_diff, fig, ax=this_ax, levels=levels_anom, lo=lo_anom, hi=hi_anom, var='OPSI', extend='both', title=(this_title + ' anomaly'))
else:
plot_contour(opsi[n], fig, ax=this_ax, levels=levels, lo=lo, hi=hi, var='OPSI', extend='both', title=this_title)
this_row += 1
opsi = {}
for (n, t) in enumerate(times):
if (row_nr == 1):
this_ax = ax[n]
else:
this_ax = ax[(this_row, n)]
if time_avg:
opsi[n] = opsi_all_t.sel(time=slice((t - 16), (t + 16))).mean(dim='time')
else:
opsi[n] = opsi_all_t.sel(time=t)
this_title = 'Global overturning'
if land:
this_ax.pcolormesh(X, Y, mask_gl, cmap=cmap_land_gl, vmin=vmin, vmax=vmax)
if (all_anoms and (n != 0)):
opsi_diff = (opsi[n] - opsi[0])
plot_contour(opsi_diff, fig, ax=this_ax, levels=levels_anom, lo=lo_anom, hi=hi_anom, var='OPSI', extend='both', title=(this_title + ' anomaly'))
else:
plot_contour(opsi[n], fig, ax=this_ax, levels=levels, lo=lo, hi=hi, var='OPSI', extend='both', title=this_title)
if time_avg:
print(('@%1.0f CE: global MOC min=%1.2f Sv, AMOC max=%1.2f Sv' % (ceil(times[n]), data.OPSI_min.sel(time=slice((t - 16), (t + 16))).mean(dim='time'), data.OPSIA_max.sel(time=slice((t - 16), (t + 16))).mean(dim='time'))))
else:
print(('@%1.0f CE: global MOC min=%1.2f Sv, AMOC max=%1.2f Sv' % (ceil(times[n]), data.OPSI_min.sel(time=t).item(), data.OPSIA_max.sel(time=t).item())))
tight_layout()
return (fig, ax) | Plots figure of overturning stream function panels at certain time steps and basins.
Columns:
- a column for every t in times array
Rows:
- if atl: overturning as measured only in Atlantic basin
- if pac: overturning as measured only in Pacific basin
- if sozoom: Southern Ocean sector of global overturning
- global overturning (always plotted)
Input:
- data and data_full xarray datasets with depth in kilometers
- times array with time indices, e.g., 50 stands for data_full.time[50]
- time_avg [default False] plots a 30 year average around the selected time steps instead of the 1 annual value
NB for t=0 a 15 year average on the future side is taken
- atl, pac and/or sozoom basins (rows; see above)
- levels, lo and hi set the number of colour levels and min resp. max boundaries
- land [optional] prints black land on top
- all_anoms [optional] plots all values as anomalies w.r.t. t1 except the first column (t1)
NB anomaly plots have a hardcoded colorbar between -2 and 2 Sv
Output:
- returns [fig, ax]: a figure and axis handle
Author: Jeemijn Scheen, example@example.com | functions.py | plot_overturning | jeemijn/LIA | 0 | python | def plot_overturning(data, data_full, times, time_avg=False, atl=True, pac=False, sozoom=False, levels=None, lo=None, hi=None, land=True, all_anoms=False):
'Plots figure of overturning stream function panels at certain time steps and basins.\n Columns:\n - a column for every t in times array\n Rows: \n - if atl: overturning as measured only in Atlantic basin\n - if pac: overturning as measured only in Pacific basin\n - if sozoom: Southern Ocean sector of global overturning\n - global overturning (always plotted)\n \n Input:\n - data and data_full xarray datasets with depth in kilometers\n - times array with time indices, e.g., 50 stands for data_full.time[50]\n - time_avg [default False] plots a 30 year average around the selected time steps instead of the 1 annual value \n NB for t=0 a 15 year average on the future side is taken\n - atl, pac and/or sozoom basins (rows; see above)\n - levels, lo and hi set the number of colour levels and min resp. max boundaries\n - land [optional] prints black land on top\n - all_anoms [optional] plots all values as anomalies w.r.t. t1 except the first column (t1)\n NB anomaly plots have a hardcoded colorbar between -2 and 2 Sv\n \n Output:\n - returns [fig, ax]: a figure and axis handle\n \n Author: Jeemijn Scheen, example@example.com'
so_bnd = (- 80)
vmin = 0.8
vmax = 1.5
from matplotlib.pyplot import subplots, suptitle, tight_layout
from numpy import zeros, ceil, sum, nan, meshgrid
row_nr = (1 + sum([atl, pac, sozoom]))
col_nr = len(times)
if all_anoms:
hi_anom = 2.0
lo_anom = (- 2.0)
levels_anom = 10
opsi_all_t = (data_full.OPSI + data_full.GMOPSI)
opsi_a_all_t = (data_full.OPSIA + data_full.GMOPSIA)
opsi_p_all_t = (data_full.OPSIP + data_full.GMOPSIP)
if land:
opsi_all_t = opsi_all_t.where((opsi_all_t != 0.0), nan)
opsi_a_all_t = opsi_a_all_t.where((opsi_a_all_t != 0.0), nan)
opsi_p_all_t = opsi_p_all_t.where((opsi_p_all_t != 0.0), nan)
[mask_gl, cmap_land_gl] = create_land_mask(opsi_all_t, data_full)
[mask_atl, cmap_land_atl] = create_land_mask(opsi_a_all_t, data_full)
[mask_pac, cmap_land_pac] = create_land_mask(opsi_p_all_t, data_full)
(X, Y) = meshgrid(data_full.lat_u.values, data_full.z_w.values)
(fig, ax) = subplots(nrows=row_nr, ncols=col_nr, figsize=(14, (3 * row_nr)))
for i in range(0, row_nr):
for j in range(0, col_nr):
ax[(i, j)].set_xticks(range((- 75), 80, 25))
this_row = 0
if atl:
opsi = {}
for (n, t) in enumerate(times):
if (row_nr == 1):
this_ax = ax[n]
else:
this_ax = ax[(this_row, n)]
if time_avg:
opsi[n] = opsi_a_all_t.sel(time=slice((t - 16), (t + 16))).mean(dim='time')
else:
opsi[n] = opsi_a_all_t.sel(time=t)
this_title = 'Atlantic overturning'
this_ax.set_xlim([(- 50), 90])
if land:
this_ax.pcolormesh(X, Y, mask_atl, cmap=cmap_land_atl, vmin=vmin, vmax=vmax)
if (all_anoms and (n != 0)):
opsi_diff = (opsi[n] - opsi[0])
plot_contour(opsi_diff, fig, ax=this_ax, levels=levels_anom, lo=lo_anom, hi=hi_anom, var='OPSI', extend='both', title=(this_title + ' anomaly'))
else:
plot_contour(opsi[n], fig, ax=this_ax, levels=levels, lo=lo, hi=hi, var='OPSI', extend='both', title=this_title)
this_row += 1
if pac:
opsi = {}
for (n, t) in enumerate(times):
if (row_nr == 1):
this_ax = ax[n]
else:
this_ax = ax[(this_row, n)]
if time_avg:
opsi[n] = opsi_p_all_t.sel(time=slice((t - 16), (t + 16))).mean(dim='time')
else:
opsi[n] = opsi_p_all_t.sel(time=t)
this_title = 'Indo-Pacific overturning'
this_ax.set_xlim([(- 50), 90])
if land:
this_ax.pcolormesh(X, Y, mask_pac, cmap=cmap_land_pac, vmin=vmin, vmax=vmax)
if (all_anoms and (n != 0)):
opsi_diff = (opsi[n] - opsi[0])
plot_contour(opsi_diff, fig, ax=this_ax, levels=levels_anom, lo=lo_anom, hi=hi_anom, var='OPSI', extend='both', title=(this_title + ' anomaly'))
else:
plot_contour(opsi[n], fig, ax=this_ax, levels=levels, lo=lo, hi=hi, var='OPSI', extend='both', title=this_title)
this_row += 1
if sozoom:
opsi = {}
for (n, t) in enumerate(times):
if (row_nr == 1):
this_ax = ax[n]
else:
this_ax = ax[(this_row, n)]
if time_avg:
opsi[n] = opsi_all_t.sel(lat_u=slice(so_bnd, (- 50)), time=slice((t - 16), (t + 16))).mean(dim='time')
else:
opsi[n] = opsi_all_t.sel(lat_u=slice(so_bnd, (- 50)), time=t)
this_title = 'Southern Ocean overturning'
this_ax.set_xticks(range((- 90), (- 45), 10))
if land:
this_ax.pcolormesh(X, Y, mask_gl, cmap=cmap_land_gl, vmin=vmin, vmax=vmax)
this_ax.set_xlim([so_bnd, (- 50)])
if (all_anoms and (n != 0)):
opsi_diff = (opsi[n] - opsi[0])
plot_contour(opsi_diff, fig, ax=this_ax, levels=levels_anom, lo=lo_anom, hi=hi_anom, var='OPSI', extend='both', title=(this_title + ' anomaly'))
else:
plot_contour(opsi[n], fig, ax=this_ax, levels=levels, lo=lo, hi=hi, var='OPSI', extend='both', title=this_title)
this_row += 1
opsi = {}
for (n, t) in enumerate(times):
if (row_nr == 1):
this_ax = ax[n]
else:
this_ax = ax[(this_row, n)]
if time_avg:
opsi[n] = opsi_all_t.sel(time=slice((t - 16), (t + 16))).mean(dim='time')
else:
opsi[n] = opsi_all_t.sel(time=t)
this_title = 'Global overturning'
if land:
this_ax.pcolormesh(X, Y, mask_gl, cmap=cmap_land_gl, vmin=vmin, vmax=vmax)
if (all_anoms and (n != 0)):
opsi_diff = (opsi[n] - opsi[0])
plot_contour(opsi_diff, fig, ax=this_ax, levels=levels_anom, lo=lo_anom, hi=hi_anom, var='OPSI', extend='both', title=(this_title + ' anomaly'))
else:
plot_contour(opsi[n], fig, ax=this_ax, levels=levels, lo=lo, hi=hi, var='OPSI', extend='both', title=this_title)
if time_avg:
print(('@%1.0f CE: global MOC min=%1.2f Sv, AMOC max=%1.2f Sv' % (ceil(times[n]), data.OPSI_min.sel(time=slice((t - 16), (t + 16))).mean(dim='time'), data.OPSIA_max.sel(time=slice((t - 16), (t + 16))).mean(dim='time'))))
else:
print(('@%1.0f CE: global MOC min=%1.2f Sv, AMOC max=%1.2f Sv' % (ceil(times[n]), data.OPSI_min.sel(time=t).item(), data.OPSIA_max.sel(time=t).item())))
tight_layout()
return (fig, ax) | def plot_overturning(data, data_full, times, time_avg=False, atl=True, pac=False, sozoom=False, levels=None, lo=None, hi=None, land=True, all_anoms=False):
'Plots figure of overturning stream function panels at certain time steps and basins.\n Columns:\n - a column for every t in times array\n Rows: \n - if atl: overturning as measured only in Atlantic basin\n - if pac: overturning as measured only in Pacific basin\n - if sozoom: Southern Ocean sector of global overturning\n - global overturning (always plotted)\n \n Input:\n - data and data_full xarray datasets with depth in kilometers\n - times array with time indices, e.g., 50 stands for data_full.time[50]\n - time_avg [default False] plots a 30 year average around the selected time steps instead of the 1 annual value \n NB for t=0 a 15 year average on the future side is taken\n - atl, pac and/or sozoom basins (rows; see above)\n - levels, lo and hi set the number of colour levels and min resp. max boundaries\n - land [optional] prints black land on top\n - all_anoms [optional] plots all values as anomalies w.r.t. t1 except the first column (t1)\n NB anomaly plots have a hardcoded colorbar between -2 and 2 Sv\n \n Output:\n - returns [fig, ax]: a figure and axis handle\n \n Author: Jeemijn Scheen, example@example.com'
so_bnd = (- 80)
vmin = 0.8
vmax = 1.5
from matplotlib.pyplot import subplots, suptitle, tight_layout
from numpy import zeros, ceil, sum, nan, meshgrid
row_nr = (1 + sum([atl, pac, sozoom]))
col_nr = len(times)
if all_anoms:
hi_anom = 2.0
lo_anom = (- 2.0)
levels_anom = 10
opsi_all_t = (data_full.OPSI + data_full.GMOPSI)
opsi_a_all_t = (data_full.OPSIA + data_full.GMOPSIA)
opsi_p_all_t = (data_full.OPSIP + data_full.GMOPSIP)
if land:
opsi_all_t = opsi_all_t.where((opsi_all_t != 0.0), nan)
opsi_a_all_t = opsi_a_all_t.where((opsi_a_all_t != 0.0), nan)
opsi_p_all_t = opsi_p_all_t.where((opsi_p_all_t != 0.0), nan)
[mask_gl, cmap_land_gl] = create_land_mask(opsi_all_t, data_full)
[mask_atl, cmap_land_atl] = create_land_mask(opsi_a_all_t, data_full)
[mask_pac, cmap_land_pac] = create_land_mask(opsi_p_all_t, data_full)
(X, Y) = meshgrid(data_full.lat_u.values, data_full.z_w.values)
(fig, ax) = subplots(nrows=row_nr, ncols=col_nr, figsize=(14, (3 * row_nr)))
for i in range(0, row_nr):
for j in range(0, col_nr):
ax[(i, j)].set_xticks(range((- 75), 80, 25))
this_row = 0
if atl:
opsi = {}
for (n, t) in enumerate(times):
if (row_nr == 1):
this_ax = ax[n]
else:
this_ax = ax[(this_row, n)]
if time_avg:
opsi[n] = opsi_a_all_t.sel(time=slice((t - 16), (t + 16))).mean(dim='time')
else:
opsi[n] = opsi_a_all_t.sel(time=t)
this_title = 'Atlantic overturning'
this_ax.set_xlim([(- 50), 90])
if land:
this_ax.pcolormesh(X, Y, mask_atl, cmap=cmap_land_atl, vmin=vmin, vmax=vmax)
if (all_anoms and (n != 0)):
opsi_diff = (opsi[n] - opsi[0])
plot_contour(opsi_diff, fig, ax=this_ax, levels=levels_anom, lo=lo_anom, hi=hi_anom, var='OPSI', extend='both', title=(this_title + ' anomaly'))
else:
plot_contour(opsi[n], fig, ax=this_ax, levels=levels, lo=lo, hi=hi, var='OPSI', extend='both', title=this_title)
this_row += 1
if pac:
opsi = {}
for (n, t) in enumerate(times):
if (row_nr == 1):
this_ax = ax[n]
else:
this_ax = ax[(this_row, n)]
if time_avg:
opsi[n] = opsi_p_all_t.sel(time=slice((t - 16), (t + 16))).mean(dim='time')
else:
opsi[n] = opsi_p_all_t.sel(time=t)
this_title = 'Indo-Pacific overturning'
this_ax.set_xlim([(- 50), 90])
if land:
this_ax.pcolormesh(X, Y, mask_pac, cmap=cmap_land_pac, vmin=vmin, vmax=vmax)
if (all_anoms and (n != 0)):
opsi_diff = (opsi[n] - opsi[0])
plot_contour(opsi_diff, fig, ax=this_ax, levels=levels_anom, lo=lo_anom, hi=hi_anom, var='OPSI', extend='both', title=(this_title + ' anomaly'))
else:
plot_contour(opsi[n], fig, ax=this_ax, levels=levels, lo=lo, hi=hi, var='OPSI', extend='both', title=this_title)
this_row += 1
if sozoom:
opsi = {}
for (n, t) in enumerate(times):
if (row_nr == 1):
this_ax = ax[n]
else:
this_ax = ax[(this_row, n)]
if time_avg:
opsi[n] = opsi_all_t.sel(lat_u=slice(so_bnd, (- 50)), time=slice((t - 16), (t + 16))).mean(dim='time')
else:
opsi[n] = opsi_all_t.sel(lat_u=slice(so_bnd, (- 50)), time=t)
this_title = 'Southern Ocean overturning'
this_ax.set_xticks(range((- 90), (- 45), 10))
if land:
this_ax.pcolormesh(X, Y, mask_gl, cmap=cmap_land_gl, vmin=vmin, vmax=vmax)
this_ax.set_xlim([so_bnd, (- 50)])
if (all_anoms and (n != 0)):
opsi_diff = (opsi[n] - opsi[0])
plot_contour(opsi_diff, fig, ax=this_ax, levels=levels_anom, lo=lo_anom, hi=hi_anom, var='OPSI', extend='both', title=(this_title + ' anomaly'))
else:
plot_contour(opsi[n], fig, ax=this_ax, levels=levels, lo=lo, hi=hi, var='OPSI', extend='both', title=this_title)
this_row += 1
opsi = {}
for (n, t) in enumerate(times):
if (row_nr == 1):
this_ax = ax[n]
else:
this_ax = ax[(this_row, n)]
if time_avg:
opsi[n] = opsi_all_t.sel(time=slice((t - 16), (t + 16))).mean(dim='time')
else:
opsi[n] = opsi_all_t.sel(time=t)
this_title = 'Global overturning'
if land:
this_ax.pcolormesh(X, Y, mask_gl, cmap=cmap_land_gl, vmin=vmin, vmax=vmax)
if (all_anoms and (n != 0)):
opsi_diff = (opsi[n] - opsi[0])
plot_contour(opsi_diff, fig, ax=this_ax, levels=levels_anom, lo=lo_anom, hi=hi_anom, var='OPSI', extend='both', title=(this_title + ' anomaly'))
else:
plot_contour(opsi[n], fig, ax=this_ax, levels=levels, lo=lo, hi=hi, var='OPSI', extend='both', title=this_title)
if time_avg:
print(('@%1.0f CE: global MOC min=%1.2f Sv, AMOC max=%1.2f Sv' % (ceil(times[n]), data.OPSI_min.sel(time=slice((t - 16), (t + 16))).mean(dim='time'), data.OPSIA_max.sel(time=slice((t - 16), (t + 16))).mean(dim='time'))))
else:
print(('@%1.0f CE: global MOC min=%1.2f Sv, AMOC max=%1.2f Sv' % (ceil(times[n]), data.OPSI_min.sel(time=t).item(), data.OPSIA_max.sel(time=t).item())))
tight_layout()
return (fig, ax)<|docstring|>Plots figure of overturning stream function panels at certain time steps and basins.
Columns:
- a column for every t in times array
Rows:
- if atl: overturning as measured only in Atlantic basin
- if pac: overturning as measured only in Pacific basin
- if sozoom: Southern Ocean sector of global overturning
- global overturning (always plotted)
Input:
- data and data_full xarray datasets with depth in kilometers
- times array with time indices, e.g., 50 stands for data_full.time[50]
- time_avg [default False] plots a 30 year average around the selected time steps instead of the 1 annual value
NB for t=0 a 15 year average on the future side is taken
- atl, pac and/or sozoom basins (rows; see above)
- levels, lo and hi set the number of colour levels and min resp. max boundaries
- land [optional] prints black land on top
- all_anoms [optional] plots all values as anomalies w.r.t. t1 except the first column (t1)
NB anomaly plots have a hardcoded colorbar between -2 and 2 Sv
Output:
- returns [fig, ax]: a figure and axis handle
Author: Jeemijn Scheen, example@example.com<|endoftext|> |
e276063ec53b2f307dd9e34e67c3f7977f9ff335396aa28f631bb6fa9dc5867e | def Bern3D_longitude(ds):
'Converts an xarray dataset with GH19 conventions to Bern3D grid conventions'
ds = ds.rename_dims(dims_dict={'longitude': 'lon_t', 'latitude': 'lat_t'})
ds = ds.rename({'longitude': 'lon_t', 'latitude': 'lat_t'})
ds.lon_t.values = [((t.item() + 360) if (t < 100.0) else t.item()) for t in ds.lon_t]
ds = ds.sortby(ds.lon_t)
return ds | Converts an xarray dataset with GH19 conventions to Bern3D grid conventions | functions.py | Bern3D_longitude | jeemijn/LIA | 0 | python | def Bern3D_longitude(ds):
ds = ds.rename_dims(dims_dict={'longitude': 'lon_t', 'latitude': 'lat_t'})
ds = ds.rename({'longitude': 'lon_t', 'latitude': 'lat_t'})
ds.lon_t.values = [((t.item() + 360) if (t < 100.0) else t.item()) for t in ds.lon_t]
ds = ds.sortby(ds.lon_t)
return ds | def Bern3D_longitude(ds):
ds = ds.rename_dims(dims_dict={'longitude': 'lon_t', 'latitude': 'lat_t'})
ds = ds.rename({'longitude': 'lon_t', 'latitude': 'lat_t'})
ds.lon_t.values = [((t.item() + 360) if (t < 100.0) else t.item()) for t in ds.lon_t]
ds = ds.sortby(ds.lon_t)
return ds<|docstring|>Converts an xarray dataset with GH19 conventions to Bern3D grid conventions<|endoftext|> |
b5341abde3de3c51adeda768ce571598bf4784868b1c39c99a4b23c322c3ed5e | def build_backbone(cfg: DictConfig, input_shape: ShapeSpec):
'\n Build a ImageClassificationBackbone defined by `cfg.MODEL.BACKBONE.name`.\n '
backbone_name = cfg.model.backbone.name
cls = IMAGE_CLASSIFIER_BACKBONES.get(backbone_name)
init_args = cfg.model.backbone.init_args
backbone = cls.from_config_dict(init_args, input_shape=input_shape)
assert isinstance(backbone, ImageClassificationBackbone)
return backbone | Build a ImageClassificationBackbone defined by `cfg.MODEL.BACKBONE.name`. | gale/classification/model/build.py | build_backbone | benihime91/litcv | 0 | python | def build_backbone(cfg: DictConfig, input_shape: ShapeSpec):
'\n \n '
backbone_name = cfg.model.backbone.name
cls = IMAGE_CLASSIFIER_BACKBONES.get(backbone_name)
init_args = cfg.model.backbone.init_args
backbone = cls.from_config_dict(init_args, input_shape=input_shape)
assert isinstance(backbone, ImageClassificationBackbone)
return backbone | def build_backbone(cfg: DictConfig, input_shape: ShapeSpec):
'\n \n '
backbone_name = cfg.model.backbone.name
cls = IMAGE_CLASSIFIER_BACKBONES.get(backbone_name)
init_args = cfg.model.backbone.init_args
backbone = cls.from_config_dict(init_args, input_shape=input_shape)
assert isinstance(backbone, ImageClassificationBackbone)
return backbone<|docstring|>Build a ImageClassificationBackbone defined by `cfg.MODEL.BACKBONE.name`.<|endoftext|> |
8d266e45628e370419ba017e948223196d1f48d994beafa4d096b3dccc47f109 | def build_head(cfg: DictConfig, input_shape: ShapeSpec):
'\n Build ImageClassification defined by `cfg.MODEL.HEAD.name`.\n '
name = cfg.model.head.name
cls = IMAGE_CLASSIFIER_HEADS.get(name)
init_args = cfg.model.head.init_args
head = cls.from_config_dict(init_args, input_shape=input_shape)
assert isinstance(head, ImageClassificationHead)
return head | Build ImageClassification defined by `cfg.MODEL.HEAD.name`. | gale/classification/model/build.py | build_head | benihime91/litcv | 0 | python | def build_head(cfg: DictConfig, input_shape: ShapeSpec):
'\n \n '
name = cfg.model.head.name
cls = IMAGE_CLASSIFIER_HEADS.get(name)
init_args = cfg.model.head.init_args
head = cls.from_config_dict(init_args, input_shape=input_shape)
assert isinstance(head, ImageClassificationHead)
return head | def build_head(cfg: DictConfig, input_shape: ShapeSpec):
'\n \n '
name = cfg.model.head.name
cls = IMAGE_CLASSIFIER_HEADS.get(name)
init_args = cfg.model.head.init_args
head = cls.from_config_dict(init_args, input_shape=input_shape)
assert isinstance(head, ImageClassificationHead)
return head<|docstring|>Build ImageClassification defined by `cfg.MODEL.HEAD.name`.<|endoftext|> |
764bf5ef76b88957f6bbc090116a966897b645f009965f01e3c0e03ba80f4345 | def __init__(self, url=WEBLYZARD_API_URL, usr=WEBLYZARD_API_USER, pwd=WEBLYZARD_API_PASS, default_timeout=None):
'\n :param url: URL of the jeremia web service\n :param usr: optional user name\n :param pwd: optional password\n '
MultiRESTClient.__init__(self, service_urls=url, user=usr, password=pwd, default_timeout=default_timeout) | :param url: URL of the jeremia web service
:param usr: optional user name
:param pwd: optional password | src/python/weblyzard_api/client/domain_specificity.py | __init__ | weblyzard/weblyzard_api | 9 | python | def __init__(self, url=WEBLYZARD_API_URL, usr=WEBLYZARD_API_USER, pwd=WEBLYZARD_API_PASS, default_timeout=None):
'\n :param url: URL of the jeremia web service\n :param usr: optional user name\n :param pwd: optional password\n '
MultiRESTClient.__init__(self, service_urls=url, user=usr, password=pwd, default_timeout=default_timeout) | def __init__(self, url=WEBLYZARD_API_URL, usr=WEBLYZARD_API_USER, pwd=WEBLYZARD_API_PASS, default_timeout=None):
'\n :param url: URL of the jeremia web service\n :param usr: optional user name\n :param pwd: optional password\n '
MultiRESTClient.__init__(self, service_urls=url, user=usr, password=pwd, default_timeout=default_timeout)<|docstring|>:param url: URL of the jeremia web service
:param usr: optional user name
:param pwd: optional password<|endoftext|> |
0fc308a5633ef1d3eb9823abf3ffecaa3aed591c978e41b259c2d33bd91d7514 | def add_profile(self, profile_name, profile_mapping):
'\n Adds a domain-specificity profile to the Web service.\n\n :param profile_name: the name of the domain specificity profile\n :param profile_mapping: a dictionary of keywords and their respective domain specificity values.\n '
return self.request(('add_or_refresh_profile/%s' % profile_name), profile_mapping, execute_all_services=True) | Adds a domain-specificity profile to the Web service.
:param profile_name: the name of the domain specificity profile
:param profile_mapping: a dictionary of keywords and their respective domain specificity values. | src/python/weblyzard_api/client/domain_specificity.py | add_profile | weblyzard/weblyzard_api | 9 | python | def add_profile(self, profile_name, profile_mapping):
'\n Adds a domain-specificity profile to the Web service.\n\n :param profile_name: the name of the domain specificity profile\n :param profile_mapping: a dictionary of keywords and their respective domain specificity values.\n '
return self.request(('add_or_refresh_profile/%s' % profile_name), profile_mapping, execute_all_services=True) | def add_profile(self, profile_name, profile_mapping):
'\n Adds a domain-specificity profile to the Web service.\n\n :param profile_name: the name of the domain specificity profile\n :param profile_mapping: a dictionary of keywords and their respective domain specificity values.\n '
return self.request(('add_or_refresh_profile/%s' % profile_name), profile_mapping, execute_all_services=True)<|docstring|>Adds a domain-specificity profile to the Web service.
:param profile_name: the name of the domain specificity profile
:param profile_mapping: a dictionary of keywords and their respective domain specificity values.<|endoftext|> |
a293299232f4132b40a3736377798dbd2bdcaa73c975b409c23e7579546a5ba0 | def get_domain_specificity(self, profile_name, documents, is_case_sensitive=True):
' \n :param profile_name: the name of the domain specificity profile to use.\n :param documents: a list of dictionaries containing the document\n :param is_case_sensitive: whether to consider case or not (default: True) \n '
return self.request(('parse_documents/%s/%s' % (profile_name, is_case_sensitive)), documents) | :param profile_name: the name of the domain specificity profile to use.
:param documents: a list of dictionaries containing the document
:param is_case_sensitive: whether to consider case or not (default: True) | src/python/weblyzard_api/client/domain_specificity.py | get_domain_specificity | weblyzard/weblyzard_api | 9 | python | def get_domain_specificity(self, profile_name, documents, is_case_sensitive=True):
' \n :param profile_name: the name of the domain specificity profile to use.\n :param documents: a list of dictionaries containing the document\n :param is_case_sensitive: whether to consider case or not (default: True) \n '
return self.request(('parse_documents/%s/%s' % (profile_name, is_case_sensitive)), documents) | def get_domain_specificity(self, profile_name, documents, is_case_sensitive=True):
' \n :param profile_name: the name of the domain specificity profile to use.\n :param documents: a list of dictionaries containing the document\n :param is_case_sensitive: whether to consider case or not (default: True) \n '
return self.request(('parse_documents/%s/%s' % (profile_name, is_case_sensitive)), documents)<|docstring|>:param profile_name: the name of the domain specificity profile to use.
:param documents: a list of dictionaries containing the document
:param is_case_sensitive: whether to consider case or not (default: True)<|endoftext|> |
678ea2ba2d1eaf64ebff69eeddedb757edc6872854f27ebae697d76558acda2d | def parse_documents(self, matview_name, documents, is_case_sensitive=False, batch_size=None):
' \n :param matview_name: a comma separated list of matview_names to check for domain specificity.\n :param documents: a list of dictionaries containing the document\n :param is_case_sensitive: case sensitive or not\n :returns: dict (profilename: (content_id, dom_spec)) \n '
found_tags = {}
for document_batch in self.get_document_batch(documents=documents, batch_size=batch_size):
result = self.request(('parse_documents/%s/%s' % (matview_name, is_case_sensitive)), document_batch)
if result:
found_tags.update(result[matview_name])
return found_tags | :param matview_name: a comma separated list of matview_names to check for domain specificity.
:param documents: a list of dictionaries containing the document
:param is_case_sensitive: case sensitive or not
:returns: dict (profilename: (content_id, dom_spec)) | src/python/weblyzard_api/client/domain_specificity.py | parse_documents | weblyzard/weblyzard_api | 9 | python | def parse_documents(self, matview_name, documents, is_case_sensitive=False, batch_size=None):
' \n :param matview_name: a comma separated list of matview_names to check for domain specificity.\n :param documents: a list of dictionaries containing the document\n :param is_case_sensitive: case sensitive or not\n :returns: dict (profilename: (content_id, dom_spec)) \n '
found_tags = {}
for document_batch in self.get_document_batch(documents=documents, batch_size=batch_size):
result = self.request(('parse_documents/%s/%s' % (matview_name, is_case_sensitive)), document_batch)
if result:
found_tags.update(result[matview_name])
return found_tags | def parse_documents(self, matview_name, documents, is_case_sensitive=False, batch_size=None):
' \n :param matview_name: a comma separated list of matview_names to check for domain specificity.\n :param documents: a list of dictionaries containing the document\n :param is_case_sensitive: case sensitive or not\n :returns: dict (profilename: (content_id, dom_spec)) \n '
found_tags = {}
for document_batch in self.get_document_batch(documents=documents, batch_size=batch_size):
result = self.request(('parse_documents/%s/%s' % (matview_name, is_case_sensitive)), document_batch)
if result:
found_tags.update(result[matview_name])
return found_tags<|docstring|>:param matview_name: a comma separated list of matview_names to check for domain specificity.
:param documents: a list of dictionaries containing the document
:param is_case_sensitive: case sensitive or not
:returns: dict (profilename: (content_id, dom_spec))<|endoftext|> |
db3aa822d7cf93fe65fd0fe017e1218b2e09ac27928ea7b0f2b4831ce3a9e45a | def list_profiles(self):
'\n :returns: a list of all available domain specificity profiles.\n '
return self.request('list_profiles') | :returns: a list of all available domain specificity profiles. | src/python/weblyzard_api/client/domain_specificity.py | list_profiles | weblyzard/weblyzard_api | 9 | python | def list_profiles(self):
'\n \n '
return self.request('list_profiles') | def list_profiles(self):
'\n \n '
return self.request('list_profiles')<|docstring|>:returns: a list of all available domain specificity profiles.<|endoftext|> |
ef0f7f74e104e54ab7463e2f25aa1d106ee3b0d15472c90d144740f1ae2549fa | def has_profile(self, profile_name):
'\n Returns whether the given profile exists on the server.\n\n :param profile_name: the name of the domain specificity profile to check. \n :returns: ``True`` if the given profile exists on the server.\n '
return (profile_name in self.list_profiles()) | Returns whether the given profile exists on the server.
:param profile_name: the name of the domain specificity profile to check.
:returns: ``True`` if the given profile exists on the server. | src/python/weblyzard_api/client/domain_specificity.py | has_profile | weblyzard/weblyzard_api | 9 | python | def has_profile(self, profile_name):
'\n Returns whether the given profile exists on the server.\n\n :param profile_name: the name of the domain specificity profile to check. \n :returns: ``True`` if the given profile exists on the server.\n '
return (profile_name in self.list_profiles()) | def has_profile(self, profile_name):
'\n Returns whether the given profile exists on the server.\n\n :param profile_name: the name of the domain specificity profile to check. \n :returns: ``True`` if the given profile exists on the server.\n '
return (profile_name in self.list_profiles())<|docstring|>Returns whether the given profile exists on the server.
:param profile_name: the name of the domain specificity profile to check.
:returns: ``True`` if the given profile exists on the server.<|endoftext|> |
bb89bb48f62562f6e04458dfddc24eaa8f5b5bb44f9b0a6d436046d67e2cf527 | def meminfo(self):
"\n :returns: Information on the web service's memory consumption\n "
return self.request('meminfo') | :returns: Information on the web service's memory consumption | src/python/weblyzard_api/client/domain_specificity.py | meminfo | weblyzard/weblyzard_api | 9 | python | def meminfo(self):
"\n \n "
return self.request('meminfo') | def meminfo(self):
"\n \n "
return self.request('meminfo')<|docstring|>:returns: Information on the web service's memory consumption<|endoftext|> |
ae0a271626a467b42921761378b4ec1b97bd609d521513269a3774e6b8b2c7a0 | def calculate_size(name, new_value):
' Calculates the request payload size'
data_size = 0
data_size += calculate_size_str(name)
data_size += LONG_SIZE_IN_BYTES
return data_size | Calculates the request payload size | hazelcast/protocol/codec/atomic_long_set_codec.py | calculate_size | SaitTalhaNisanci/hazelcast-python-client | 3 | python | def calculate_size(name, new_value):
' '
data_size = 0
data_size += calculate_size_str(name)
data_size += LONG_SIZE_IN_BYTES
return data_size | def calculate_size(name, new_value):
' '
data_size = 0
data_size += calculate_size_str(name)
data_size += LONG_SIZE_IN_BYTES
return data_size<|docstring|>Calculates the request payload size<|endoftext|> |
5b052c3e26cf1c36d902806e28551b97f8b65dcb32066dcfdbe08f291307afa7 | def encode_request(name, new_value):
' Encode request into client_message'
client_message = ClientMessage(payload_size=calculate_size(name, new_value))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_long(new_value)
client_message.update_frame_length()
return client_message | Encode request into client_message | hazelcast/protocol/codec/atomic_long_set_codec.py | encode_request | SaitTalhaNisanci/hazelcast-python-client | 3 | python | def encode_request(name, new_value):
' '
client_message = ClientMessage(payload_size=calculate_size(name, new_value))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_long(new_value)
client_message.update_frame_length()
return client_message | def encode_request(name, new_value):
' '
client_message = ClientMessage(payload_size=calculate_size(name, new_value))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_long(new_value)
client_message.update_frame_length()
return client_message<|docstring|>Encode request into client_message<|endoftext|> |
6f5ead813a786710d79d8a03cb8a468fe98ea184594fa49f5f1debbb55d4abfa | @layer
def conv_rpn(self, input, k_h, k_w, c_o, s_h, s_w, name, biased=True, relu=True, padding=DEFAULT_PADDING, trainable=True):
' contribution by miraclebiu, and biased option'
self.validate_padding(padding)
c_i = input.get_shape()[(- 1)]
convolve = (lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding))
with tf.variable_scope(name) as scope:
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.0001)
init_biases = tf.constant_initializer(0.0)
kernel = self.make_var('weights', [k_h, k_w, c_i, c_o], init_weights, trainable, regularizer=self.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY))
if biased:
biases = self.make_var('biases', [c_o], init_biases, trainable)
conv = convolve(input, kernel)
if relu:
bias = tf.nn.bias_add(conv, biases)
return tf.nn.relu(bias, name=scope.name)
return tf.nn.bias_add(conv, biases, name=scope.name)
else:
conv = convolve(input, kernel)
if relu:
return tf.nn.relu(conv, name=scope.name)
return conv | contribution by miraclebiu, and biased option | lib/networks/network.py | conv_rpn | wenlihaoyu/text-detection-ctpn | 0 | python | @layer
def conv_rpn(self, input, k_h, k_w, c_o, s_h, s_w, name, biased=True, relu=True, padding=DEFAULT_PADDING, trainable=True):
' '
self.validate_padding(padding)
c_i = input.get_shape()[(- 1)]
convolve = (lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding))
with tf.variable_scope(name) as scope:
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.0001)
init_biases = tf.constant_initializer(0.0)
kernel = self.make_var('weights', [k_h, k_w, c_i, c_o], init_weights, trainable, regularizer=self.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY))
if biased:
biases = self.make_var('biases', [c_o], init_biases, trainable)
conv = convolve(input, kernel)
if relu:
bias = tf.nn.bias_add(conv, biases)
return tf.nn.relu(bias, name=scope.name)
return tf.nn.bias_add(conv, biases, name=scope.name)
else:
conv = convolve(input, kernel)
if relu:
return tf.nn.relu(conv, name=scope.name)
return conv | @layer
def conv_rpn(self, input, k_h, k_w, c_o, s_h, s_w, name, biased=True, relu=True, padding=DEFAULT_PADDING, trainable=True):
' '
self.validate_padding(padding)
c_i = input.get_shape()[(- 1)]
convolve = (lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding))
with tf.variable_scope(name) as scope:
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.0001)
init_biases = tf.constant_initializer(0.0)
kernel = self.make_var('weights', [k_h, k_w, c_i, c_o], init_weights, trainable, regularizer=self.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY))
if biased:
biases = self.make_var('biases', [c_o], init_biases, trainable)
conv = convolve(input, kernel)
if relu:
bias = tf.nn.bias_add(conv, biases)
return tf.nn.relu(bias, name=scope.name)
return tf.nn.bias_add(conv, biases, name=scope.name)
else:
conv = convolve(input, kernel)
if relu:
return tf.nn.relu(conv, name=scope.name)
return conv<|docstring|>contribution by miraclebiu, and biased option<|endoftext|> |
9d046c1f2484c231f3d53d6234c32b9a7a5716120e18d7b9a1bee2dd1e8c7bbf | @layer
def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, biased=True, relu=True, padding=DEFAULT_PADDING, trainable=True):
' contribution by miraclebiu, and biased option'
self.validate_padding(padding)
c_i = input.get_shape()[(- 1)]
convolve = (lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding))
with tf.variable_scope(name) as scope:
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
init_biases = tf.constant_initializer(0.0)
kernel = self.make_var('weights', [k_h, k_w, c_i, c_o], init_weights, trainable, regularizer=self.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY))
if biased:
biases = self.make_var('biases', [c_o], init_biases, trainable)
conv = convolve(input, kernel)
if relu:
bias = tf.nn.bias_add(conv, biases)
return tf.nn.relu(bias, name=scope.name)
return tf.nn.bias_add(conv, biases, name=scope.name)
else:
conv = convolve(input, kernel)
if relu:
return tf.nn.relu(conv, name=scope.name)
return conv | contribution by miraclebiu, and biased option | lib/networks/network.py | conv | wenlihaoyu/text-detection-ctpn | 0 | python | @layer
def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, biased=True, relu=True, padding=DEFAULT_PADDING, trainable=True):
' '
self.validate_padding(padding)
c_i = input.get_shape()[(- 1)]
convolve = (lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding))
with tf.variable_scope(name) as scope:
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
init_biases = tf.constant_initializer(0.0)
kernel = self.make_var('weights', [k_h, k_w, c_i, c_o], init_weights, trainable, regularizer=self.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY))
if biased:
biases = self.make_var('biases', [c_o], init_biases, trainable)
conv = convolve(input, kernel)
if relu:
bias = tf.nn.bias_add(conv, biases)
return tf.nn.relu(bias, name=scope.name)
return tf.nn.bias_add(conv, biases, name=scope.name)
else:
conv = convolve(input, kernel)
if relu:
return tf.nn.relu(conv, name=scope.name)
return conv | @layer
def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, biased=True, relu=True, padding=DEFAULT_PADDING, trainable=True):
' '
self.validate_padding(padding)
c_i = input.get_shape()[(- 1)]
convolve = (lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding))
with tf.variable_scope(name) as scope:
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
init_biases = tf.constant_initializer(0.0)
kernel = self.make_var('weights', [k_h, k_w, c_i, c_o], init_weights, trainable, regularizer=self.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY))
if biased:
biases = self.make_var('biases', [c_o], init_biases, trainable)
conv = convolve(input, kernel)
if relu:
bias = tf.nn.bias_add(conv, biases)
return tf.nn.relu(bias, name=scope.name)
return tf.nn.bias_add(conv, biases, name=scope.name)
else:
conv = convolve(input, kernel)
if relu:
return tf.nn.relu(conv, name=scope.name)
return conv<|docstring|>contribution by miraclebiu, and biased option<|endoftext|> |
d37f3bc9e48e3e269a27c608d8b2494258a65393c78180f2f4163833a40f6f53 | @layer
def psroi_pool(self, input, output_dim, group_size, spatial_scale, name):
'contribution by miraclebiu'
if isinstance(input[0], tuple):
input[0] = input[0][0]
if isinstance(input[1], tuple):
input[1] = input[1][0]
return psroi_pooling_op.psroi_pool(input[0], input[1], output_dim=output_dim, group_size=group_size, spatial_scale=spatial_scale, name=name)[0] | contribution by miraclebiu | lib/networks/network.py | psroi_pool | wenlihaoyu/text-detection-ctpn | 0 | python | @layer
def psroi_pool(self, input, output_dim, group_size, spatial_scale, name):
if isinstance(input[0], tuple):
input[0] = input[0][0]
if isinstance(input[1], tuple):
input[1] = input[1][0]
return psroi_pooling_op.psroi_pool(input[0], input[1], output_dim=output_dim, group_size=group_size, spatial_scale=spatial_scale, name=name)[0] | @layer
def psroi_pool(self, input, output_dim, group_size, spatial_scale, name):
if isinstance(input[0], tuple):
input[0] = input[0][0]
if isinstance(input[1], tuple):
input[1] = input[1][0]
return psroi_pooling_op.psroi_pool(input[0], input[1], output_dim=output_dim, group_size=group_size, spatial_scale=spatial_scale, name=name)[0]<|docstring|>contribution by miraclebiu<|endoftext|> |
cc2ff8501871c908b91c5adaeb7aedfdd9596e0f964eaf95d3625c6c6cdee0ce | @layer
def add(self, input, name):
'contribution by miraclebiu'
return tf.add(input[0], input[1]) | contribution by miraclebiu | lib/networks/network.py | add | wenlihaoyu/text-detection-ctpn | 0 | python | @layer
def add(self, input, name):
return tf.add(input[0], input[1]) | @layer
def add(self, input, name):
return tf.add(input[0], input[1])<|docstring|>contribution by miraclebiu<|endoftext|> |
a2dab184e14a0798b215102761bf5224f2c5e61d8f249ab6191e3a97113a0672 | @layer
def batch_normalization(self, input, name, relu=True, is_training=False):
'contribution by miraclebiu'
if relu:
temp_layer = tf.contrib.layers.batch_norm(input, scale=True, center=True, is_training=is_training, scope=name)
return tf.nn.relu(temp_layer)
else:
return tf.contrib.layers.batch_norm(input, scale=True, center=True, is_training=is_training, scope=name) | contribution by miraclebiu | lib/networks/network.py | batch_normalization | wenlihaoyu/text-detection-ctpn | 0 | python | @layer
def batch_normalization(self, input, name, relu=True, is_training=False):
if relu:
temp_layer = tf.contrib.layers.batch_norm(input, scale=True, center=True, is_training=is_training, scope=name)
return tf.nn.relu(temp_layer)
else:
return tf.contrib.layers.batch_norm(input, scale=True, center=True, is_training=is_training, scope=name) | @layer
def batch_normalization(self, input, name, relu=True, is_training=False):
if relu:
temp_layer = tf.contrib.layers.batch_norm(input, scale=True, center=True, is_training=is_training, scope=name)
return tf.nn.relu(temp_layer)
else:
return tf.contrib.layers.batch_norm(input, scale=True, center=True, is_training=is_training, scope=name)<|docstring|>contribution by miraclebiu<|endoftext|> |
a7af0638a2794cb30993b9d22863d28a8a0959e821916b8166f1f06624b07118 | def close(self):
'Cleanup temporary files.'
for file in (self.compressed_layer_files + self.uncompressed_layer_files):
file.close() | Cleanup temporary files. | docker_sign_verify/imagesource.py | close | crashvb/docker-sign-verify | 4 | python | def close(self):
for file in (self.compressed_layer_files + self.uncompressed_layer_files):
file.close() | def close(self):
for file in (self.compressed_layer_files + self.uncompressed_layer_files):
file.close()<|docstring|>Cleanup temporary files.<|endoftext|> |
a7af0638a2794cb30993b9d22863d28a8a0959e821916b8166f1f06624b07118 | def close(self):
'Cleanup temporary files.'
for file in (self.compressed_layer_files + self.uncompressed_layer_files):
file.close() | Cleanup temporary files. | docker_sign_verify/imagesource.py | close | crashvb/docker-sign-verify | 4 | python | def close(self):
for file in (self.compressed_layer_files + self.uncompressed_layer_files):
file.close() | def close(self):
for file in (self.compressed_layer_files + self.uncompressed_layer_files):
file.close()<|docstring|>Cleanup temporary files.<|endoftext|> |
6dd9f5134c1232515865927f2ef53ca12b8fa78afbbb3fb6dad51c6367591c54 | def __init__(self, *, dry_run: bool=False, signer_kwargs: Dict[(str, Dict)]=None, **kwargs):
'\n Args:\n dry_run: If true, destination image sources will not be changed.\n signer_kwargs: Parameters to be passed to the Signer instances when the are initialized.\n image_source_params: Extra parameters for image source processing.\n '
self.dry_run = dry_run
self.signer_kwargs = signer_kwargs
if (self.signer_kwargs is None):
self.signer_kwargs = {} | Args:
dry_run: If true, destination image sources will not be changed.
signer_kwargs: Parameters to be passed to the Signer instances when the are initialized.
image_source_params: Extra parameters for image source processing. | docker_sign_verify/imagesource.py | __init__ | crashvb/docker-sign-verify | 4 | python | def __init__(self, *, dry_run: bool=False, signer_kwargs: Dict[(str, Dict)]=None, **kwargs):
'\n Args:\n dry_run: If true, destination image sources will not be changed.\n signer_kwargs: Parameters to be passed to the Signer instances when the are initialized.\n image_source_params: Extra parameters for image source processing.\n '
self.dry_run = dry_run
self.signer_kwargs = signer_kwargs
if (self.signer_kwargs is None):
self.signer_kwargs = {} | def __init__(self, *, dry_run: bool=False, signer_kwargs: Dict[(str, Dict)]=None, **kwargs):
'\n Args:\n dry_run: If true, destination image sources will not be changed.\n signer_kwargs: Parameters to be passed to the Signer instances when the are initialized.\n image_source_params: Extra parameters for image source processing.\n '
self.dry_run = dry_run
self.signer_kwargs = signer_kwargs
if (self.signer_kwargs is None):
self.signer_kwargs = {}<|docstring|>Args:
dry_run: If true, destination image sources will not be changed.
signer_kwargs: Parameters to be passed to the Signer instances when the are initialized.
image_source_params: Extra parameters for image source processing.<|endoftext|> |
5b7f45cc7977cfdcf03de4ab300f0af94deb3f5686b00e9015983181c361d2f3 | @staticmethod
def check_dry_run(func):
'Validates the state of ImageSource.dry_run before invoking the wrapped method.'
@wraps(func)
async def wrapper(*args, **kwargs):
if args[0].dry_run:
LOGGER.debug('Dry Run: skipping %s', func)
else:
return (await func(*args, **kwargs))
return wrapper | Validates the state of ImageSource.dry_run before invoking the wrapped method. | docker_sign_verify/imagesource.py | check_dry_run | crashvb/docker-sign-verify | 4 | python | @staticmethod
def check_dry_run(func):
@wraps(func)
async def wrapper(*args, **kwargs):
if args[0].dry_run:
LOGGER.debug('Dry Run: skipping %s', func)
else:
return (await func(*args, **kwargs))
return wrapper | @staticmethod
def check_dry_run(func):
@wraps(func)
async def wrapper(*args, **kwargs):
if args[0].dry_run:
LOGGER.debug('Dry Run: skipping %s', func)
else:
return (await func(*args, **kwargs))
return wrapper<|docstring|>Validates the state of ImageSource.dry_run before invoking the wrapped method.<|endoftext|> |
983cdd98105e05533036d4d8e036b9f52115127be96e7520cf26e3bbb5e122af | async def _sign_image_config(self, signer: Signer, image_name: ImageName, signature_type: SignatureTypes, **kwargs) -> ImageSourceSignImageConfig:
'\n Verifies an image, then signs it without storing it in the image source.\n\n Args:\n signer: The signer used to create the signature value.\n image_name: The image name.\n signature_type: Specifies what type of signature action to perform.\n\n Returns:\n NamedTuple:\n image_config: The ImageConfig object corresponding to the signed image.\n signature_value: as defined by :func:~docker_sign_verify.ImageConfig.sign.\n verify_image_data: as defined by :func:~docker_sign_verify.ImageSource.verify_image_integrity.\n '
data = (await self.verify_image_integrity(image_name, **kwargs))
try:
signature_value = (await data.image_config.sign(signer, signature_type))
except Exception:
for file in (data.compressed_layer_files + data.uncompressed_layer_files):
file.close()
raise
return ImageSourceSignImageConfig(image_config=data.image_config, signature_value=signature_value, verify_image_data=data) | Verifies an image, then signs it without storing it in the image source.
Args:
signer: The signer used to create the signature value.
image_name: The image name.
signature_type: Specifies what type of signature action to perform.
Returns:
NamedTuple:
image_config: The ImageConfig object corresponding to the signed image.
signature_value: as defined by :func:~docker_sign_verify.ImageConfig.sign.
verify_image_data: as defined by :func:~docker_sign_verify.ImageSource.verify_image_integrity. | docker_sign_verify/imagesource.py | _sign_image_config | crashvb/docker-sign-verify | 4 | python | async def _sign_image_config(self, signer: Signer, image_name: ImageName, signature_type: SignatureTypes, **kwargs) -> ImageSourceSignImageConfig:
'\n Verifies an image, then signs it without storing it in the image source.\n\n Args:\n signer: The signer used to create the signature value.\n image_name: The image name.\n signature_type: Specifies what type of signature action to perform.\n\n Returns:\n NamedTuple:\n image_config: The ImageConfig object corresponding to the signed image.\n signature_value: as defined by :func:~docker_sign_verify.ImageConfig.sign.\n verify_image_data: as defined by :func:~docker_sign_verify.ImageSource.verify_image_integrity.\n '
data = (await self.verify_image_integrity(image_name, **kwargs))
try:
signature_value = (await data.image_config.sign(signer, signature_type))
except Exception:
for file in (data.compressed_layer_files + data.uncompressed_layer_files):
file.close()
raise
return ImageSourceSignImageConfig(image_config=data.image_config, signature_value=signature_value, verify_image_data=data) | async def _sign_image_config(self, signer: Signer, image_name: ImageName, signature_type: SignatureTypes, **kwargs) -> ImageSourceSignImageConfig:
'\n Verifies an image, then signs it without storing it in the image source.\n\n Args:\n signer: The signer used to create the signature value.\n image_name: The image name.\n signature_type: Specifies what type of signature action to perform.\n\n Returns:\n NamedTuple:\n image_config: The ImageConfig object corresponding to the signed image.\n signature_value: as defined by :func:~docker_sign_verify.ImageConfig.sign.\n verify_image_data: as defined by :func:~docker_sign_verify.ImageSource.verify_image_integrity.\n '
data = (await self.verify_image_integrity(image_name, **kwargs))
try:
signature_value = (await data.image_config.sign(signer, signature_type))
except Exception:
for file in (data.compressed_layer_files + data.uncompressed_layer_files):
file.close()
raise
return ImageSourceSignImageConfig(image_config=data.image_config, signature_value=signature_value, verify_image_data=data)<|docstring|>Verifies an image, then signs it without storing it in the image source.
Args:
signer: The signer used to create the signature value.
image_name: The image name.
signature_type: Specifies what type of signature action to perform.
Returns:
NamedTuple:
image_config: The ImageConfig object corresponding to the signed image.
signature_value: as defined by :func:~docker_sign_verify.ImageConfig.sign.
verify_image_data: as defined by :func:~docker_sign_verify.ImageSource.verify_image_integrity.<|endoftext|> |
2e5358344597c2021abd988201998f8390f66f36a8a9a695417a516b7fe4d6b0 | async def _verify_image_config(self, image_name: ImageName, **kwargs) -> ImageSourceVerifyImageConfig:
'\n Verifies the integration of an image configuration against metadata contained within a manifest.\n\n Args:\n image_name: The image name for which to retrieve the configuration.\n\n Returns:\n NamedTuple:\n image_config: The image configuration.\n image_layers: The listing of image layer identifiers.\n manifest: The image-source specific manifest.\n manifest_layers: The listing of manifest layer identifiers.\n '
LOGGER.debug('Verifying Integrity: %s ...', image_name.resolve_name())
manifest = (await self.get_manifest(image_name, **kwargs))
LOGGER.debug(' manifest digest: %s', xellipsis(manifest.get_digest()))
config_digest = manifest.get_config_digest(image_name)
LOGGER.debug(' config digest: %s', xellipsis(config_digest))
manifest_layers = manifest.get_layers(image_name)
LOGGER.debug(' manifest layers:')
for layer in manifest_layers:
LOGGER.debug(' %s', xellipsis(layer))
image_config = (await self.get_image_config(image_name, **kwargs))
config_digest_canonical = image_config.get_digest_canonical()
LOGGER.debug(' config digest (canonical): %s', xellipsis(config_digest_canonical))
must_be_equal(config_digest, image_config.get_digest(), 'Image config digest mismatch')
image_layers = image_config.get_image_layers()
LOGGER.debug(' image layers:')
for layer in image_layers:
LOGGER.debug(' %s', xellipsis(layer))
must_be_equal(len(manifest_layers), len(image_layers), 'Layer count mismatch')
return ImageSourceVerifyImageConfig(image_config=image_config, image_layers=image_layers, manifest=manifest, manifest_layers=manifest_layers) | Verifies the integration of an image configuration against metadata contained within a manifest.
Args:
image_name: The image name for which to retrieve the configuration.
Returns:
NamedTuple:
image_config: The image configuration.
image_layers: The listing of image layer identifiers.
manifest: The image-source specific manifest.
manifest_layers: The listing of manifest layer identifiers. | docker_sign_verify/imagesource.py | _verify_image_config | crashvb/docker-sign-verify | 4 | python | async def _verify_image_config(self, image_name: ImageName, **kwargs) -> ImageSourceVerifyImageConfig:
'\n Verifies the integration of an image configuration against metadata contained within a manifest.\n\n Args:\n image_name: The image name for which to retrieve the configuration.\n\n Returns:\n NamedTuple:\n image_config: The image configuration.\n image_layers: The listing of image layer identifiers.\n manifest: The image-source specific manifest.\n manifest_layers: The listing of manifest layer identifiers.\n '
LOGGER.debug('Verifying Integrity: %s ...', image_name.resolve_name())
manifest = (await self.get_manifest(image_name, **kwargs))
LOGGER.debug(' manifest digest: %s', xellipsis(manifest.get_digest()))
config_digest = manifest.get_config_digest(image_name)
LOGGER.debug(' config digest: %s', xellipsis(config_digest))
manifest_layers = manifest.get_layers(image_name)
LOGGER.debug(' manifest layers:')
for layer in manifest_layers:
LOGGER.debug(' %s', xellipsis(layer))
image_config = (await self.get_image_config(image_name, **kwargs))
config_digest_canonical = image_config.get_digest_canonical()
LOGGER.debug(' config digest (canonical): %s', xellipsis(config_digest_canonical))
must_be_equal(config_digest, image_config.get_digest(), 'Image config digest mismatch')
image_layers = image_config.get_image_layers()
LOGGER.debug(' image layers:')
for layer in image_layers:
LOGGER.debug(' %s', xellipsis(layer))
must_be_equal(len(manifest_layers), len(image_layers), 'Layer count mismatch')
return ImageSourceVerifyImageConfig(image_config=image_config, image_layers=image_layers, manifest=manifest, manifest_layers=manifest_layers) | async def _verify_image_config(self, image_name: ImageName, **kwargs) -> ImageSourceVerifyImageConfig:
'\n Verifies the integration of an image configuration against metadata contained within a manifest.\n\n Args:\n image_name: The image name for which to retrieve the configuration.\n\n Returns:\n NamedTuple:\n image_config: The image configuration.\n image_layers: The listing of image layer identifiers.\n manifest: The image-source specific manifest.\n manifest_layers: The listing of manifest layer identifiers.\n '
LOGGER.debug('Verifying Integrity: %s ...', image_name.resolve_name())
manifest = (await self.get_manifest(image_name, **kwargs))
LOGGER.debug(' manifest digest: %s', xellipsis(manifest.get_digest()))
config_digest = manifest.get_config_digest(image_name)
LOGGER.debug(' config digest: %s', xellipsis(config_digest))
manifest_layers = manifest.get_layers(image_name)
LOGGER.debug(' manifest layers:')
for layer in manifest_layers:
LOGGER.debug(' %s', xellipsis(layer))
image_config = (await self.get_image_config(image_name, **kwargs))
config_digest_canonical = image_config.get_digest_canonical()
LOGGER.debug(' config digest (canonical): %s', xellipsis(config_digest_canonical))
must_be_equal(config_digest, image_config.get_digest(), 'Image config digest mismatch')
image_layers = image_config.get_image_layers()
LOGGER.debug(' image layers:')
for layer in image_layers:
LOGGER.debug(' %s', xellipsis(layer))
must_be_equal(len(manifest_layers), len(image_layers), 'Layer count mismatch')
return ImageSourceVerifyImageConfig(image_config=image_config, image_layers=image_layers, manifest=manifest, manifest_layers=manifest_layers)<|docstring|>Verifies the integration of an image configuration against metadata contained within a manifest.
Args:
image_name: The image name for which to retrieve the configuration.
Returns:
NamedTuple:
image_config: The image configuration.
image_layers: The listing of image layer identifiers.
manifest: The image-source specific manifest.
manifest_layers: The listing of manifest layer identifiers.<|endoftext|> |
37a7e445c357e29726e458472386db341f2048ece6a74bbb241a86dfd8f8c1b0 | @abc.abstractmethod
async def get_image_config(self, image_name: ImageName, **kwargs) -> ImageConfig:
'\n Retrieves an image configuration (config.json).\n\n Args:\n image_name: The image name.\n\n Returns:\n The image configuration.\n ' | Retrieves an image configuration (config.json).
Args:
image_name: The image name.
Returns:
The image configuration. | docker_sign_verify/imagesource.py | get_image_config | crashvb/docker-sign-verify | 4 | python | @abc.abstractmethod
async def get_image_config(self, image_name: ImageName, **kwargs) -> ImageConfig:
'\n Retrieves an image configuration (config.json).\n\n Args:\n image_name: The image name.\n\n Returns:\n The image configuration.\n ' | @abc.abstractmethod
async def get_image_config(self, image_name: ImageName, **kwargs) -> ImageConfig:
'\n Retrieves an image configuration (config.json).\n\n Args:\n image_name: The image name.\n\n Returns:\n The image configuration.\n '<|docstring|>Retrieves an image configuration (config.json).
Args:
image_name: The image name.
Returns:
The image configuration.<|endoftext|> |
fad510924e0d5edfabde2b59b68b8d509227031f81758738d793910b1374b50e | @abc.abstractmethod
async def get_image_layer_to_disk(self, image_name: ImageName, layer: FormattedSHA256, file, **kwargs) -> ImageSourceGetImageLayerToDisk:
'\n Retrieves a single image layer stored to disk.\n\n Args:\n image_name: The image name.\n layer: The layer identifier in the form: <hash type>:<digest value>.\n file: File in which to store the image layer.\n ' | Retrieves a single image layer stored to disk.
Args:
image_name: The image name.
layer: The layer identifier in the form: <hash type>:<digest value>.
file: File in which to store the image layer. | docker_sign_verify/imagesource.py | get_image_layer_to_disk | crashvb/docker-sign-verify | 4 | python | @abc.abstractmethod
async def get_image_layer_to_disk(self, image_name: ImageName, layer: FormattedSHA256, file, **kwargs) -> ImageSourceGetImageLayerToDisk:
'\n Retrieves a single image layer stored to disk.\n\n Args:\n image_name: The image name.\n layer: The layer identifier in the form: <hash type>:<digest value>.\n file: File in which to store the image layer.\n ' | @abc.abstractmethod
async def get_image_layer_to_disk(self, image_name: ImageName, layer: FormattedSHA256, file, **kwargs) -> ImageSourceGetImageLayerToDisk:
'\n Retrieves a single image layer stored to disk.\n\n Args:\n image_name: The image name.\n layer: The layer identifier in the form: <hash type>:<digest value>.\n file: File in which to store the image layer.\n '<|docstring|>Retrieves a single image layer stored to disk.
Args:
image_name: The image name.
layer: The layer identifier in the form: <hash type>:<digest value>.
file: File in which to store the image layer.<|endoftext|> |
b53edb874285159953fc5d34ace1ebc6c9bd8537ef649edf1b8f5009d446da95 | @abc.abstractmethod
async def get_manifest(self, image_name: ImageName=None, **kwargs) -> Manifest:
'\n Retrieves the manifest for a given image.\n\n Args:\n image_name: The name image for which to retrieve the manifest.\n\n Returns:\n The image source-specific manifest.\n ' | Retrieves the manifest for a given image.
Args:
image_name: The name image for which to retrieve the manifest.
Returns:
The image source-specific manifest. | docker_sign_verify/imagesource.py | get_manifest | crashvb/docker-sign-verify | 4 | python | @abc.abstractmethod
async def get_manifest(self, image_name: ImageName=None, **kwargs) -> Manifest:
'\n Retrieves the manifest for a given image.\n\n Args:\n image_name: The name image for which to retrieve the manifest.\n\n Returns:\n The image source-specific manifest.\n ' | @abc.abstractmethod
async def get_manifest(self, image_name: ImageName=None, **kwargs) -> Manifest:
'\n Retrieves the manifest for a given image.\n\n Args:\n image_name: The name image for which to retrieve the manifest.\n\n Returns:\n The image source-specific manifest.\n '<|docstring|>Retrieves the manifest for a given image.
Args:
image_name: The name image for which to retrieve the manifest.
Returns:
The image source-specific manifest.<|endoftext|> |
462dadc4d236930263b72ecaa98b358b5f5b683edebf3b7efbdda1884cf08865 | @abc.abstractmethod
async def layer_exists(self, image_name: ImageName, layer: FormattedSHA256, **kwargs) -> bool:
'\n Checks if a given image layer exists.\n\n Args:\n image_name: The image name.\n layer: The layer identifier in the form: <hash type>:<digest value>.\n\n Returns:\n bool: True if the layer exists, False otherwise.\n ' | Checks if a given image layer exists.
Args:
image_name: The image name.
layer: The layer identifier in the form: <hash type>:<digest value>.
Returns:
bool: True if the layer exists, False otherwise. | docker_sign_verify/imagesource.py | layer_exists | crashvb/docker-sign-verify | 4 | python | @abc.abstractmethod
async def layer_exists(self, image_name: ImageName, layer: FormattedSHA256, **kwargs) -> bool:
'\n Checks if a given image layer exists.\n\n Args:\n image_name: The image name.\n layer: The layer identifier in the form: <hash type>:<digest value>.\n\n Returns:\n bool: True if the layer exists, False otherwise.\n ' | @abc.abstractmethod
async def layer_exists(self, image_name: ImageName, layer: FormattedSHA256, **kwargs) -> bool:
'\n Checks if a given image layer exists.\n\n Args:\n image_name: The image name.\n layer: The layer identifier in the form: <hash type>:<digest value>.\n\n Returns:\n bool: True if the layer exists, False otherwise.\n '<|docstring|>Checks if a given image layer exists.
Args:
image_name: The image name.
layer: The layer identifier in the form: <hash type>:<digest value>.
Returns:
bool: True if the layer exists, False otherwise.<|endoftext|> |
2734d31ca3a84ea587958bd1161efb35dea8ee0874b732191e2ffb8002bab508 | @abc.abstractmethod
async def put_image(self, image_source, image_name: ImageName, manifest: Manifest, image_config: ImageConfig, layer_files: List, **kwargs):
'\n Stores a given image (manifest, image_config, and layers) from another image source.\n\n Args:\n image_source: The source image source.\n image_name: The name of the image being stored.\n manifest: The image source-specific manifest to be stored, in source image source format.\n image_config: The image configuration to be stored.\n layer_files: List of files from which to read the layer content, in source image source format.\n ' | Stores a given image (manifest, image_config, and layers) from another image source.
Args:
image_source: The source image source.
image_name: The name of the image being stored.
manifest: The image source-specific manifest to be stored, in source image source format.
image_config: The image configuration to be stored.
layer_files: List of files from which to read the layer content, in source image source format. | docker_sign_verify/imagesource.py | put_image | crashvb/docker-sign-verify | 4 | python | @abc.abstractmethod
async def put_image(self, image_source, image_name: ImageName, manifest: Manifest, image_config: ImageConfig, layer_files: List, **kwargs):
'\n Stores a given image (manifest, image_config, and layers) from another image source.\n\n Args:\n image_source: The source image source.\n image_name: The name of the image being stored.\n manifest: The image source-specific manifest to be stored, in source image source format.\n image_config: The image configuration to be stored.\n layer_files: List of files from which to read the layer content, in source image source format.\n ' | @abc.abstractmethod
async def put_image(self, image_source, image_name: ImageName, manifest: Manifest, image_config: ImageConfig, layer_files: List, **kwargs):
'\n Stores a given image (manifest, image_config, and layers) from another image source.\n\n Args:\n image_source: The source image source.\n image_name: The name of the image being stored.\n manifest: The image source-specific manifest to be stored, in source image source format.\n image_config: The image configuration to be stored.\n layer_files: List of files from which to read the layer content, in source image source format.\n '<|docstring|>Stores a given image (manifest, image_config, and layers) from another image source.
Args:
image_source: The source image source.
image_name: The name of the image being stored.
manifest: The image source-specific manifest to be stored, in source image source format.
image_config: The image configuration to be stored.
layer_files: List of files from which to read the layer content, in source image source format.<|endoftext|> |
3aaa432f51db06eee8989b8702b2f38bb532c60c9e8bc315c8aa5464a73606dc | @abc.abstractmethod
async def put_image_config(self, image_name: ImageName, image_config: ImageConfig, **kwargs):
'\n Assigns an image configuration (config.json).\n\n Args:\n image_name: The image name.\n image_config: The image configuration to be assigned.\n ' | Assigns an image configuration (config.json).
Args:
image_name: The image name.
image_config: The image configuration to be assigned. | docker_sign_verify/imagesource.py | put_image_config | crashvb/docker-sign-verify | 4 | python | @abc.abstractmethod
async def put_image_config(self, image_name: ImageName, image_config: ImageConfig, **kwargs):
'\n Assigns an image configuration (config.json).\n\n Args:\n image_name: The image name.\n image_config: The image configuration to be assigned.\n ' | @abc.abstractmethod
async def put_image_config(self, image_name: ImageName, image_config: ImageConfig, **kwargs):
'\n Assigns an image configuration (config.json).\n\n Args:\n image_name: The image name.\n image_config: The image configuration to be assigned.\n '<|docstring|>Assigns an image configuration (config.json).
Args:
image_name: The image name.
image_config: The image configuration to be assigned.<|endoftext|> |
5ca06fd8f361a34c827cc3d52abf17cf94cf8d064cc5a5540984fbbf8344f13c | @abc.abstractmethod
async def put_image_layer(self, image_name: ImageName, content, **kwargs):
'\n Assigns a single image layer.\n\n Args:\n image_name: The image name.\n content: The layer content.\n ' | Assigns a single image layer.
Args:
image_name: The image name.
content: The layer content. | docker_sign_verify/imagesource.py | put_image_layer | crashvb/docker-sign-verify | 4 | python | @abc.abstractmethod
async def put_image_layer(self, image_name: ImageName, content, **kwargs):
'\n Assigns a single image layer.\n\n Args:\n image_name: The image name.\n content: The layer content.\n ' | @abc.abstractmethod
async def put_image_layer(self, image_name: ImageName, content, **kwargs):
'\n Assigns a single image layer.\n\n Args:\n image_name: The image name.\n content: The layer content.\n '<|docstring|>Assigns a single image layer.
Args:
image_name: The image name.
content: The layer content.<|endoftext|> |
18b24397406bb6171f51dd4103a63b5c6d32f6454ac880453d81fec4e2e18299 | @abc.abstractmethod
async def put_image_layer_from_disk(self, image_name: ImageName, file, **kwargs):
'\n Assigns a single image layer read from disk.\n\n Args:\n image_name: The image name.\n file: File from which to read the layer content.\n ' | Assigns a single image layer read from disk.
Args:
image_name: The image name.
file: File from which to read the layer content. | docker_sign_verify/imagesource.py | put_image_layer_from_disk | crashvb/docker-sign-verify | 4 | python | @abc.abstractmethod
async def put_image_layer_from_disk(self, image_name: ImageName, file, **kwargs):
'\n Assigns a single image layer read from disk.\n\n Args:\n image_name: The image name.\n file: File from which to read the layer content.\n ' | @abc.abstractmethod
async def put_image_layer_from_disk(self, image_name: ImageName, file, **kwargs):
'\n Assigns a single image layer read from disk.\n\n Args:\n image_name: The image name.\n file: File from which to read the layer content.\n '<|docstring|>Assigns a single image layer read from disk.
Args:
image_name: The image name.
file: File from which to read the layer content.<|endoftext|> |
3146ea8398e3cfb0b79d4e8be081cb8941a1ac537f41260e874c6d5e75afac30 | @abc.abstractmethod
async def put_manifest(self, manifest: Manifest, image_name: ImageName=None, **kwargs):
'\n Assigns the manifest for a given image.\n\n Args:\n manifest: The image source-specific manifest to be assigned.\n image_name: The name of the image for which to assign the manifest.\n ' | Assigns the manifest for a given image.
Args:
manifest: The image source-specific manifest to be assigned.
image_name: The name of the image for which to assign the manifest. | docker_sign_verify/imagesource.py | put_manifest | crashvb/docker-sign-verify | 4 | python | @abc.abstractmethod
async def put_manifest(self, manifest: Manifest, image_name: ImageName=None, **kwargs):
'\n Assigns the manifest for a given image.\n\n Args:\n manifest: The image source-specific manifest to be assigned.\n image_name: The name of the image for which to assign the manifest.\n ' | @abc.abstractmethod
async def put_manifest(self, manifest: Manifest, image_name: ImageName=None, **kwargs):
'\n Assigns the manifest for a given image.\n\n Args:\n manifest: The image source-specific manifest to be assigned.\n image_name: The name of the image for which to assign the manifest.\n '<|docstring|>Assigns the manifest for a given image.
Args:
manifest: The image source-specific manifest to be assigned.
image_name: The name of the image for which to assign the manifest.<|endoftext|> |
3a5fc5b360494728a23df21a275c833aa50fc06f568055f5eade76c78f5015e8 | @abc.abstractmethod
async def sign_image(self, signer: Signer, src_image_name: ImageName, dest_image_source, dest_image_name: ImageName, signature_type: SignatureTypes=SignatureTypes.SIGN, **kwargs) -> ImageSourceSignImage:
'\n Retrieves, verifies and signs the image, storing it in the destination image source.\n\n Args:\n signer: The signer used to create the signature value.\n src_image_name: The source image name.\n dest_image_source: The destination image source into which to store the signed image.\n dest_image_name: The description image name.\n signature_type: Specifies what type of signature action to perform.\n\n Returns:\n NamedTuple:\n image_config: The ImageConfig object corresponding to the signed image.\n signature_value: as defined by :func:~docker_sign_verify.ImageConfig.sign.\n verify_image_data: as defined by :func:~docker_sign_verify.ImageSource.verify_image_integrity.\n manifest_signed: The signed image source-specific manifest.\n ' | Retrieves, verifies and signs the image, storing it in the destination image source.
Args:
signer: The signer used to create the signature value.
src_image_name: The source image name.
dest_image_source: The destination image source into which to store the signed image.
dest_image_name: The description image name.
signature_type: Specifies what type of signature action to perform.
Returns:
NamedTuple:
image_config: The ImageConfig object corresponding to the signed image.
signature_value: as defined by :func:~docker_sign_verify.ImageConfig.sign.
verify_image_data: as defined by :func:~docker_sign_verify.ImageSource.verify_image_integrity.
manifest_signed: The signed image source-specific manifest. | docker_sign_verify/imagesource.py | sign_image | crashvb/docker-sign-verify | 4 | python | @abc.abstractmethod
async def sign_image(self, signer: Signer, src_image_name: ImageName, dest_image_source, dest_image_name: ImageName, signature_type: SignatureTypes=SignatureTypes.SIGN, **kwargs) -> ImageSourceSignImage:
'\n Retrieves, verifies and signs the image, storing it in the destination image source.\n\n Args:\n signer: The signer used to create the signature value.\n src_image_name: The source image name.\n dest_image_source: The destination image source into which to store the signed image.\n dest_image_name: The description image name.\n signature_type: Specifies what type of signature action to perform.\n\n Returns:\n NamedTuple:\n image_config: The ImageConfig object corresponding to the signed image.\n signature_value: as defined by :func:~docker_sign_verify.ImageConfig.sign.\n verify_image_data: as defined by :func:~docker_sign_verify.ImageSource.verify_image_integrity.\n manifest_signed: The signed image source-specific manifest.\n ' | @abc.abstractmethod
async def sign_image(self, signer: Signer, src_image_name: ImageName, dest_image_source, dest_image_name: ImageName, signature_type: SignatureTypes=SignatureTypes.SIGN, **kwargs) -> ImageSourceSignImage:
'\n Retrieves, verifies and signs the image, storing it in the destination image source.\n\n Args:\n signer: The signer used to create the signature value.\n src_image_name: The source image name.\n dest_image_source: The destination image source into which to store the signed image.\n dest_image_name: The description image name.\n signature_type: Specifies what type of signature action to perform.\n\n Returns:\n NamedTuple:\n image_config: The ImageConfig object corresponding to the signed image.\n signature_value: as defined by :func:~docker_sign_verify.ImageConfig.sign.\n verify_image_data: as defined by :func:~docker_sign_verify.ImageSource.verify_image_integrity.\n manifest_signed: The signed image source-specific manifest.\n '<|docstring|>Retrieves, verifies and signs the image, storing it in the destination image source.
Args:
signer: The signer used to create the signature value.
src_image_name: The source image name.
dest_image_source: The destination image source into which to store the signed image.
dest_image_name: The description image name.
signature_type: Specifies what type of signature action to perform.
Returns:
NamedTuple:
image_config: The ImageConfig object corresponding to the signed image.
signature_value: as defined by :func:~docker_sign_verify.ImageConfig.sign.
verify_image_data: as defined by :func:~docker_sign_verify.ImageSource.verify_image_integrity.
manifest_signed: The signed image source-specific manifest.<|endoftext|> |
9ab7321fab9953d401c7a19854a1b91f328a640da4aa6c26b71dd391841d91e0 | @abc.abstractmethod
async def verify_image_integrity(self, image_name: ImageName, **kwargs) -> ImageSourceVerifyImageIntegrity:
'\n Verifies that the image source data format is consistent with respect to the image configuration and image\n layers, and that the image configuration and image layers are internally consistent (the digest values match).\n\n Args:\n image_name: The image name.\n\n Returns:\n NamedTuple:\n compressed_layer_files: The list of compressed layer files on disk (optional).\n image config: The image configuration.\n manifest: The image source-specific manifest file (archive, registry, repository).\n uncompressed_layer_files: The list of uncompressed layer files on disk.\n ' | Verifies that the image source data format is consistent with respect to the image configuration and image
layers, and that the image configuration and image layers are internally consistent (the digest values match).
Args:
image_name: The image name.
Returns:
NamedTuple:
compressed_layer_files: The list of compressed layer files on disk (optional).
image config: The image configuration.
manifest: The image source-specific manifest file (archive, registry, repository).
uncompressed_layer_files: The list of uncompressed layer files on disk. | docker_sign_verify/imagesource.py | verify_image_integrity | crashvb/docker-sign-verify | 4 | python | @abc.abstractmethod
async def verify_image_integrity(self, image_name: ImageName, **kwargs) -> ImageSourceVerifyImageIntegrity:
'\n Verifies that the image source data format is consistent with respect to the image configuration and image\n layers, and that the image configuration and image layers are internally consistent (the digest values match).\n\n Args:\n image_name: The image name.\n\n Returns:\n NamedTuple:\n compressed_layer_files: The list of compressed layer files on disk (optional).\n image config: The image configuration.\n manifest: The image source-specific manifest file (archive, registry, repository).\n uncompressed_layer_files: The list of uncompressed layer files on disk.\n ' | @abc.abstractmethod
async def verify_image_integrity(self, image_name: ImageName, **kwargs) -> ImageSourceVerifyImageIntegrity:
'\n Verifies that the image source data format is consistent with respect to the image configuration and image\n layers, and that the image configuration and image layers are internally consistent (the digest values match).\n\n Args:\n image_name: The image name.\n\n Returns:\n NamedTuple:\n compressed_layer_files: The list of compressed layer files on disk (optional).\n image config: The image configuration.\n manifest: The image source-specific manifest file (archive, registry, repository).\n uncompressed_layer_files: The list of uncompressed layer files on disk.\n '<|docstring|>Verifies that the image source data format is consistent with respect to the image configuration and image
layers, and that the image configuration and image layers are internally consistent (the digest values match).
Args:
image_name: The image name.
Returns:
NamedTuple:
compressed_layer_files: The list of compressed layer files on disk (optional).
image config: The image configuration.
manifest: The image source-specific manifest file (archive, registry, repository).
uncompressed_layer_files: The list of uncompressed layer files on disk.<|endoftext|> |
844b4c0a2950930f2d93e52844fd08829e4710425d5bd7f3e1639bc62cc0ae45 | async def verify_image_signatures(self, image_name: ImageName, **kwargs) -> ImageSourceVerifyImageSignatures:
'\n Verifies that signatures contained within the image source data format are valid (that the image has not been\n modified since they were created)\n\n Args:\n image_name: The image name.\n\n Returns:\n NamedTuple:\n compressed_layer_files: The list of compressed layer files on disk (optional).\n image config: The image configuration.\n manifest: The image source-specific manifest file (archive, registry, repository).\n signatures: as defined by :func:~docker_sign_verify.ImageConfig.verify_signatures.\n uncompressed_layer_files: The list of uncompressed layer files on disk.\n '
data = (await self.verify_image_integrity(image_name, **kwargs))
try:
LOGGER.debug('Verifying Signature(s): %s ...', image_name.resolve_name())
LOGGER.debug(' config digest (signed): %s', xellipsis(data.image_config.get_digest()))
signatures = (await data.image_config.verify_signatures(signer_kwargs=self.signer_kwargs))
data = ImageSourceVerifyImageSignatures(compressed_layer_files=data.compressed_layer_files, image_config=data.image_config, manifest=data.manifest, signatures=signatures, uncompressed_layer_files=data.uncompressed_layer_files)
LOGGER.debug(' signatures:')
for result in data.signatures.results:
if (not hasattr(result, 'valid')):
raise UnsupportedSignatureTypeError(f'Unsupported signature type: {type(result)}!')
if (hasattr(result, 'signer_short') and hasattr(result, 'signer_long')):
if (not result.valid):
raise SignatureMismatchError(f'Verification failed for signature; {result.signer_short}')
for line in result.signer_long.splitlines():
LOGGER.debug(line)
else:
if (not result.valid):
raise SignatureMismatchError(f'Verification failed for signature; unknown type: {type(result)}!')
LOGGER.debug(' Signature of unknown type: %s', type(result))
except Exception:
for file in (data.compressed_layer_files + data.uncompressed_layer_files):
file.close()
raise
LOGGER.debug('Signature check passed.')
return data | Verifies that signatures contained within the image source data format are valid (that the image has not been
modified since they were created)
Args:
image_name: The image name.
Returns:
NamedTuple:
compressed_layer_files: The list of compressed layer files on disk (optional).
image config: The image configuration.
manifest: The image source-specific manifest file (archive, registry, repository).
signatures: as defined by :func:~docker_sign_verify.ImageConfig.verify_signatures.
uncompressed_layer_files: The list of uncompressed layer files on disk. | docker_sign_verify/imagesource.py | verify_image_signatures | crashvb/docker-sign-verify | 4 | python | async def verify_image_signatures(self, image_name: ImageName, **kwargs) -> ImageSourceVerifyImageSignatures:
'\n Verifies that signatures contained within the image source data format are valid (that the image has not been\n modified since they were created)\n\n Args:\n image_name: The image name.\n\n Returns:\n NamedTuple:\n compressed_layer_files: The list of compressed layer files on disk (optional).\n image config: The image configuration.\n manifest: The image source-specific manifest file (archive, registry, repository).\n signatures: as defined by :func:~docker_sign_verify.ImageConfig.verify_signatures.\n uncompressed_layer_files: The list of uncompressed layer files on disk.\n '
data = (await self.verify_image_integrity(image_name, **kwargs))
try:
LOGGER.debug('Verifying Signature(s): %s ...', image_name.resolve_name())
LOGGER.debug(' config digest (signed): %s', xellipsis(data.image_config.get_digest()))
signatures = (await data.image_config.verify_signatures(signer_kwargs=self.signer_kwargs))
data = ImageSourceVerifyImageSignatures(compressed_layer_files=data.compressed_layer_files, image_config=data.image_config, manifest=data.manifest, signatures=signatures, uncompressed_layer_files=data.uncompressed_layer_files)
LOGGER.debug(' signatures:')
for result in data.signatures.results:
if (not hasattr(result, 'valid')):
raise UnsupportedSignatureTypeError(f'Unsupported signature type: {type(result)}!')
if (hasattr(result, 'signer_short') and hasattr(result, 'signer_long')):
if (not result.valid):
raise SignatureMismatchError(f'Verification failed for signature; {result.signer_short}')
for line in result.signer_long.splitlines():
LOGGER.debug(line)
else:
if (not result.valid):
raise SignatureMismatchError(f'Verification failed for signature; unknown type: {type(result)}!')
LOGGER.debug(' Signature of unknown type: %s', type(result))
except Exception:
for file in (data.compressed_layer_files + data.uncompressed_layer_files):
file.close()
raise
LOGGER.debug('Signature check passed.')
return data | async def verify_image_signatures(self, image_name: ImageName, **kwargs) -> ImageSourceVerifyImageSignatures:
'\n Verifies that signatures contained within the image source data format are valid (that the image has not been\n modified since they were created)\n\n Args:\n image_name: The image name.\n\n Returns:\n NamedTuple:\n compressed_layer_files: The list of compressed layer files on disk (optional).\n image config: The image configuration.\n manifest: The image source-specific manifest file (archive, registry, repository).\n signatures: as defined by :func:~docker_sign_verify.ImageConfig.verify_signatures.\n uncompressed_layer_files: The list of uncompressed layer files on disk.\n '
data = (await self.verify_image_integrity(image_name, **kwargs))
try:
LOGGER.debug('Verifying Signature(s): %s ...', image_name.resolve_name())
LOGGER.debug(' config digest (signed): %s', xellipsis(data.image_config.get_digest()))
signatures = (await data.image_config.verify_signatures(signer_kwargs=self.signer_kwargs))
data = ImageSourceVerifyImageSignatures(compressed_layer_files=data.compressed_layer_files, image_config=data.image_config, manifest=data.manifest, signatures=signatures, uncompressed_layer_files=data.uncompressed_layer_files)
LOGGER.debug(' signatures:')
for result in data.signatures.results:
if (not hasattr(result, 'valid')):
raise UnsupportedSignatureTypeError(f'Unsupported signature type: {type(result)}!')
if (hasattr(result, 'signer_short') and hasattr(result, 'signer_long')):
if (not result.valid):
raise SignatureMismatchError(f'Verification failed for signature; {result.signer_short}')
for line in result.signer_long.splitlines():
LOGGER.debug(line)
else:
if (not result.valid):
raise SignatureMismatchError(f'Verification failed for signature; unknown type: {type(result)}!')
LOGGER.debug(' Signature of unknown type: %s', type(result))
except Exception:
for file in (data.compressed_layer_files + data.uncompressed_layer_files):
file.close()
raise
LOGGER.debug('Signature check passed.')
return data<|docstring|>Verifies that signatures contained within the image source data format are valid (that the image has not been
modified since they were created)
Args:
image_name: The image name.
Returns:
NamedTuple:
compressed_layer_files: The list of compressed layer files on disk (optional).
image config: The image configuration.
manifest: The image source-specific manifest file (archive, registry, repository).
signatures: as defined by :func:~docker_sign_verify.ImageConfig.verify_signatures.
uncompressed_layer_files: The list of uncompressed layer files on disk.<|endoftext|> |
826c38869a844418ab47a7d6912204319be1f73d3c61116511456ee8e354b71a | def plugin_id(name, version):
'Creates an ID for the plugins.\n\n Parameters\n ----------\n name: str\n A string identifying the plugin.\n version: int\n A version number for the plugin.\n '
if ((not isinstance(version, int)) or (version < 0)):
raise ValueError('version must be a non negative integer')
return '.'.join(['pyfibre', 'plugin', name, str(version)]) | Creates an ID for the plugins.
Parameters
----------
name: str
A string identifying the plugin.
version: int
A version number for the plugin. | pyfibre/ids.py | plugin_id | franklongford/ImageCol | 2 | python | def plugin_id(name, version):
'Creates an ID for the plugins.\n\n Parameters\n ----------\n name: str\n A string identifying the plugin.\n version: int\n A version number for the plugin.\n '
if ((not isinstance(version, int)) or (version < 0)):
raise ValueError('version must be a non negative integer')
return '.'.join(['pyfibre', 'plugin', name, str(version)]) | def plugin_id(name, version):
'Creates an ID for the plugins.\n\n Parameters\n ----------\n name: str\n A string identifying the plugin.\n version: int\n A version number for the plugin.\n '
if ((not isinstance(version, int)) or (version < 0)):
raise ValueError('version must be a non negative integer')
return '.'.join(['pyfibre', 'plugin', name, str(version)])<|docstring|>Creates an ID for the plugins.
Parameters
----------
name: str
A string identifying the plugin.
version: int
A version number for the plugin.<|endoftext|> |
82d33a262fad73df6b392931cfdcb113f6e1324b343e87fc04e1ed30b9f4aef0 | def __init__(self, iterable: Iterable[JSONTypes]=tuple(), *, redis: Optional[Redis]=None, key: Optional[str]=None) -> None:
'Initialize the RedisSet. O(n)'
super().__init__(redis=redis, key=key)
if iterable:
with self._watch(iterable) as pipeline:
if pipeline.exists(self.key):
raise KeyExistsError(self.redis, self.key)
self.__populate(pipeline, iterable) | Initialize the RedisSet. O(n) | pottery/set.py | __init__ | sthagen/pottery | 1 | python | def __init__(self, iterable: Iterable[JSONTypes]=tuple(), *, redis: Optional[Redis]=None, key: Optional[str]=None) -> None:
super().__init__(redis=redis, key=key)
if iterable:
with self._watch(iterable) as pipeline:
if pipeline.exists(self.key):
raise KeyExistsError(self.redis, self.key)
self.__populate(pipeline, iterable) | def __init__(self, iterable: Iterable[JSONTypes]=tuple(), *, redis: Optional[Redis]=None, key: Optional[str]=None) -> None:
super().__init__(redis=redis, key=key)
if iterable:
with self._watch(iterable) as pipeline:
if pipeline.exists(self.key):
raise KeyExistsError(self.redis, self.key)
self.__populate(pipeline, iterable)<|docstring|>Initialize the RedisSet. O(n)<|endoftext|> |
29071907a03a0d92c29251c6b7be52f30350cc857dbd0fcf44d5cb90c5421016 | def __contains__(self, value: Any) -> bool:
's.__contains__(element) <==> element in s. O(1)'
try:
encoded_value = self._encode(value)
except TypeError:
return False
return self.redis.sismember(self.key, encoded_value) | s.__contains__(element) <==> element in s. O(1) | pottery/set.py | __contains__ | sthagen/pottery | 1 | python | def __contains__(self, value: Any) -> bool:
try:
encoded_value = self._encode(value)
except TypeError:
return False
return self.redis.sismember(self.key, encoded_value) | def __contains__(self, value: Any) -> bool:
try:
encoded_value = self._encode(value)
except TypeError:
return False
return self.redis.sismember(self.key, encoded_value)<|docstring|>s.__contains__(element) <==> element in s. O(1)<|endoftext|> |
e9ca44aedb544eede9f3d81595c509ccd5d6c1faf441acd0f4e4a5d1facf3eca | def contains_many(self, *values: JSONTypes) -> Generator[(bool, None, None)]:
'Yield whether this RedisSet contains multiple elements. O(n)'
encoded_values = []
for value in values:
try:
encoded_value = self._encode(value)
except TypeError:
encoded_value = str(uuid.uuid4())
encoded_values.append(encoded_value)
for is_member in self.redis.smismember(self.key, encoded_values):
(yield bool(is_member)) | Yield whether this RedisSet contains multiple elements. O(n) | pottery/set.py | contains_many | sthagen/pottery | 1 | python | def contains_many(self, *values: JSONTypes) -> Generator[(bool, None, None)]:
encoded_values = []
for value in values:
try:
encoded_value = self._encode(value)
except TypeError:
encoded_value = str(uuid.uuid4())
encoded_values.append(encoded_value)
for is_member in self.redis.smismember(self.key, encoded_values):
(yield bool(is_member)) | def contains_many(self, *values: JSONTypes) -> Generator[(bool, None, None)]:
encoded_values = []
for value in values:
try:
encoded_value = self._encode(value)
except TypeError:
encoded_value = str(uuid.uuid4())
encoded_values.append(encoded_value)
for is_member in self.redis.smismember(self.key, encoded_values):
(yield bool(is_member))<|docstring|>Yield whether this RedisSet contains multiple elements. O(n)<|endoftext|> |
60013fc12016a8cfb077970fdd50990d1e2b9fb78863402d9f134149c83f24f6 | def __len__(self) -> int:
'Return the number of elements in the RedisSet. O(1)'
return self.redis.scard(self.key) | Return the number of elements in the RedisSet. O(1) | pottery/set.py | __len__ | sthagen/pottery | 1 | python | def __len__(self) -> int:
return self.redis.scard(self.key) | def __len__(self) -> int:
return self.redis.scard(self.key)<|docstring|>Return the number of elements in the RedisSet. O(1)<|endoftext|> |
1cd6a0dd6f6aaea56a8082a420da84fec4bcc4813fc6edd38cb4b3cd868a6f3d | def add(self, value: JSONTypes) -> None:
'Add an element to the RedisSet. O(1)'
encoded_value = self._encode(value)
self.redis.sadd(self.key, encoded_value) | Add an element to the RedisSet. O(1) | pottery/set.py | add | sthagen/pottery | 1 | python | def add(self, value: JSONTypes) -> None:
encoded_value = self._encode(value)
self.redis.sadd(self.key, encoded_value) | def add(self, value: JSONTypes) -> None:
encoded_value = self._encode(value)
self.redis.sadd(self.key, encoded_value)<|docstring|>Add an element to the RedisSet. O(1)<|endoftext|> |
7ee20ec16cd19023875ef98b9439f615f7284f81618265150fb651ff4811bf64 | def discard(self, value: JSONTypes) -> None:
'Remove an element from the RedisSet. O(1)'
encoded_value = self._encode(value)
self.redis.srem(self.key, encoded_value) | Remove an element from the RedisSet. O(1) | pottery/set.py | discard | sthagen/pottery | 1 | python | def discard(self, value: JSONTypes) -> None:
encoded_value = self._encode(value)
self.redis.srem(self.key, encoded_value) | def discard(self, value: JSONTypes) -> None:
encoded_value = self._encode(value)
self.redis.srem(self.key, encoded_value)<|docstring|>Remove an element from the RedisSet. O(1)<|endoftext|> |
2d3a8d7dfecaea54a872150430c89361fb738fe78b30a75980023a2ebe2163c0 | def __repr__(self) -> str:
'Return the string representation of the RedisSet. O(n)'
warnings.warn(cast(str, InefficientAccessWarning.__doc__), InefficientAccessWarning)
return f'{self.__class__.__name__}{self.__to_set()}' | Return the string representation of the RedisSet. O(n) | pottery/set.py | __repr__ | sthagen/pottery | 1 | python | def __repr__(self) -> str:
warnings.warn(cast(str, InefficientAccessWarning.__doc__), InefficientAccessWarning)
return f'{self.__class__.__name__}{self.__to_set()}' | def __repr__(self) -> str:
warnings.warn(cast(str, InefficientAccessWarning.__doc__), InefficientAccessWarning)
return f'{self.__class__.__name__}{self.__to_set()}'<|docstring|>Return the string representation of the RedisSet. O(n)<|endoftext|> |
b70db3fe97593c934f89312cb833d29492ebf1c8043019e3c7a8b0c57a080bb9 | def pop(self) -> JSONTypes:
'Remove and return an element from the RedisSet(). O(1)'
encoded_value = self.redis.spop(self.key)
if (encoded_value is None):
raise KeyError('pop from an empty set')
value = self._decode(cast(bytes, encoded_value))
return value | Remove and return an element from the RedisSet(). O(1) | pottery/set.py | pop | sthagen/pottery | 1 | python | def pop(self) -> JSONTypes:
encoded_value = self.redis.spop(self.key)
if (encoded_value is None):
raise KeyError('pop from an empty set')
value = self._decode(cast(bytes, encoded_value))
return value | def pop(self) -> JSONTypes:
encoded_value = self.redis.spop(self.key)
if (encoded_value is None):
raise KeyError('pop from an empty set')
value = self._decode(cast(bytes, encoded_value))
return value<|docstring|>Remove and return an element from the RedisSet(). O(1)<|endoftext|> |
361a69e912ae705915f7683ef1efbb04dcb546c93271e1469a59934e677b5c1d | def remove(self, value: JSONTypes) -> None:
'Remove an element from the RedisSet(). O(1)'
encoded_value = self._encode(value)
if (not self.redis.srem(self.key, encoded_value)):
raise KeyError(value) | Remove an element from the RedisSet(). O(1) | pottery/set.py | remove | sthagen/pottery | 1 | python | def remove(self, value: JSONTypes) -> None:
encoded_value = self._encode(value)
if (not self.redis.srem(self.key, encoded_value)):
raise KeyError(value) | def remove(self, value: JSONTypes) -> None:
encoded_value = self._encode(value)
if (not self.redis.srem(self.key, encoded_value)):
raise KeyError(value)<|docstring|>Remove an element from the RedisSet(). O(1)<|endoftext|> |
cbe6f6773d86c1b6f77fef70f7fdd76716c53a21b409268fe3e91aa98816f4b3 | def isdisjoint(self, other: Iterable[Any]) -> bool:
'Return True if two sets have a null intersection. O(n)'
return (not self.__intersection(other)) | Return True if two sets have a null intersection. O(n) | pottery/set.py | isdisjoint | sthagen/pottery | 1 | python | def isdisjoint(self, other: Iterable[Any]) -> bool:
return (not self.__intersection(other)) | def isdisjoint(self, other: Iterable[Any]) -> bool:
return (not self.__intersection(other))<|docstring|>Return True if two sets have a null intersection. O(n)<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.