func_code_string stringlengths 52 1.94M | func_documentation_string stringlengths 1 47.2k |
|---|---|
def num_coll_reqd(DIM_FRACTAL, material, DiamTarget):
return DIM_FRACTAL * np.log2(DiamTarget/material.Diameter) | Return the number of doubling collisions required.
Calculates the number of doubling collisions required to produce
a floc of diameter DiamTarget. |
def sep_dist_floc(ConcAluminum, ConcClay, coag, material,
DIM_FRACTAL, DiamTarget):
return (material.Diameter
* (np.pi/(6
* frac_vol_floc_initial(ConcAluminum, ConcClay,
coag, material)
))**(... | Return separation distance as a function of floc size. |
def frac_vol_floc(ConcAluminum, ConcClay, coag, DIM_FRACTAL,
material, DiamTarget):
return (frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material)
* (DiamTarget / material.Diameter)**(3-DIM_FRACTAL)
) | Return the floc volume fraction. |
def dens_floc_init(ConcAluminum, ConcClay, coag, material):
return (conc_floc(ConcAluminum, ConcClay, coag).magnitude
/ frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material)
) | Return the density of the initial floc.
Initial floc is made primarily of the primary colloid and nanoglobs. |
def ratio_area_clay_total(ConcClay, material, DiamTube, RatioHeightDiameter):
return (1
/ (1
+ (2 * material.Diameter
/ (3 * DiamTube * ratio_clay_sphere(RatioHeightDiameter)
* (ConcClay / material.Density)
)
... | Return the surface area of clay normalized by total surface area.
Total surface area is a combination of clay and reactor wall
surface areas. This function is used to estimate how much coagulant
actually goes to the clay.
:param ConcClay: Concentration of clay in suspension
:type ConcClay: float
... |
def gamma_coag(ConcClay, ConcAluminum, coag, material,
DiamTube, RatioHeightDiameter):
return (1 - np.exp((
(-frac_vol_floc_initial(ConcAluminum, 0*u.kg/u.m**3, coag, material)
* material.Diameter)
/ (frac_vol_floc_initial(0... | Return the coverage of clay with nanoglobs.
This function accounts for loss to the tube flocculator walls
and a poisson distribution on the clay given random hits by the
nanoglobs. The poisson distribution results in the coverage only
gradually approaching full coverage as coagulant dose increases.
... |
def gamma_humic_acid_to_coag(ConcAl, ConcNatOrgMat, NatOrgMat, coag):
return min(((ConcNatOrgMat / conc_precipitate(ConcAl, coag).magnitude)
* (coag.Density / NatOrgMat.Density)
* (coag.Diameter / (4 * NatOrgMat.Diameter))
),
1) | Return the fraction of the coagulant that is coated with humic acid.
:param ConcAl: Concentration of aluminum in solution
:type ConcAl: float
:param ConcNatOrgMat: Concentration of natural organic matter in solution
:type ConcNatOrgMat: float
:param NatOrgMat: type of natural organic matter, e.g. ... |
def pacl_term(DiamTube, ConcClay, ConcAl, ConcNatOrgMat, NatOrgMat,
coag, material, RatioHeightDiameter):
return (gamma_coag(ConcClay, ConcAl, coag, material, DiamTube,
RatioHeightDiameter)
* (1 - gamma_humic_acid_to_coag(ConcAl, ConcNatOrgMat,
... | Return the fraction of the surface area that is covered with coagulant
that is not covered with humic acid.
:param DiamTube: Diameter of the dosing tube
:type DiamTube: float
:param ConcClay: Concentration of clay in solution
:type ConcClay: float
:param ConcAl: Concentration of aluminum in so... |
def dens_floc(ConcAl, ConcClay, DIM_FRACTAL, DiamTarget, coag, material, Temp):
WaterDensity = pc.density_water(Temp).magnitude
return ((dens_floc_init(ConcAl, ConcClay, coag, material).magnitude
- WaterDensity
)
* (material.Diameter / DiamTarget)**(3 - DIM_FRACTAL)
... | Calculate floc density as a function of size. |
def vel_term_floc(ConcAl, ConcClay, coag, material, DIM_FRACTAL,
DiamTarget, Temp):
WaterDensity = pc.density_water(Temp).magnitude
return (((pc.gravity.magnitude * material.Diameter**2)
/ (18 * PHI_FLOC * pc.viscosity_kinematic(Temp).magnitude)
)
* (... | Calculate floc terminal velocity. |
def diam_floc_vel_term(ConcAl, ConcClay, coag, material,
DIM_FRACTAL, VelTerm, Temp):
WaterDensity = pc.density_water(Temp).magnitude
return (material.Diameter * (((18 * VelTerm * PHI_FLOC
* pc.viscosity_kinematic(Temp).magnitude
)
... | Calculate floc diamter as a function of terminal velocity. |
def time_col_laminar(EnergyDis, Temp, ConcAl, ConcClay, coag, material,
DiamTarget, DiamTube, DIM_FRACTAL, RatioHeightDiameter):
return (((1/6) * ((6/np.pi)**(1/3))
* frac_vol_floc_initial(ConcAl, ConcClay, coag, material) ** (-2/3)
* (pc.viscosity_kinematic(Temp)... | Calculate single collision time for laminar flow mediated collisions.
Calculated as a function of floc size. |
def time_col_turbulent(EnergyDis, ConcAl, ConcClay, coag, material,
DiamTarget, DIM_FRACTAL):
return((1/6) * (6/np.pi)**(1/9) * EnergyDis**(-1/3) * DiamTarget**(2/3)
* frac_vol_floc_initial(ConcAl, ConcClay, coag, material)**(-8/9)
* (DiamTarget / material.Diameter)... | Calculate single collision time for turbulent flow mediated collisions.
Calculated as a function of floc size. |
def diam_kolmogorov(EnergyDis, Temp, ConcAl, ConcClay, coag, material,
DIM_FRACTAL):
return (material.Diameter
* ((eta_kolmogorov(EnergyDis, Temp).magnitude / material.Diameter)
* ((6 * frac_vol_floc_initial(ConcAl, ConcClay, coag, material))
/ n... | Return the size of the floc with separation distances equal to
the Kolmogorov length and the inner viscous length scale. |
def dean_number(PlantFlow, IDTube, RadiusCoil, Temp):
return (reynolds_rapid_mix(PlantFlow, IDTube, Temp)
* (IDTube / (2 * RadiusCoil))**(1/2)
) | Return the Dean Number.
The Dean Number is a dimensionless parameter that is the unfortunate
combination of Reynolds and tube curvature. It would have been better
to keep the Reynolds number and define a simple dimensionless geometric
parameter. |
def g_coil(FlowPlant, IDTube, RadiusCoil, Temp):
return (g_straight(FlowPlant, IDTube).magnitude
* (1 + 0.033 *
np.log10(dean_number(FlowPlant, IDTube, RadiusCoil, Temp)
) ** 4
) ** (1/2)
) | We need a reference for this.
Karen's thesis likely has this equation and the reference. |
def g_time_res(FlowPlant, IDTube, RadiusCoil, LengthTube, Temp):
return (g_coil(FlowPlant, IDTube, RadiusCoil, Temp).magnitude
* time_res_tube(IDTube, LengthTube, FlowPlant).magnitude
) | G Residence Time calculated for a coiled tube flocculator. |
def define_Precip(self, diameter, density, molecweight, alumMPM):
self.PrecipDiameter = diameter
self.PrecipDensity = density
self.PrecipMolecWeight = molecweight
self.PrecipAluminumMPM = alumMPM | Define a precipitate for the chemical.
:param diameter: Diameter of the precipitate in particulate form
:type diameter: float
:param density: Density of the material (mass/volume)
:type density: float
:param molecweight: Molecular weight of the material (mass/mole)
:type... |
def ent_tank_a(self):
# first guess planview area
a_new = 1 * u.m**2
a_ratio = 2 # set to >1+tolerance to start while loop
tolerance = 0.01
a_floc_pv = (
self.floc.vol /
(self.floc.downstream_H + (self.floc.HL / 2))
)
while a_rati... | Calculate the planview area of the entrance tank, given the volume of
the flocculator.
:returns: The planview area of the entrance tank.
:rtype: float * u.m ** 2 |
def n_lfom_rows(FLOW,HL_LFOM):
N_estimated = (HL_LFOM*np.pi/(2*width_stout(HL_LFOM,HL_LFOM)*FLOW))
variablerow = min(10,max(4,math.trunc(N_estimated.magnitude)))
# Forcing the LFOM to either have 4 or 8 rows, for design purposes
# If the hydraulic calculation finds that there should be 4 rows, then... | This equation states that the open area corresponding to one row can be
set equal to two orifices of diameter=row height. If there are more than
two orifices per row at the top of the LFOM then there are more orifices
than are convenient to drill and more than necessary for good accuracy.
Thus this rela... |
def area_lfom_orifices_top(FLOW,HL_LFOM):
return ((FLOW*width_stout(HL_LFOM*u.m,HL_LFOM*u.m-0.5*dist_center_lfom_rows(FLOW,HL_LFOM)).magnitude *
dist_center_lfom_rows(FLOW,HL_LFOM).magnitude)) | Estimate the orifice area corresponding to the top row of orifices.
Another solution method is to use integration to solve this problem.
Here we use the width of the stout weir in the center of the top row
to estimate the area of the top orifice |
def n_lfom_orifices_per_row_max(FLOW,HL_LFOM,drill_bits):
return math.floor(math.pi * (pipe.ID_SDR(
nom_diam_lfom_pipe(FLOW, HL_LFOM), design.lfom.SDR_LFOM).magnitude)
/ (orifice_diameter(FLOW, HL_LFOM, drill_bits).magnitude +
aguaclara.design.lfom.ORIFICE... | A bound on the number of orifices allowed in each row.
The distance between consecutive orifices must be enough to retain
structural integrity of the pipe. |
def height_lfom_orifices(FLOW,HL_LFOM,drill_bits):
return (np.arange((orifice_diameter(FLOW,HL_LFOM,drill_bits)*0.5),
HL_LFOM,
(dist_center_lfom_rows(FLOW,HL_LFOM)))) | Calculates the height of the center of each row of orifices.
The bottom of the bottom row orifices is at the zero elevation
point of the LFOM so that the flow goes to zero when the water height
is at zero. |
def flow_lfom_actual(FLOW,HL_LFOM,drill_bits,Row_Index_Submerged,N_LFOM_Orifices):
D_LFOM_Orifices=orifice_diameter(FLOW, HL_LFOM, drill_bits).magnitude
row_height=dist_center_lfom_rows(FLOW, HL_LFOM).magnitude
harray = (np.linspace(row_height, HL_LFOM, n_lfom_rows(FLOW, HL_LFOM))) - 0.5 * D_LFOM_Orifi... | Calculates the flow for a given number of submerged rows of orifices
harray is the distance from the water level to the center of the orifices
when the water is at the max level |
def round_sf(number, digits):
units = None
try:
num = number.magnitude
units = number.units
except AttributeError:
num = number
try:
if (units != None):
rounded_num = round(num, digits - int(floor(log10(abs(num)))) - 1) * units
else:
r... | Returns inputted value rounded to number of significant figures desired.
:param number: Value to be rounded
:type number: float
:param digits: number of significant digits to be rounded to.
:type digits: int |
def stepceil_with_units(param, step, unit):
counter = 0 * unit
while counter < param.to(unit):
counter += step * unit
return counter | This function returns the smallest multiple of 'step' greater than or
equal to 'param' and outputs the result in Pint units.
This function is unit-aware and functions without requiring translation
so long as 'param' and 'unit' are of the same dimensionality. |
def list_handler(HandlerResult="nparray"):
def decorate(func):
def wrapper(*args, **kwargs):
sequences = []
enumsUnitCheck = enumerate(args)
argsList = list(args)
#This for loop identifies pint unit objects and strips them
#of the... | Wraps a function to handle list inputs. |
def check_range(*args):
knownChecks = ('>0', '>=0', '0-1', '<0', '<=0', 'int', 'boolean')
for arg in args:
#Converts arg to a mutable list
arg = [*arg]
if len(arg) == 1:
#arg[1] details what range the parameter should fall within; if
#len(arg) is 1 that means... | Check whether passed paramters fall within approved ranges.
Does not return anything, but will raise an error if a parameter falls
outside of its defined range.
Input should be passed as an array of sequences, with each sequence
having three elements:
[0] is the value being checked,
[1... |
def main():
parser = argparse.ArgumentParser(
description='Performs clustering from the command-line. Calls JVM start/stop automatically.')
parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
parser.add_argument("-X", metavar="heap", d... | Runs a clusterer from the command-line. Calls JVM start/stop automatically.
Use -h to see all options. |
def update_clusterer(self, inst):
if self.is_updateable:
javabridge.call(self.jobject, "updateClusterer", "(Lweka/core/Instance;)V", inst.jobject)
else:
logger.critical(classes.get_classname(self.jobject) + " is not updateable!") | Updates the clusterer with the instance.
:param inst: the Instance to update the clusterer with
:type inst: Instance |
def update_finished(self):
if self.is_updateable:
javabridge.call(self.jobject, "updateFinished", "()V")
else:
logger.critical(classes.get_classname(self.jobject) + " is not updateable!") | Signals the clusterer that updating with new data has finished. |
def distribution_for_instance(self, inst):
pred = self.__distribution(inst.jobject)
return javabridge.get_env().get_double_array_elements(pred) | Peforms a prediction, returning the cluster distribution.
:param inst: the Instance to get the cluster distribution for
:type inst: Instance
:return: the cluster distribution
:rtype: float[] |
def cluster_assignments(self):
array = javabridge.call(self.jobject, "getClusterAssignments", "()[D")
if array is None:
return None
else:
return javabridge.get_env().get_double_array_elements(array) | Return an array of cluster assignments corresponding to the most recent set of instances clustered.
:return: the cluster assignments
:rtype: ndarray |
def classes_to_clusters(self):
array = javabridge.call(self.jobject, "getClassesToClusters", "()[I")
if array is None:
return None
else:
return javabridge.get_env().get_int_array_elements(array) | Return the array (ordered by cluster number) of minimum error class to cluster mappings.
:return: the mappings
:rtype: ndarray |
def crossvalidate_model(cls, clusterer, data, num_folds, rnd):
return javabridge.static_call(
"Lweka/clusterers/ClusterEvaluation;", "crossValidateModel",
"(Lweka/clusterers/DensityBasedClusterer;Lweka/core/Instances;ILjava/util/Random;)D",
clusterer.jobject, data.jo... | Cross-validates the clusterer and returns the loglikelihood.
:param clusterer: the clusterer instance to evaluate
:type clusterer: Clusterer
:param data: the data to evaluate on
:type data: Instances
:param num_folds: the number of folds
:type num_folds: int
:par... |
def deepcopy(obj):
if isinstance(obj, JavaObject):
wrapped = True
jobject = obj.jobject
else:
wrapped = False
jobject = obj
try:
serialized = javabridge.make_instance("weka/core/SerializedObject", "(Ljava/lang/Object;)V", jobject)
jcopy = javabridge.call(... | Creates a deep copy of the JavaObject (or derived class) or JB_Object.
:param obj: the object to create a copy of
:type obj: object
:return: the copy, None if failed to copy
:rtype: object |
def read_all(filename):
array = javabridge.static_call(
"Lweka/core/SerializationHelper;", "readAll",
"(Ljava/lang/String;)[Ljava/lang/Object;",
filename)
if array is None:
return None
else:
return javabridge.get_env().get_object_array_elements(array) | Reads the serialized objects from disk. Caller must wrap objects in appropriate Python wrapper classes.
:param filename: the file with the serialized objects
:type filename: str
:return: the list of JB_OBjects
:rtype: list |
def write(filename, jobject):
if isinstance(jobject, JavaObject):
jobject = jobject.jobject
javabridge.static_call(
"Lweka/core/SerializationHelper;", "write",
"(Ljava/lang/String;Ljava/lang/Object;)V",
filename, jobject) | Serializes the object to disk. JavaObject instances get automatically unwrapped.
:param filename: the file to serialize the object to
:type filename: str
:param jobject: the object to serialize
:type jobject: JB_Object or JavaObject |
def decrease_frequency(self, frequency=None):
if frequency is None:
javabridge.call(self.jobject, "decreaseFrequency", "()V")
else:
javabridge.call(self.jobject, "decreaseFrequency", "(I)V", frequency) | Decreases the frequency.
:param frequency: the frequency to decrease by, 1 if None
:type frequency: int |
def increase_frequency(self, frequency=None):
if frequency is None:
javabridge.call(self.jobject, "increaseFrequency", "()V")
else:
javabridge.call(self.jobject, "increaseFrequency", "(I)V", frequency) | Increases the frequency.
:param frequency: the frequency to increase by, 1 if None
:type frequency: int |
def consequence(self):
items = javabridge.get_collection_wrapper(
javabridge.call(self.jobject, "getConsequence", "()Ljava/util/Collection;"))
result = []
for item in items:
result.append(Item(item))
return result | Get the the consequence.
:return: the consequence, list of Item objects
:rtype: list |
def can_produce_rules(self):
if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"):
return False
return javabridge.call(self.jobject, "canProduceRules", "()Z") | Checks whether association rules can be generated.
:return: whether scheme implements AssociationRulesProducer interface and
association rules can be generated
:rtype: bool |
def association_rules(self):
if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"):
return None
return AssociationRules(
javabridge.call(self.jobject, "getAssociationRules", "()Lweka/associations/AssociationRules;")) | Returns association rules that were generated. Only if implements AssociationRulesProducer.
:return: the association rules that were generated
:rtype: AssociationRules |
def rule_metric_names(self):
if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"):
return None
return string_array_to_list(
javabridge.call(self.jobject, "getRuleMetricNames", "()[Ljava/lang/String;")) | Returns the rule metric names of the association rules. Only if implements AssociationRulesProducer.
:return: the metric names
:rtype: list |
def loader_for_file(filename):
loader = javabridge.static_call(
"weka/core/converters/ConverterUtils", "getLoaderForFile",
"(Ljava/lang/String;)Lweka/core/converters/AbstractFileLoader;", filename)
if loader is None:
return None
else:
return Loader(jobject=loader) | Returns a Loader that can load the specified file, based on the file extension. None if failed to determine.
:param filename: the filename to get the loader for
:type filename: str
:return: the associated loader instance or None if none found
:rtype: Loader |
def saver_for_file(filename):
saver = javabridge.static_call(
"weka/core/converters/ConverterUtils", "getSaverForFile",
"(Ljava/lang/String;)Lweka/core/converters/AbstractFileSaver;", filename)
if saver is None:
return None
else:
return Saver(jobject=saver) | Returns a Saver that can load the specified file, based on the file extension. None if failed to determine.
:param filename: the filename to get the saver for
:type filename: str
:return: the associated saver instance or None if none found
:rtype: Saver |
def save_any_file(data, filename):
saver = saver_for_file(filename)
if saver is None:
return False
else:
saver.save_file(data, filename)
return True | Determines a Saver based on the the file extension. Returns whether successfully saved.
:param filename: the name of the file to save
:type filename: str
:param data: the data to save
:type data: Instances
:return: whether successfully saved
:rtype: bool |
def ndarray_to_instances(array, relation, att_template="Att-#", att_list=None):
if len(numpy.shape(array)) != 2:
raise Exception("Number of array dimensions must be 2!")
rows, cols = numpy.shape(array)
# header
atts = []
if att_list is not None:
if len(att_list) != cols:
... | Converts the numpy matrix into an Instances object and returns it.
:param array: the numpy ndarray to convert
:type array: numpy.ndarray
:param relation: the name of the dataset
:type relation: str
:param att_template: the prefix to use for the attribute names, "#" is the 1-based index,
... |
def load_file(self, dfile, incremental=False):
self.enforce_type(self.jobject, "weka.core.converters.FileSourcedConverter")
self.incremental = incremental
if not javabridge.is_instance_of(dfile, "Ljava/io/File;"):
dfile = javabridge.make_instance(
"Ljava/io/F... | Loads the specified file and returns the Instances object.
In case of incremental loading, only the structure.
:param dfile: the file to load
:type dfile: str
:param incremental: whether to load the dataset incrementally
:type incremental: bool
:return: the full dataset ... |
def load_url(self, url, incremental=False):
self.enforce_type(self.jobject, "weka.core.converters.URLSourcedLoader")
self.incremental = incremental
javabridge.call(self.jobject, "reset", "()V")
javabridge.call(self.jobject, "setURL", "(Ljava/lang/String;)V", str(url))
if... | Loads the specified URL and returns the Instances object.
In case of incremental loading, only the structure.
:param url: the URL to load the data from
:type url: str
:param incremental: whether to load the dataset incrementally
:type incremental: bool
:return: the full ... |
def load(self):
javabridge.call(self.jobject, "reset", "()V")
return Instances(javabridge.call(self.jobject, "getDataSet", "()Lweka/core/Instances;")) | Loads the text files from the specified directory and returns the Instances object.
In case of incremental loading, only the structure.
:return: the full dataset or the header (if incremental)
:rtype: Instances |
def save_file(self, data, dfile):
self.enforce_type(self.jobject, "weka.core.converters.FileSourcedConverter")
if not javabridge.is_instance_of(dfile, "Ljava/io/File;"):
dfile = javabridge.make_instance(
"Ljava/io/File;", "(Ljava/lang/String;)V", javabridge.get_env()... | Saves the Instances object in the specified file.
:param data: the data to save
:type data: Instances
:param dfile: the file to save the data to
:type dfile: str |
def string_array_to_list(a):
result = []
length = javabridge.get_env().get_array_length(a)
wrapped = javabridge.get_env().get_object_array_elements(a)
for i in range(length):
result.append(javabridge.get_env().get_string(wrapped[i]))
return result | Turns the Java string array into Python unicode string list.
:param a: the string array to convert
:type a: JB_Object
:return: the string list
:rtype: list |
def string_list_to_array(l):
result = javabridge.get_env().make_object_array(len(l), javabridge.get_env().find_class("java/lang/String"))
for i in range(len(l)):
javabridge.get_env().set_object_array_element(result, i, javabridge.get_env().new_string_utf(l[i]))
return result | Turns a Python unicode string list into a Java String array.
:param l: the string list
:type l: list
:rtype: java string array
:return: JB_Object |
def double_matrix_to_ndarray(m):
rows = javabridge.get_env().get_object_array_elements(m)
num_rows = len(rows)
num_cols = javabridge.get_env().get_array_length(rows[0])
result = numpy.zeros(num_rows * num_cols).reshape((num_rows, num_cols))
i = 0
for row in rows:
elements = javabrid... | Turns the Java matrix (2-dim array) of doubles into a numpy 2-dim array.
:param m: the double matrix
:type m: JB_Object
:return: Numpy array
:rtype: numpy.ndarray |
def enumeration_to_list(enm):
result = []
while javabridge.call(enm, "hasMoreElements", "()Z"):
result.append(javabridge.call(enm, "nextElement", "()Ljava/lang/Object;"))
return result | Turns the java.util.Enumeration into a list.
:param enm: the enumeration to convert
:type enm: JB_Object
:return: the list
:rtype: list |
def main():
parser = argparse.ArgumentParser(
description='Performs attribute selection from the command-line. Calls JVM start/stop automatically.')
parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
parser.add_argument("-X", metavar=... | Runs attribute selection from the command-line. Calls JVM start/stop automatically.
Use -h to see all options. |
def search(self, evaluation, data):
array = javabridge.call(
self.jobject, "search", "(Lweka/attributeSelection/ASEvaluation;Lweka/core/Instances;)[I",
evaluation.jobject, data.jobject)
if array is None:
return None
else:
javabridge.get_en... | Performs the search and returns the indices of the selected attributes.
:param evaluation: the evaluation algorithm to use
:type evaluation: ASEvaluation
:param data: the data to use
:type data: Instances
:return: the selected attributes (0-based indices)
:rtype: ndarray |
def post_process(self, indices):
array = javabridge.call(self.jobject, "postProcess", "([I)[I", indices)
if array is None:
return None
else:
return javabridge.get_env().get_int_array_elements(array) | Post-processes the evaluator with the selected attribute indices.
:param indices: the attribute indices list to use
:type indices: ndarray
:return: the processed indices
:rtype: ndarray |
def selected_attributes(self):
array = javabridge.call(self.jobject, "selectedAttributes", "()[I")
if array is None:
return None
else:
return javabridge.get_env().get_int_array_elements(array) | Returns the selected attributes from the last run.
:return: the Numpy array of 0-based indices
:rtype: ndarray |
def ranked_attributes(self):
matrix = javabridge.call(self.jobject, "rankedAttributes", "()[[D")
if matrix is None:
return None
else:
return typeconv.double_matrix_to_ndarray(matrix) | Returns the matrix of ranked attributes from the last run.
:return: the Numpy matrix
:rtype: ndarray |
def reduce_dimensionality(self, data):
if type(data) is Instance:
return Instance(
javabridge.call(
self.jobject, "reduceDimensionality",
"(Lweka/core/Instance;)Lweka/core/Instance;", data.jobject))
else:
return Ins... | Reduces the dimensionality of the provided Instance or Instances object.
:param data: the data to process
:type data: Instances
:return: the reduced dataset
:rtype: Instances |
def generate_thresholdcurve_data(evaluation, class_index):
jtc = JavaObject.new_instance("weka.classifiers.evaluation.ThresholdCurve")
pred = javabridge.call(evaluation.jobject, "predictions", "()Ljava/util/ArrayList;")
result = Instances(
javabridge.call(jtc, "getCurve", "(Ljava/util/ArrayList... | Generates the threshold curve data from the evaluation object's predictions.
:param evaluation: the evaluation to obtain the predictions from
:type evaluation: Evaluation
:param class_index: the 0-based index of the class-label to create the plot for
:type class_index: int
:return: the generated th... |
def get_thresholdcurve_data(data, xname, yname):
xi = data.attribute_by_name(xname).index
yi = data.attribute_by_name(yname).index
x = []
y = []
for i in range(data.num_instances):
inst = data.get_instance(i)
x.append(inst.get_value(xi))
y.append(inst.get_value(yi))
... | Retrieves x and y columns from of the data generated by the weka.classifiers.evaluation.ThresholdCurve
class.
:param data: the threshold curve data
:type data: Instances
:param xname: the name of the X column
:type xname: str
:param yname: the name of the Y column
:type yname: str
:ret... |
def plot_roc(evaluation, class_index=None, title=None, key_loc="lower right", outfile=None, wait=True):
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
if class_index is None:
class_index = [0]
ax = None
for cindex ... | Plots the ROC (receiver operator characteristics) curve for the given predictions.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param evaluation: the evaluation to obtain the predictions from
:type evaluation: Evaluation
:param class_index: the list of 0-based in... |
def plot_learning_curve(classifiers, train, test=None, increments=100, metric="percent_correct",
title="Learning curve", label_template="[#] @ $", key_loc="lower right",
outfile=None, wait=True):
if not plot.matplotlib_available:
logger.error("Matplotlib ... | Plots a learning curve.
:param classifiers: list of Classifier template objects
:type classifiers: list of Classifier
:param train: dataset to use for the building the classifier, used for evaluating it test set None
:type train: Instances
:param test: optional dataset (or list of datasets) to use ... |
def all_packages():
establish_cache()
result = []
pkgs = javabridge.get_collection_wrapper(
javabridge.static_call(
"weka/core/WekaPackageManager", "getAllPackages", "()Ljava/util/List;"))
for pkge in pkgs:
result.append(Package(pkge))
return result | Returns a list of all packages.
:return: the list of packages
:rtype: list |
def install_package(pkge, version="Latest"):
establish_cache()
if pkge.startswith("http://") or pkge.startswith("https://"):
url = javabridge.make_instance(
"java/net/URL", "(Ljava/lang/String;)V", javabridge.get_env().new_string_utf(pkge))
return not javabridge.static_call(
... | The list of packages to install.
:param pkge: the name of the repository package, a URL (http/https) or a zip file
:type pkge: str
:param version: in case of the repository packages, the version
:type version: str
:return: whether successfully installed
:rtype: bool |
def is_installed(name):
pkgs = installed_packages()
for pkge in pkgs:
if pkge.name == name:
return True
return False | Checks whether a package with the name is already installed.
:param name: the name of the package
:type name: str
:return: whether the package is installed
:rtype: bool |
def dependencies(self):
result = []
dependencies = javabridge.get_collection_wrapper(
javabridge.call(self.jobject, "getDependencies", "()Ljava/util/List;"))
for dependency in dependencies:
result.append(Dependency(dependency))
return result | Returns the dependencies of the package.
:return: the list of Dependency objects
:rtype: list of Dependency |
def check_constraint(self, pkge=None, constr=None):
if not pkge is None:
return javabridge.call(
self.jobject, "checkConstraint", "(Lweka/core/packageManagement/Package;)Z", pkge.jobject)
if not constr is None:
return javabridge.call(
self... | Checks the constraints.
:param pkge: the package to check
:type pkge: Package
:param constr: the package constraint to check
:type constr: PackageConstraint |
def custom_properties(self, props):
fprops = javabridge.make_instance("java/io/File", "(Ljava/lang/String;)V", props)
javabridge.call(self.jobject, "setCustomPropsFile", "(Ljava/io/File;)V", fprops) | Sets the custom properties file to use.
:param props: the props file
:type props: str |
def retrieve_instances(self, query=None):
if query is None:
data = javabridge.call(self.jobject, "retrieveInstances", "()Lweka/core/Instances;")
else:
data = javabridge.call(self.jobject, "retrieveInstances", "(Ljava/lang/String;)Lweka/core/Instances;")
return In... | Executes either the supplied query or the one set via options (or the 'query' property).
:param query: query to execute if not the currently set one
:type query: str
:return: the generated data
:rtype: Instances |
def owner(self):
obj = javabridge.call(self.jobject, "getOwner", "()Lweka/core/CapabilitiesHandler;")
if obj is None:
return None
else:
return JavaObject(jobject=obj) | Returns the owner of these capabilities, if any.
:return: the owner, can be None
:rtype: JavaObject |
def dependencies(self):
result = []
iterator = javabridge.iterate_java(javabridge.call(self.jobject, "dependencies", "()Ljava/util/Iterator;"))
for c in iterator:
result.append(Capability(c))
return result | Returns all the dependencies.
:return: the dependency list
:rtype: list |
def for_instances(cls, data, multi=None):
if multi is None:
return Capabilities(javabridge.static_call(
"weka/core/Capabilities", "forInstances",
"(Lweka/core/Instances;)Lweka/core/Capabilities;", data.jobject))
else:
return Capabilities(j... | returns a Capabilities object specific for this data. The minimum number of instances is not set, the check
for multi-instance data is optional.
:param data: the data to generate the capabilities for
:type data: Instances
:param multi: whether to check the structure, too
:type m... |
def scatter_plot(data, index_x, index_y, percent=100.0, seed=1, size=50, title=None, outfile=None, wait=True):
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
# create subsample
data = plot.create_subsample(data, percent=percen... | Plots two attributes against each other.
TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
:param data: the dataset
:type data: Instances
:param index_x: the 0-based index of the attribute on the x axis
:type index_x: int
:param index_y: the 0-based index of th... |
def line_plot(data, atts=None, percent=100.0, seed=1, title=None, outfile=None, wait=True):
if not plot.matplotlib_available:
logger.error("Matplotlib is not installed, plotting unavailable!")
return
# create subsample
data = plot.create_subsample(data, percent=percent, seed=seed)
f... | Uses the internal format to plot the dataset, one line per instance.
:param data: the dataset
:type data: Instances
:param atts: the list of 0-based attribute indices of attributes to plot
:type atts: list
:param percent: the percentage of the dataset to use for plotting
:type percent: float
... |
def filter(self, data):
if isinstance(data, list):
result = []
for d in data:
result.append(Instances(javabridge.static_call(
"Lweka/filters/Filter;", "useFilter",
"(Lweka/core/Instances;Lweka/filters/Filter;)Lweka/core/Ins... | Filters the dataset(s). When providing a list, this can be used to create compatible train/test sets,
since the filter only gets initialized with the first dataset and all subsequent datasets get transformed
using the same setup.
NB: inputformat(Instances) must have been called beforehand.
... |
def filters(self):
objects = javabridge.get_env().get_object_array_elements(
javabridge.call(self.jobject, "getFilters", "()[Lweka/filters/Filter;"))
result = []
for obj in objects:
result.append(Filter(jobject=obj))
return result | Returns the list of base filters.
:return: the filter list
:rtype: list |
def filters(self, filters):
obj = []
for fltr in filters:
obj.append(fltr.jobject)
javabridge.call(self.jobject, "setFilters", "([Lweka/filters/Filter;)V", obj) | Sets the base filters.
:param filters: the list of base filters to use
:type filters: list |
def generate_help(self):
result = []
result.append(self.__class__.__name__)
result.append(re.sub(r'.', '=', self.__class__.__name__))
result.append("")
result.append("Supported value names:")
for a in self.allowed:
result.append(a)
return '\n'... | Generates a help string for this container.
:return: the help string
:rtype: str |
def post_execute(self):
result = super(Transformer, self).post_execute()
if result is None:
self._input = None
return result | Gets executed after the actual execution.
:return: None if successful, otherwise error message
:rtype: str |
def quickinfo(self):
return "incremental: " + str(self.config["incremental"]) \
+ ", custom: " + str(self.config["use_custom_loader"]) \
+ ", loader: " + base.to_commandline(self.config["custom_loader"]) | Returns a short string describing some of the options of the actor.
:return: the info, None if not available
:rtype: str |
def fix_config(self, options):
opt = "incremental"
if opt not in options:
options[opt] = False
if opt not in self.help:
self.help[opt] = "Whether to load the dataset incrementally (bool)."
opt = "use_custom_loader"
if opt not in options:
... | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict |
def check_input(self, token):
if token is None:
raise Exception(self.full_name + ": No token provided!")
if isinstance(token.payload, str):
return
raise Exception(self.full_name + ": Unhandled class: " + classes.get_classname(token.payload)) | Performs checks on the input token. Raises an exception if unsupported.
:param token: the token to check
:type token: Token |
def do_execute(self):
fname = str(self.input.payload)
if not os.path.exists(fname):
return "File '" + fname + "' does not exist!"
if not os.path.isfile(fname):
return "Location '" + fname + "' is not a file!"
if self.resolve_option("use_custom_loader"):
... | The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str |
def output(self):
if self._iterator is not None:
try:
inst = self._iterator.next()
result = Token(inst)
except Exception as e:
self._iterator = None
result = None
else:
result = super(LoadDataset... | Returns the next available output token.
:return: the next token, None if none available
:rtype: Token |
def stop_execution(self):
super(LoadDataset, self).stop_execution()
self._loader = None
self._iterator = None | Triggers the stopping of the object. |
def wrapup(self):
self._loader = None
self._iterator = None
super(LoadDataset, self).wrapup() | Finishes up after execution finishes, does not remove any graphical output. |
def fix_config(self, options):
options = super(SetStorageValue, self).fix_config(options)
opt = "storage_name"
if opt not in options:
options[opt] = "unknown"
if opt not in self.help:
self.help[opt] = "The storage value name for storing the payload under ... | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict |
def do_execute(self):
if self.storagehandler is None:
return "No storage handler available!"
self.storagehandler.storage[self.resolve_option("storage_name")] = self.input.payload
self._output.append(self.input)
return None | The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str |
def fix_config(self, options):
options = super(DeleteStorageValue, self).fix_config(options)
opt = "storage_name"
if opt not in options:
options[opt] = "unknown"
if opt not in self.help:
self.help[opt] = "The name of the storage value to delete (string)."... | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict |
def fix_config(self, options):
options = super(InitStorageValue, self).fix_config(options)
opt = "storage_name"
if opt not in options:
options[opt] = "unknown"
if opt not in self.help:
self.help[opt] = "The name of the storage value to delete (string)."
... | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict |
def do_execute(self):
if self.storagehandler is None:
return "No storage handler available!"
self.storagehandler.storage[self.resolve_option("storage_name")] = eval(str(self.resolve_option("value")))
self._output.append(self.input)
return None | The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str |
def fix_config(self, options):
options = super(UpdateStorageValue, self).fix_config(options)
opt = "storage_name"
if opt not in options:
options[opt] = "unknown"
if opt not in self.help:
self.help[opt] = "The name of the storage value to update (string)."... | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict |
def do_execute(self):
if self.storagehandler is None:
return "No storage handler available!"
expr = str(self.resolve_option("expression")).replace(
"{X}", str(self.storagehandler.storage[str(self.resolve_option("storage_name"))]))
expr = self.storagehandler.expan... | The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str |
def fix_config(self, options):
options = super(MathExpression, self).fix_config(options)
opt = "expression"
if opt not in options:
options[opt] = "{X}"
if opt not in self.help:
self.help[opt] = "The mathematical expression to evaluate (string)."
r... | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict |
def do_execute(self):
expr = str(self.resolve_option("expression"))
expr = expr.replace("{X}", str(self.input.payload))
self._output.append(Token(eval(expr)))
return None | The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str |
def fix_config(self, options):
options = super(ClassSelector, self).fix_config(options)
opt = "index"
if opt not in options:
options[opt] = "last"
if opt not in self.help:
self.help[opt] = "The class index (1-based number); 'first' and 'last' are accepted... | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.