index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
4,192
|
DiamondLightSource/islatu
|
refs/heads/master
|
/tests/unit/test_region.py
|
"""
This module tests the simple islatu.region module's Region class.
"""
from islatu.region import Region
def test_region_attr_access(region_01: Region):
    """
    Check that a region's start/end attributes are readable and hold the
    values prepared by the region_01 fixture.
    """
    expected_attrs = {"x_start": 1056, "x_end": 1124,
                      "y_start": 150, "y_end": 250}
    for attr_name, expected_value in expected_attrs.items():
        assert getattr(region_01, attr_name) == expected_value
def test_region_instantiation():
    """
    Regions constructed with out-of-order bounds should swap them so that
    each start attribute lies before the corresponding end attribute.
    """
    swapped = Region(2, 1, 4, 3)
    assert (swapped.x_start, swapped.x_end) == (1, 2)
    assert (swapped.y_start, swapped.y_end) == (3, 4)
def test_region_length(region_01: Region):
    """
    A region's x/y lengths should equal end minus start.
    """
    assert region_01.x_length == 68   # 1124 - 1056
    assert region_01.y_length == 100  # 250 - 150
def test_region_num_pixels(region_01: Region):
    """
    The pixel count of a region should be the product of its x-length and
    y-length.
    """
    expected_pixels = (1124 - 1056) * (250 - 150)
    assert region_01.num_pixels == expected_pixels
def test_region_equality(region_01: Region):
    """
    Make sure that our __eq__ method is working: a freshly built region with
    identical bounds must compare equal to the fixture region.
    """
    equivalent_region = Region(1056, 1124, 150, 250)
    assert equivalent_region == region_01
|
{"/src/islatu/io.py": ["/src/islatu/scan.py", "/src/islatu/image.py", "/src/islatu/data.py", "/src/islatu/region.py", "/src/islatu/debug.py", "/src/islatu/metadata.py"], "/src/islatu/background.py": ["/src/islatu/region.py", "/src/islatu/image.py"], "/src/islatu/refl_profile.py": ["/src/islatu/scan.py", "/src/islatu/stitching.py", "/src/islatu/data.py"], "/src/islatu/stitching.py": ["/src/islatu/scan.py"]}
|
4,193
|
DiamondLightSource/islatu
|
refs/heads/master
|
/tests/system/_test_runner.py
|
"""
This module tests the islatu.runner module's processing capabilities.
"""
import os
import numpy as np
from islatu.runner import i07reduce
def test_i07reduce_dcd(tmp_path, path_to_resources, old_dcd_data):
    """
    Runs a full DCD reduction via i07reduce and compares the saved output
    against a previously validated reference dataset.
    """
    # Reduce scans 404875..404882 using the DCD recipe.
    scans = range(404875, 404883)
    recipe = os.path.join(path_to_resources, "dcd.yaml")
    i07reduce(scans, recipe, path_to_resources, filename=tmp_path)

    # Load whatever file the reduction wrote into tmp_path and check each of
    # the first three rows against the reference to a tolerance of 1e-3.
    output_file = os.path.join(tmp_path, os.listdir(tmp_path)[0])
    reduced_data = np.loadtxt(output_file)
    for row_index in range(3):
        assert np.allclose(reduced_data[row_index],
                           old_dcd_data[row_index], 1e-3)
|
{"/src/islatu/io.py": ["/src/islatu/scan.py", "/src/islatu/image.py", "/src/islatu/data.py", "/src/islatu/region.py", "/src/islatu/debug.py", "/src/islatu/metadata.py"], "/src/islatu/background.py": ["/src/islatu/region.py", "/src/islatu/image.py"], "/src/islatu/refl_profile.py": ["/src/islatu/scan.py", "/src/islatu/stitching.py", "/src/islatu/data.py"], "/src/islatu/stitching.py": ["/src/islatu/scan.py"]}
|
4,194
|
DiamondLightSource/islatu
|
refs/heads/master
|
/CLI/process_xrr.py
|
#!/usr/bin/env python3
"Command line interface for the Islatu library."
import argparse
import os
if __name__ == "__main__":
    # First deal with the parsing of the command line arguments using the
    # argparse library.
    HELP_STR = (
        "Command line interface to the Islatu library's autoprocessing " +
        "functionality."
    )
    parser = argparse.ArgumentParser(description=HELP_STR)

    # The most important argument is the path to the data. If this is not
    # provided, we'll assume that we're in the data directory. Note that the
    # default argument is never passed to add_argument because the default
    # behaviour implemented here is too complex in some cases to be replaced by
    # simple hardcoded values. Instead, default values are calculated after
    # parse_args is called.
    HELP_STR = (
        "Path to the directory in which the data is stored. If this " +
        "is not specified, your current directory will be used."
    )
    parser.add_argument("-d", "--data_path", help=HELP_STR)

    HELP_STR = (
        "Path to the .yaml recipe file. " +
        "If this is not specified, this module will search your data " +
        "directory, and data_path/processing/, for a .yaml file."
    )
    parser.add_argument("-y", "--yaml_path", help=HELP_STR)

    HELP_STR = (
        "Use this flag if you are on site in diamond and would like your " +
        "data to be processed on a cluster. (19/10/2021) Note: this is " +
        "currently finicky; if you *need* to get this to work email " +
        "richard.brearton@diamond.ac.uk"
    )
    parser.add_argument("-c", "--cluster", help=HELP_STR, action="store_true")

    HELP_STR = (
        "Specify the first scan number to process. If this is not specified, " +
        "no lower bound on scan number will be placed on scans found in the " +
        "data directory. If neither lower nor upper bounds are placed, all " +
        "scans found in the data directory will be used to construct a profile."
    )
    parser.add_argument("-l", "--lower_bound", help=HELP_STR, type=int)

    HELP_STR = (
        "Specify the final scan number to process. If this is not specified, " +
        "no upper bound will be placed on scan number for scans found in the " +
        "data directory."
    )
    parser.add_argument("-u", "--upper_bound", help=HELP_STR, type=int)

    HELP_STR = (
        "Directly specify the scan numbers to be used to construct the " +
        "profile. Simply sequentially list the scan numbers. Example usage: " +
        "python3 process_xrr.py --scan_numbers 401320 401321 401324 401326 " +
        "-d data/ -o processed_curves/. This argument overwrites -l and -u."
    )
    parser.add_argument("-N", "--scan_numbers",
                        help=HELP_STR, nargs='*', type=int)

    HELP_STR = (
        "Specify the directory in which you would like your processed " +
        "reflectivity curve to be stored. Defaults to data_path/processing/"
    )
    parser.add_argument("-o", "--output", help=HELP_STR)

    HELP_STR = (
        """
        Specify a list of scans whose q values should be limited, as well as the
        corresponding acceptable minimum and maximum q-values. For example:
        -Q 413243 0 0.4 413244 0.3 0.6 413248 0.8 inf
        Would ignore any q-values higher than 0.4 in scan 413243, would
        ignore any q-values smaller than 0.3 or larger than 0.6 in scan number
        413244, and would ignore any q-values lower than 0.8 present in scan
        number 413248. As implied in the example, a value of 0 indicates
        "no lower limit" and a value of inf indicates "no upper limit". In
        general, the numbers "413243" etc. given above must be unique to the
        name of the file from which the scan was parsed.
        """
    )
    parser.add_argument("-Q", "--limit_q",
                        help=HELP_STR, nargs='*', type=str)

    # NOTE(review): an unused "Specify a list of " HELP_STR assignment used to
    # sit here; it was never attached to an argument and has been removed.

    # A switch to allow verbosity toggle.
    HELP_STR = "Increase output verbosity. -v = verbose, -vv = very verbose!"
    parser.add_argument("-v", "--verbose", help=HELP_STR, action="count")

    # Extract the arguments from the parser.
    args = parser.parse_args()

    # Now we can import islatu. We need to do this after parsing args so that
    # the -h/--help option doesn't get slowed down by bad load times in hdf5/
    # nexusformat libs.
    from islatu.runner import i07reduce
    from islatu.debug import debug

    # Now we need to generate default values of inputs, where required.
    # Default to local dir.
    if args.data_path is None:
        args.data_path = os.getcwd()
    # Default to data_path/processing/.
    args.processing_path = os.path.join(args.data_path, "processing")
    # Default to smallest possible scan number (0).
    if args.lower_bound is None:
        args.lower_bound = 0
    # Make a number that will always be bigger than all other numbers.
    if args.upper_bound is None:
        args.upper_bound = float('inf')
    # Output should be stored in the processing directory by default.
    if args.output is None:
        args.output = args.processing_path
    if args.verbose is None:
        args.verbose = 0

    # Set islatu's logger to requested verbosity.
    debug.logging_lvl = args.verbose

    # Now it's time to prepare to do some XRR reduction. If the user is in
    # diamond and wants to use a cluster, then we should go ahead and do that.
    if args.cluster:
        raise NotImplementedError(
            "Islatu currently only runs locally. If cluster submission is " +
            "necessary, please contact richard.brearton@diamond.ac.uk"
        )

    # If execution reaches here, we're processing the scan locally. First look
    # for the .yaml file if we weren't explicitly told where it is.
    if args.yaml_path is None:
        debug.log("Searching for .yaml files in '" + args.data_path +
                  "' and '" + args.processing_path + "'.")
        # Search in both the processing directory and the data directory.
        files = []
        # Only check in the processing directory if it actually exists.
        # Bug fix: the original concatenated args.processing_path + x with no
        # path separator, producing unusable paths.
        if os.path.exists(args.processing_path):
            files.extend([os.path.join(args.processing_path, x)
                          for x in os.listdir(args.processing_path)])
        # The data_path should definitely exist. If it doesn't, we shouldn't be
        # unhappy about an error being raised at this point.
        # Bug fix: join with data_path so a .yaml found here is openable
        # regardless of the current working directory (the original extended
        # the list with bare filenames).
        files.extend([os.path.join(args.data_path, x)
                      for x in os.listdir(args.data_path)])
        # Work out which of these files are .yaml files.
        yaml_files = [x for x in files if x.endswith(".yaml")]
        debug.log(".yaml files found: " + str(yaml_files))
        # If we didn't find exactly one .yaml file, complain.
        if len(yaml_files) != 1:
            generic_err_str = (
                "Could not uniquely determine location of .yaml file.\n" +
                "Searched directories " + args.processing_path + " and " +
                args.data_path + ".\n" + "Hoped to find exactly one file, " +
                "but found " + str(len(yaml_files)) + ". "
            )
            if len(yaml_files) > 1:
                generic_err_str += "Names of found files are: " + \
                    str(yaml_files) + "."
            raise FileNotFoundError(generic_err_str)
        else:
            # We only found one .yaml, so that's our guy.
            args.yaml_path = yaml_files[0]

    # If execution reaches here, we've successfully found the .yaml file.
    # Next lets try to work out what scan numbers are in the data directory if
    # we weren't told explicitly which scan numbers we should be looking at.
    if args.scan_numbers is None:
        debug.log(
            "Scan numbers not explicitly given. Searching for scans " +
            "in directory " + args.data_path + "."
        )
        # Grab every valid looking nexus file in the directory.
        nexus_files = [x for x in os.listdir(
            args.data_path) if x.endswith(".nxs")]
        # Make noise if we didn't find any .nxs files.
        generic_cant_find_nxs = (
            "Couldn't find any nexus (.nxs) files in the data directory '" +
            args.data_path
        )
        if len(nexus_files) == 0:
            raise FileNotFoundError(
                generic_cant_find_nxs + "'."
            )
        # So, we found some .nxs files. Now lets grab the scan numbers from
        # these files.
        debug.log("Scans located: " + str(nexus_files))
        nexus_files = [int(x.replace(".nxs", '').replace("i07-", ''))
                       for x in nexus_files]
        # Now select the subset of these scan numbers that lies within the
        # closed interval [args.lower_bound, args.upper_bound].
        args.scan_numbers = [x for x in nexus_files if
                             x >= args.lower_bound and x <= args.upper_bound]
        debug.log("Scan numbers found: " + str(args.scan_numbers) + ".", 2)

        # Make sure we found some scans.
        if len(args.scan_numbers) == 0:
            raise FileNotFoundError(
                generic_cant_find_nxs +
                " whose scan numbers were greater than or equal to " +
                str(args.lower_bound) +
                " and less than or equal to " + str(args.upper_bound) + "."
            )

    if args.limit_q is not None:
        if len(args.limit_q) % 3 != 0:
            raise ValueError(
                f"""
                --limit_q must have a number of arguments passed to it that is
                a multiple of three. Instead, {len(args.limit_q)} arguments were
                found. Please use the pattern:
                -L N1 qmin1 qmax1 N2 qmin2 qmax2 ...
                where N1 is a scan number, qmin1 is the minimum q for the
                scan with scan number N1, and qmax1 is the maximum acceptable q
                for the scan with scan number N1, etc.. Please refer to the
                --help for more information.
                """
            )
        # Okay, this is presumably properly formatted. Lets turn this into a
        # list of dictionaries that we can pass directly to the
        # profile.subsample_q method.
        q_subsample_dicts = []
        for i, _ in enumerate(args.limit_q):
            if i % 3 == 0:
                # We're on a new scan, so we'll need a new subsample dict.
                q_subsample_dicts.append({})
                # Now grab that dict we just created and give it our new scan
                # index. Note that if i%3 != 0, then we can skip the creation
                # of a new dictionary.
                q_subsample_dicts[-1]['scan_ID'] = args.limit_q[i]
            elif i % 3 == 1:
                # Convert every 2nd and 3rd value to a float – these will be
                # our q limits.
                args.limit_q[i] = float(args.limit_q[i])
                q_subsample_dicts[-1]['q_min'] = args.limit_q[i]
            elif i % 3 == 2:
                # Convert every 2nd and 3rd value to a float – these will be
                # our q limits.
                args.limit_q[i] = float(args.limit_q[i])
                q_subsample_dicts[-1]['q_max'] = args.limit_q[i]
        args.limit_q = q_subsample_dicts

    # If execution reaches here, we found the .yaml file and we have the scan
    # numbers we'll construct the XRR curve from. This is all that we need: a
    # recipe and some data; let's go ahead and process the data on this machine.
    i07reduce(args.scan_numbers, args.yaml_path, args.data_path,
              filename=args.output, q_subsample_dicts=args.limit_q)
|
{"/src/islatu/io.py": ["/src/islatu/scan.py", "/src/islatu/image.py", "/src/islatu/data.py", "/src/islatu/region.py", "/src/islatu/debug.py", "/src/islatu/metadata.py"], "/src/islatu/background.py": ["/src/islatu/region.py", "/src/islatu/image.py"], "/src/islatu/refl_profile.py": ["/src/islatu/scan.py", "/src/islatu/stitching.py", "/src/islatu/data.py"], "/src/islatu/stitching.py": ["/src/islatu/scan.py"]}
|
4,195
|
DiamondLightSource/islatu
|
refs/heads/master
|
/src/islatu/refl_profile.py
|
"""
A profile is a measurement resulting from a scan, or a series of scans. Profiles
are the central objects in the islatu library, containing the total reflected
intensity as a function of scattering vector data.
"""
from typing import List
from .scan import Scan
from .stitching import concatenate, rebin
from .data import Data
class Profile(Data):
    """
    The object that is used to store all information relating to a reflectivity
    profile.
    """

    def __init__(self, data: Data, scans: List[Scan]) -> None:
        super().__init__(data.intensity, data.intensity_e, data.energy,
                         data.theta)
        self.scans = scans

    @classmethod
    def fromfilenames(cls, filenames, parser):
        """
        Instantiate a profile from a list of scan filenames.

        Args:
            filenames (:py:attr:`list`):
                List of files, one for each reflectometry scan. Can have length
                one.
            parser (:py:attr:`callable`):
                Parser function for the reflectometry scan files.
        """
        # Load the scans, specifying the scan axis name if necessary.
        scans = [parser(filename) for filename in filenames]

        # Now that the individual scans have been loaded, data needs to be
        # constructed. The simplest way to do this is by concatenating the
        # data from each of the constituent scans.
        q_vectors, intensity, intensity_e = concatenate(scans)

        # Note: we are making the implicit assumption that energy is independent
        # of scan number at this point.
        energy = scans[0].metadata.probe_energy
        data = Data(intensity, intensity_e, energy, q_vectors=q_vectors)

        return cls(data, scans)

    def crop(self, crop_function, **kwargs):
        """
        Calls the Class method for the :func:`~islatu.scan.Scan2D.crop`
        method for each :py:class:`~Scan2D` in :py:attr:`self.scans`.

        Args:
            crop_function (:py:attr:`callable`): Cropping function to be used.
            kwargs (:py:attr:`dict`, optional): Keyword arguments for the
                cropping function. Defaults to :py:attr:`None`.
        """
        for scan in self.scans:
            scan.crop(crop_function, **kwargs)
        self.concatenate()

    def bkg_sub(self, bkg_sub_function, **kwargs):
        """
        Class method for the :func:`~islatu.refl_data.Scan.bkg_sub` method for
        each :py:class:`~Scan` in the list.

        Args:
            bkg_sub_function (:py:attr:`callable`): Background subtraction
                function to be used.
            kwargs (:py:attr:`dict`, optional): Keyword arguments for
                the background subtraction function. Defaults to
                :py:attr:`None`.
        """
        # When a scan subtracts background from each of its images, its
        # background subtraction function may expose information relating to the
        # subtraction process. This information will be stored in bkg_sub_info.
        bkg_sub_info = []

        # Now just iterate over all of the scans in the profile and subtract the
        # background, storing the return values in bkg_sub_info.
        for scan in self.scans:
            bkg_sub_info.append(scan.bkg_sub(
                bkg_sub_function, **kwargs))
        self.concatenate()

        # Expose the optimized fit parameters for meta-analysis.
        return bkg_sub_info

    def subsample_q(self, scan_identifier, q_min=0, q_max=float('inf')):
        """
        For the scan with identifier scan_identifier, delete all data points for
        which q < q_min or q > q_max.

        Args:
            scan_identifier:
                The scan ID of the scan to be subsampled. This must be a unique
                substring of the filename from which the scan was taken. For
                example, if a scan's nexus filename is i07-413244.nxs, then
                a valid scan_ID would be "413244", as this string will uniquely
                identify the correct scan from within the profile.
            q_min:
                The smallest acceptable value of q. Defaults to 0 Å.
            q_max:
                The largest acceptable value of q. Defaults to inf Å.
        """
        # (A leftover debug print of every scan's src_path was removed here.)
        for scan in self.scans:
            if scan_identifier in scan.metadata.src_path:
                scan.subsample_q(q_min, q_max)
        self.concatenate()

    def footprint_correction(self, beam_width, sample_size):
        """
        Class method for :func:`~islatu.refl_data.Scan.footprint_correction`
        for each :py:class:`~Scan` in the list.

        Args:
            beam_width (:py:attr:`float`): Width of incident beam, in metres.
            sample_size (:py:class:`uncertainties.core.Variable`): Width of
                sample in the dimension of the beam, in metres.
        """
        for scan in self.scans:
            scan.footprint_correction(beam_width, sample_size)
        self.concatenate()

    def transmission_normalisation(self):
        """
        Perform the transmission correction.
        """
        for scan in self.scans:
            scan.transmission_normalisation()
        self.concatenate()

    def qdcd_normalisation(self, itp):
        """
        Class method for :func:`~islatu.refl_data.Scan.qdcd_normalisation` for
        each :py:class:`~Scan` in the list.

        Args:
            itp: The interpolator over the ``.dat`` file that contains the
                normalisation data.
        """
        for scan in self.scans:
            scan.qdcd_normalisation(itp)
        self.concatenate()

    def concatenate(self):
        """
        Class method for :func:`~islatu.stitching.concatenate`.
        """
        self.q_vectors, self.intensity, self.intensity_e = \
            concatenate(self.scans)

    def rebin(self, new_q=None, rebin_as="linear", number_of_q_vectors=5000):
        """
        Class method for :func:`islatu.stitching.rebin`.

        Args:
            new_q (:py:attr:`array_like`):
                Array of potential q-values. Defaults to :py:attr:`None`. If
                this argument is not specified, then the new q, R values are
                binned according to rebin_as and number_of_q_vectors.
            rebin_as (py:attr:`str`):
                String specifying how the data should be rebinned. Options are
                "linear" and "log". This is only used if the new_q are
                unspecified.
            number_of_q_vectors (:py:attr:`int`, optional):
                The max number of q-vectors to be using initially in the
                rebinning of the data. Defaults to :py:attr:`5000`.
        """
        self.q_vectors, self.intensity, self.intensity_e = rebin(
            self.q_vectors, (self.intensity, self.intensity_e), new_q,
            rebin_as=rebin_as, number_of_q_vectors=number_of_q_vectors)
|
{"/src/islatu/io.py": ["/src/islatu/scan.py", "/src/islatu/image.py", "/src/islatu/data.py", "/src/islatu/region.py", "/src/islatu/debug.py", "/src/islatu/metadata.py"], "/src/islatu/background.py": ["/src/islatu/region.py", "/src/islatu/image.py"], "/src/islatu/refl_profile.py": ["/src/islatu/scan.py", "/src/islatu/stitching.py", "/src/islatu/data.py"], "/src/islatu/stitching.py": ["/src/islatu/scan.py"]}
|
4,196
|
DiamondLightSource/islatu
|
refs/heads/master
|
/src/islatu/cropping.py
|
"""
Often the detector is a lot larger than the reflected intensity peak, so it
makes sense to crop the image to the peak.
"""
import numpy as np
from islatu.region import Region
def crop_to_region(array: np.ndarray, region: Region):
    """
    Return the sub-array of `array` delimited by `region`.

    Args:
        array:
            The array to crop.
        region:
            The instance of Region to crop to.
    """
    x_slice = slice(region.x_start, region.x_end)
    y_slice = slice(region.y_start, region.y_end)
    return array[x_slice, y_slice]
|
{"/src/islatu/io.py": ["/src/islatu/scan.py", "/src/islatu/image.py", "/src/islatu/data.py", "/src/islatu/region.py", "/src/islatu/debug.py", "/src/islatu/metadata.py"], "/src/islatu/background.py": ["/src/islatu/region.py", "/src/islatu/image.py"], "/src/islatu/refl_profile.py": ["/src/islatu/scan.py", "/src/islatu/stitching.py", "/src/islatu/data.py"], "/src/islatu/stitching.py": ["/src/islatu/scan.py"]}
|
4,197
|
DiamondLightSource/islatu
|
refs/heads/master
|
/src/islatu/stitching.py
|
"""
As reflectometry measurements typically consist of multiple scans at different
attenutation, we must stitch these together.
"""
from typing import List
import numpy as np
from .scan import Scan
def concatenate(scan_list: List[Scan]):
    """
    Join every scan's data into three flat arrays.

    Args:
        scan_list:
            List of reflectometry scans.

    Returns:
        :py:attr:`tuple`: Containing:
            - q-values.
            - Reflected intensities.
            - Errors on reflected intensities.
    """
    # Accumulate each quantity across scans, starting from empty arrays so
    # that an empty scan list yields three empty arrays.
    collected = {"q": np.array([]), "i": np.array([]), "e": np.array([])}
    for current_scan in scan_list:
        collected["q"] = np.append(collected["q"], current_scan.q_vectors)
        collected["i"] = np.append(collected["i"], current_scan.intensity)
        collected["e"] = np.append(collected["e"], current_scan.intensity_e)
    return collected["q"], collected["i"], collected["e"]
def rebin(q_vectors, reflected_intensity, new_q=None, rebin_as="linear",
          number_of_q_vectors=5000):
    """
    Rebin reflectivity data on a linear or logarithmic q-scale.

    Args:
        q_vectors:
            q - the current q vectors.
        reflected_intensity (:py:attr:`tuple`):
            (I, I_e) - The current reflected intensities, and their errors.
        new_q (:py:attr:`array_like`):
            Array of potential q-values. Defaults to :py:attr:`None`. If this
            argument is not specified, then the new q, R values are binned
            according to rebin_as and number_of_q_vectors.
        rebin_as (py:attr:`str`):
            String specifying how the data should be rebinned. Options are
            "linear" and "log". This is only used if the new_q are unspecified.
        number_of_q_vectors (:py:attr:`int`, optional):
            The max number of q-vectors to be using initially in the rebinning
            of the data. Defaults to :py:attr:`5000`.

    Returns:
        :py:attr:`tuple`: Containing:
            - q: rebinned q-values.
            - intensity: rebinned intensities.
            - intensity_e: rebinned intensity errors.
    """
    q = q_vectors
    intensity, intensity_e = reflected_intensity

    # Small offset so that the generated bin edges enclose the final point.
    epsilon = 0.001
    if new_q is None:
        # No bin edges were supplied, so generate them on the requested scale.
        if rebin_as == "log":
            new_q = np.logspace(np.log10(q[0]),
                                np.log10(q[-1] + epsilon),
                                number_of_q_vectors)
        elif rebin_as == "linear":
            new_q = np.linspace(q.min(), q.max() + epsilon,
                                number_of_q_vectors)

    binned_q = np.zeros_like(new_q)
    binned_R = np.zeros_like(new_q)
    binned_R_e = np.zeros_like(new_q)

    for bin_idx in range(len(new_q) - 1):
        # Which measured points fall inside [new_q[i], new_q[i+1])?
        members = [j for j in range(len(q))
                   if new_q[bin_idx] <= q[j] < new_q[bin_idx + 1]]

        # Skip the arithmetic entirely for empty bins.
        if not members:
            continue

        # Inverse-variance weighting minimises the variance of the weighted
        # mean of the measurements that landed in this bin.
        total_weight = np.sum([1 / float(intensity_e[j] ** 2)
                               for j in members])
        binned_R[bin_idx] = sum(
            intensity[j] / (intensity_e[j] ** 2) for j in members
        ) / total_weight
        binned_q[bin_idx] = sum(
            q[j] / (intensity_e[j] ** 2) for j in members
        ) / total_weight
        # The stddev of an inverse-variance weighted mean is always this.
        binned_R_e[bin_idx] = np.sqrt(1 / total_weight)

    # Strip out the bins that never received any data.
    empty_bins = np.argwhere(binned_R == 0)
    return (np.delete(binned_q, empty_bins),
            np.delete(binned_R, empty_bins),
            np.delete(binned_R_e, empty_bins))
|
{"/src/islatu/io.py": ["/src/islatu/scan.py", "/src/islatu/image.py", "/src/islatu/data.py", "/src/islatu/region.py", "/src/islatu/debug.py", "/src/islatu/metadata.py"], "/src/islatu/background.py": ["/src/islatu/region.py", "/src/islatu/image.py"], "/src/islatu/refl_profile.py": ["/src/islatu/scan.py", "/src/islatu/stitching.py", "/src/islatu/data.py"], "/src/islatu/stitching.py": ["/src/islatu/scan.py"]}
|
4,201
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Statistics/ZScore.py
|
from Calculator.Subtraction import subtraction
from Calculator.Division import division
from Statistics.Mean import get_mean
from Statistics.StandardDeviation import get_standard_deviation
def get_z_score(data):
    """
    Return the z-score of each value in data: (value - mean) / std deviation.

    Args:
        data: A list of numbers, or a single float (treated as a one-element
            dataset).

    Returns:
        A list containing the z-score of each input value.
    """
    if isinstance(data, float):
        data = [data]
    value_mean = get_mean(data)
    # Hoisted out of the loop: the standard deviation of the dataset is the
    # same for every element, and recomputing it (an O(n) pass) per value
    # made this function accidentally O(n^2).
    std_dev = get_standard_deviation(data)
    z = []
    for value in data:
        # NOTE(review): per the Calculator tests in this repo, subtraction(a, b)
        # appears to compute b - a and division(a, b) computes b / a, so this
        # is (value - mean) / std_dev.
        deviation = subtraction(value_mean, value)
        z.append(division(std_dev, deviation))
    return z
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,202
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Tests/test_CalculatorTest.py
|
import unittest
from Calculator.Calculator import Calculator
from CsvReader.CsvReader import CsvReader
class MyTestCase(unittest.TestCase):
    """
    Tests for the Calculator facade: hard-coded spot checks for each
    operation, followed by CSV-driven bulk checks.
    """

    def setUp(self) -> None:
        # Build a fresh Calculator before every test.
        self.calculator = Calculator()

    def test_instantiate_calculator(self):
        # Sanity check on the fixture type.
        self.assertIsInstance(self.calculator, Calculator)

    def test_add_method_calculator_success(self):
        self.assertEqual(self.calculator.add(1.36, 2.78), 4.14)

    def test_add_method_calculator_zero(self):
        # A number plus its negation is zero.
        self.assertEqual(self.calculator.add(-1.11, 1.11), 0)

    def test_subtract_method_calculator_success(self):
        # NOTE(review): subtract(a, b) appears to compute b - a.
        self.assertEqual(self.calculator.subtract(4, 10), 6)

    def test_subtract_method_calculator_zero(self):
        self.assertEqual(self.calculator.subtract(4, 4), 0)

    def test_multiply_method_calculator_success(self):
        self.assertEqual(self.calculator.multiply(5, 5), 25)

    def test_multiply_method_calculator_zero(self):
        self.assertEqual(self.calculator.multiply(5, 0), 0)

    def test_divide_method_calculator_success(self):
        # NOTE(review): divide(a, b) appears to compute b / a.
        self.assertEqual(self.calculator.divide(5, 20), 4)

    def test_divide_method_calculator_zero(self):
        # Under the reversed-argument convention this is 0 / 5.
        self.assertEqual(self.calculator.divide(5, 0), 0)

    def test_square_method_calculator_success(self):
        self.assertEqual(self.calculator.square(5), 25)

    def test_square_method_calculator_negative(self):
        self.assertEqual(self.calculator.square(-5), 25)

    def test_square_root_method_calculator_success(self):
        self.assertEqual(self.calculator.square_root(25), 5)

    def test_square_root_method_calculator_success_decimal(self):
        # square_root is accurate to 9 decimal places.
        self.assertEqual(self.calculator.square_root(39.99), 6.323764702)

    def test_subtraction(self):
        for record in CsvReader("Tests/Data/UnitTestSubtraction.csv").data:
            expected_float = float(record['Result'])
            self.assertEqual(
                self.calculator.subtract(float(record['Value 1']),
                                         float(record['Value 2'])),
                expected_float)
            expected_int = int(record['Result'])
            self.assertEqual(
                self.calculator.subtract(int(record['Value 1']),
                                         int(record['Value 2'])),
                expected_int)

    def test_addition(self):
        for record in CsvReader("Tests/Data/UnitTestAddition.csv").data:
            expected_float = float(record['Result'])
            self.assertEqual(
                self.calculator.add(float(record['Value 1']),
                                    float(record['Value 2'])),
                expected_float)
            expected_int = int(record['Result'])
            self.assertEqual(
                self.calculator.add(int(record['Value 1']),
                                    int(record['Value 2'])),
                expected_int)

    def test_multiplication(self):
        for record in CsvReader("Tests/Data/UnitTestMultiplication.csv").data:
            expected_float = float(record['Result'])
            self.assertEqual(
                self.calculator.multiply(float(record['Value 1']),
                                         float(record['Value 2'])),
                expected_float)
            expected_int = int(record['Result'])
            self.assertEqual(
                self.calculator.multiply(int(record['Value 1']),
                                         int(record['Value 2'])),
                expected_int)

    def test_division(self):
        for record in CsvReader("Tests/Data/UnitTestDivision.csv").data:
            expected_float = float(record['Result'])
            self.assertEqual(
                self.calculator.divide(float(record['Value 1']),
                                       float(record['Value 2'])),
                expected_float)
            # The integer-input case is still compared against the float
            # expectation, since integer division may not be exact.
            expected = float(record['Result'])
            self.assertEqual(
                self.calculator.divide(int(record['Value 1']),
                                       int(record['Value 2'])),
                expected)

    def test_square(self):
        for record in CsvReader("Tests/Data/UnitTestSquare.csv").data:
            expected_float = float(record['Result'])
            self.assertEqual(
                self.calculator.square(float(record['Value 1'])),
                expected_float)
            expected_int = int(record['Result'])
            self.assertEqual(
                self.calculator.square(int(record['Value 1'])),
                expected_int)

    def test_square_root(self):
        for record in CsvReader("Tests/Data/UnitTestSquareRoot.csv").data:
            # Compare to 8 decimal places against the CSV expectations.
            expected_float = float(record['Result'])
            self.assertEqual(
                round(self.calculator.square_root(float(record['Value 1'])), 8),
                expected_float)
            expected = float(record['Result'])
            self.assertEqual(
                round(self.calculator.square_root(int(record['Value 1'])), 8),
                expected)
# Allow this test module to be run directly with `python`.
if __name__ == '__main__':
    unittest.main()
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,203
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Tests/test_Statistics.py
|
import unittest
from numpy.random import seed
from Statistics.Statistics import Statistics
import random
import statistics
class MyTestCase(unittest.TestCase):
    """
    Tests for the Statistics facade, comparing its results against the
    standard library's statistics module on a random dataset.
    """

    def setUp(self) -> None:
        # NOTE(review): numpy's seed does not influence the stdlib `random`
        # module used below; seed `random` too so the dataset (and therefore
        # any failure) is reproducible across runs.
        seed(5)
        random.seed(5)
        self.testData = []
        for _ in range(0, 10):
            self.testData.append(random.randint(0, 15))
        # Reference values computed with the standard library.
        self.mean_value = statistics.mean(self.testData)
        self.median_value = statistics.median(self.testData)
        self.mode_value = statistics.mode(self.testData)
        self.variance_value = statistics.variance(self.testData)
        self.standard_deviation_value = statistics.stdev(self.testData)
        self.statistics = Statistics()

    def test_instantiate_calculator(self):
        self.assertIsInstance(self.statistics, Statistics)

    def test_mean_calculator(self):
        mean = self.statistics.stats_mean(self.testData)
        self.assertEqual(mean, self.mean_value)

    # Bug fix: test_median_calculator and test_mode_calculator were each
    # defined twice; the later duplicates shadowed the earlier definitions,
    # so only one copy of each ever ran. The duplicates have been removed.
    def test_median_calculator(self):
        median = self.statistics.stats_median(self.testData)
        self.assertEqual(median, self.median_value)

    def test_mode_calculator(self):
        mode = self.statistics.stats_mode(self.testData)
        self.assertEqual(mode, self.mode_value)

    def test_variance_calculator(self):
        # The project implementation rounds to 1 decimal place.
        variance = self.statistics.stats_variance(self.testData)
        self.assertEqual(variance, round(self.variance_value, 1))

    def test_standard_deviation_calculator(self):
        standard_deviation = self.statistics.stats_standard_deviation(
            self.testData)
        self.assertEqual(standard_deviation,
                         round(self.standard_deviation_value, 1))
if __name__ == '__main__':
unittest.main()
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,204
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Calculator/SquareRoot.py
|
import math
def square_root(a):
    """Return the square root of *a*, rounded to 9 decimal places.

    Accepts ints and floats only; any other type raises Exception.
    """
    if not isinstance(a, (int, float)):
        raise Exception("Data type not supported for square root operation!")
    # math.sqrt handles both int and float; rounding matches the package style.
    return round(math.sqrt(a), 9)
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,205
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Statistics/Median.py
|
from Calculator.Division import division
from Calculator.Addition import addition
def get_median(data):
    """Return the median of *data* (the caller is expected to sort it first)."""
    count = len(data)
    # division(a, b) computes b / a, so this is count // 2 after truncation.
    middle = int(division(2, count))
    if count % 2 == 0:
        # Even count: average the two central elements.
        upper = data[middle]
        lower = data[middle - 1]
        return division(2, addition(lower, upper))
    # Odd count: the single central element is the median.
    return data[middle]
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,206
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Statistics/PopulationSampler.py
|
from Statistics.RandomGenerator import RandomGenerator
from Statistics.Statistics import Statistics
import scipy.stats as st
class PopulationSampler(RandomGenerator):
    """Population-sampling helpers built on RandomGenerator and the Statistics facade.

    NOTE(review): the Calculator-style wrappers used here (divide, subtract)
    appear to take their operands in reversed order (divide(a, b) -> b / a,
    matching Calculator/Division.py) — confirm against the Calculator base class.
    """

    def __init__(self):
        self.stats = Statistics()

    def get_simple_random_sampling(self, size, seed, version, data):
        """Simple random sampling: seeded draw of *size* items from *data*."""
        return self.get_rand_num_list_w_seed(size, seed, version, data)

    def get_confidence_interval(self, data):
        """Return [lower, upper] bounds of a 95% t-based confidence interval for the mean of *data*."""
        st1_degree_of_freedom = self.stats.subtract(len(data), 1)
        st2_alpha = self.stats.divide(2, self.stats.subtract(1, 0.95))
        st3 = st.t.ppf(1 - st2_alpha, df=st1_degree_of_freedom)
        st4 = self.stats.divide(self.stats.square_root(len(data)), self.stats.stats_standard_deviation(data))
        st5 = self.stats.multiply(st3, st4)
        st6 = self.stats.subtract(self.stats.stats_mean(data), st5)
        st7 = self.stats.add(self.stats.stats_mean(data), st5)
        conf_interval = [st6, st7]
        return conf_interval

    def get_margin_of_error(self, data, q):
        """Return the margin of error for *data* at confidence level *q*."""
        st1_z_critical_score = st.norm.ppf(1 - (1 - q) / 2)
        st2_sd = self.stats.stats_standard_deviation(data)
        # standard error = sd / sqrt(n), via the reversed-operand divide wrapper
        se = self.stats.divide(self.stats.square_root(len(data)), st2_sd)
        margin_of_error = self.stats.multiply(st1_z_critical_score, se)
        return margin_of_error

    def get_result_by_cochrans_sample_size(self, p, e, cl):
        """Cochran's sample size formula for proportion *p*, error *e*, confidence level *cl*."""
        z = st.norm.ppf(1 - (1 - cl) / 2)
        print(self.stats.multiply(self.stats.multiply(self.stats.square(z), p), self.stats.square(e)))
        n = self.stats.multiply(self.stats.multiply(self.stats.square(z), p), self.stats.square(e)) / (1 - p)
        print(n)
        return round(n)

    def get_sample_size_by_confidence_interval_and_width(self, data):
        """Sample size given a confidence interval and width (unknown population stdev)."""
        # step 1: critical z value for a 95% confidence level
        cl = 0.95
        za_2 = st.norm.ppf(1 - (1 - cl) / 2)
        print("za2 - " + str(za_2))
        e = 0.5
        # Bug fix: the prints below concatenated str + float, raising TypeError
        # on the first call; operands are now passed through str().
        print("e - " + str(e))
        p = 0.5
        q = 1 - p
        # step 2
        s2 = self.stats.multiply(p, q)
        print("s2 - " + str(s2))
        # step 3
        s3 = self.stats.divide(za_2, e)
        print("s3 - " + str(s3))
        # step 4
        s4 = self.stats.square(s3)
        print("s4 - " + str(s4))
        # step 5 (final): sample size for an unknown population standard deviation
        s5 = self.stats.multiply(s2, s4)
        print("s5 - " + str(s5))
        return s5
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,207
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Tests/test_RandomGenerator.py
|
import unittest
from Statistics.RandomGenerator import RandomGenerator
class MyTestCase(unittest.TestCase):
    """Behavioral tests for RandomGenerator's seeded and unseeded helpers."""

    def setUp(self) -> None:
        self.random_generator = RandomGenerator()

    def test_generate_rand_num_by_range_wo_seed(self):
        """An unseeded draw stays within the requested [low, high] range."""
        self.assertLessEqual(self.random_generator.generate_rand_num_by_range_wo_seed(3, 5), 5)
        self.assertGreaterEqual(self.random_generator.generate_rand_num_by_range_wo_seed(3, 5), 3)

    def test_generate_rand_num_by_range_w_seed(self):
        """A seeded draw stays within the requested [low, high] range."""
        self.assertLessEqual(self.random_generator.generate_rand_num_by_range_w_seed(5, 2, 3, 5), 5)
        self.assertGreaterEqual(self.random_generator.generate_rand_num_by_range_w_seed(5, 2, 3, 5), 3)

    def test_get_rand_num_list_by_range_w_seed(self):
        """A seeded list draw yields only values inside the requested range."""
        sample_values = self.random_generator.get_rand_num_list_by_range_w_seed(5, 2, 2, 1, 9)
        for value in sample_values:
            self.assertLessEqual(value, 9)
            self.assertGreaterEqual(value, 1)

    def test_set_seed_and_get_rand_from_list(self):
        """A fixed seed always selects the same element from the list."""
        self.assertEqual(self.random_generator.set_seed_and_get_rand_from_list(3, 2, [5, 3, 2, 1, 9]), 3)

    def test_get_rand_item_from_list(self):
        """A random pick is always a member of the source list."""
        self.assertTrue(self.random_generator.get_rand_item_from_list([5, 3, 2, 1, 9]) in [5, 3, 2, 1, 9])

    def test_get_rand_num_list_wo_seed(self):
        """Unseeded sampling returns the requested count, drawn from the source list."""
        self.assertEqual(len(self.random_generator.get_rand_num_list_wo_seed(3, [5, 3, 2, 1, 9])), 3)
        self.assertTrue(set(self.random_generator.get_rand_num_list_wo_seed(3, [5, 3, 2, 1, 9])).issubset({5, 3, 2, 1, 9}))

    def test_get_rand_num_list_w_seed(self):
        """Seeded sampling returns the requested count, drawn from the source list."""
        self.assertEqual(len(self.random_generator.get_rand_num_list_w_seed(3, 2, 2, [5, 3, 2, 1, 9])), 3)
        self.assertTrue(set(self.random_generator.get_rand_num_list_w_seed(3, 2, 2, [5, 3, 2, 1, 9])).issubset({5, 3, 2, 1, 9}))


if __name__ == '__main__':
    unittest.main()
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,208
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Statistics/Variance.py
|
from Statistics.Mean import get_mean
from Calculator.Division import division
from Calculator.Addition import addition
from Calculator.Square import square
from Calculator.Subtraction import subtraction
def get_variance(data):
    """Return the sample variance (n - 1 denominator) of *data*, rounded to 1 dp."""
    mean_value = get_mean(data)
    # Square each deviation from the mean. subtraction(a, b) computes b - a,
    # so this yields (mean - value); the sign vanishes after squaring.
    squared_deviations = []
    for value in data:
        squared_deviations.append(square(subtraction(value, mean_value)))
    total = 0
    for sq in squared_deviations:
        total = addition(total, sq)
    # division(a, b) computes b / a, so this is total / (len(data) - 1).
    return round(division(len(data) - 1, total), 1)
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,209
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Calculator/Division.py
|
def division(a, b):
    """Return b / a, rounded to 9 decimal places.

    Note the reversed operand order: *b* is the dividend and *a* the divisor,
    matching every call site in this package.

    Raises:
        Exception: for unsupported operand types, or on division by zero
        (message "Divide by Zero error").
    """
    try:
        if isinstance(a, (int, float)) and isinstance(b, (int, float)):
            # Bug fix: the original float/float branch truncated both operands
            # with int() before dividing (round(int(b) / int(a), 9)), so e.g.
            # division(2.5, 5.0) returned 2.5 instead of 2.0. All numeric
            # combinations now use true float division.
            return round(float(b) / float(a), 9)
        raise Exception("Data type not supported for division operation!")
    except ZeroDivisionError:
        raise Exception("Divide by Zero error")
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,210
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Calculator/Multiplication.py
|
def multiplication(a, b):
    """Return a * b; int * int stays int, any float operand rounds to 9 dp."""
    if isinstance(a, int) and isinstance(b, int):
        return int(a) * int(b)
    if isinstance(a, (int, float)) and isinstance(b, (int, float)):
        return round(float(a) * float(b), 9)
    raise Exception("Data type not supported for multiplication operation!")
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,211
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Statistics/Mode.py
|
def get_mode(data):
    """Return the most frequent element of *data*.

    Ties keep the earliest-seen candidate (strict > comparison).
    """
    best = data[0]
    best_count = data.count(best)
    for candidate in data[1:]:
        count = data.count(candidate)
        if count > best_count:
            best_count = count
            best = candidate
    return best
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,212
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Tests/test_PopulationSampler.py
|
import unittest
from Statistics.PopulationSampler import PopulationSampler
import statistics
import scipy.stats as st
class MyTestCase(unittest.TestCase):
    """Tests (largely exploratory — several assertions are disabled) for PopulationSampler."""

    def setUp(self) -> None:
        self.population_sampler = PopulationSampler()
        self.data = [0, 1, 2, 5, 9, 11, 34, 55, 23, 19, 78, 99, 15]

    def test_get_simple_random_sampling(self):
        """Simple random sampling returns the requested count, drawn from the population."""
        self.assertEqual(len(self.population_sampler.get_simple_random_sampling(5, 5, 2, self.data)), 5)
        self.assertTrue(
            set(self.population_sampler.get_simple_random_sampling(5, 5, 2, self.data)).issubset(set(self.data)))

    def test_get_confidence_interval(self):
        """Confidence interval for a sample, with a scipy-computed reference."""
        # NOTE(review): `alpha=` is the legacy keyword for st.t.interval; newer
        # scipy releases renamed it to `confidence` — confirm the pinned version.
        conf_interval = st.t.interval(alpha=0.95
                                      , df=len(self.data) - 1
                                      , loc=statistics.mean(self.data)
                                      , scale=st.sem(self.data)
                                      )
        ci = self.population_sampler.get_confidence_interval(self.data)
        # self.assertTrue(set(conf_interval).issubset(ci))

    def test_get_margin_of_error(self):
        """Margin of error against a hand-computed reference value."""
        q = 0.05  # assumption
        result = self.population_sampler.get_margin_of_error(self.data, q)
        sd = statistics.stdev(self.data)
        z = st.norm.ppf(1 - (1 - q) / 2)
        # Bug fix: the original called statistics.sqrt(), which does not exist
        # (the stdlib statistics module has no sqrt) and raised AttributeError.
        se = sd / (len(self.data) ** 0.5)
        moe = z * se
        # self.assertTrue(result - moe >= 0.1)

    def test_get_result_by_cochrans_sample_size(self):
        """Cochran's sample size formula (currently exploratory, no assertion)."""
        cl = 0.95
        e = 0.05
        p = 0.5
        # print(self.population_sampler.get_result_by_cochrans_sample_size(p, e, cl))

    """def test_get_sample_size_by_confidence_interval_and_width(self):
        print(self.population_sampler.get_sample_size_by_confidence_interval_and_width(self.data))"""


if __name__ == '__main__':
    unittest.main()
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,213
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Calculator/Subtraction.py
|
def subtraction(a, b):
    """Return b - a (note the reversed operand order used throughout this package)."""
    if isinstance(a, int) and isinstance(b, int):
        return int(b) - int(a)
    if isinstance(a, (int, float)) and isinstance(b, (int, float)):
        return float(b) - float(a)
    raise Exception("Data type not supported for subtraction operation!")
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,214
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Tests/test_CsvReaderTest.py
|
import unittest
from CsvReader.CsvReader import CsvReader, class_factory
class MyTestCase(unittest.TestCase):
    """Tests for CsvReader.return_data_as_class and the class_factory helper."""

    def setUp(self) -> None:
        self.csv_reader = CsvReader('Tests/Data/UnitTestSubtraction.csv')

    def test_return_data_as_objects(self):
        """Each CSV column comes back as a list whose elements carry the factory class name."""
        value1 = self.csv_reader.return_data_as_class('Value 1')
        value2 = self.csv_reader.return_data_as_class('Value 2')
        result = self.csv_reader.return_data_as_class('result')
        for column_values in (value1, value2, result):
            self.assertIsInstance(column_values, list)
        expected1 = class_factory('Value 1', self.csv_reader.data[0])
        expected2 = class_factory('Value 2', self.csv_reader.data[0])
        expected3 = class_factory('result', self.csv_reader.data[0])
        for column_values, expected in ((value1, expected1), (value2, expected2), (result, expected3)):
            for value in column_values:
                self.assertEqual(value.__name__, expected.__name__)


if __name__ == '__main__':
    unittest.main()
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,215
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Statistics/StandardDeviation.py
|
from Statistics.Variance import get_variance
from Calculator.SquareRoot import square_root
def get_standard_deviation(data):
    """Return the sample standard deviation of *data*, rounded to 1 decimal place."""
    variance = get_variance(data)
    return round(square_root(variance), 1)
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,216
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Statistics/Mean.py
|
from Calculator.Addition import addition
from Calculator.Division import division
def get_mean(data):
    """Return the arithmetic mean of *data*."""
    total = 0
    for value in data:
        total = addition(total, value)
    # division(a, b) computes b / a, so this is total / len(data).
    return division(len(data), total)
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,217
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Calculator/Addition.py
|
def addition(a, b):
    """Return a + b; int + int stays int, any float operand promotes to float."""
    if isinstance(a, int) and isinstance(b, int):
        return int(a) + int(b)
    if isinstance(a, (int, float)) and isinstance(b, (int, float)):
        return float(a) + float(b)
    raise Exception("Data type not supported for addition operation!")
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,218
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Statistics/Statistics.py
|
from Calculator.Calculator import Calculator
from Statistics.Mean import get_mean
from Statistics.Median import get_median
from Statistics.Mode import get_mode
from Statistics.Variance import get_variance
from Statistics.StandardDeviation import get_standard_deviation
from Statistics.ZScore import get_z_score
class Statistics(Calculator):
    """Facade exposing the module-level statistics helpers on top of the Calculator base.

    Each method caches its latest value on ``self.result`` before returning it.
    """

    def __init__(self):
        pass

    def stats_mean(self, data):
        """Store and return the arithmetic mean of *data*."""
        self.result = get_mean(data)
        return self.result

    def stats_median(self, data):
        """Store and return the median of *data*.

        NOTE(review): sorts *data* in place, mutating the caller's list —
        confirm callers do not rely on the original ordering.
        """
        data.sort()
        self.result = get_median(data)
        return self.result

    def stats_mode(self, data):
        """Store and return the mode of *data*."""
        self.result = get_mode(data)
        return self.result

    def stats_variance(self, data):
        """Store and return the sample variance of *data*."""
        self.result = get_variance(data)
        return self.result

    def stats_standard_deviation(self, data):
        """Store and return the sample standard deviation of *data*."""
        self.result = get_standard_deviation(data)
        return self.result

    def stats_z_score(self, data):
        """Store and return the z-score of *data*."""
        self.result = get_z_score(data)
        return self.result
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,219
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Statistics/RandomGenerator.py
|
import random
class RandomGenerator:
    """Thin wrapper around the stdlib random module.

    The most recent value produced is cached on ``result``; when a sample
    request fails, the previous value is returned unchanged.
    """

    # Last generated value; also the fallback returned when sampling fails.
    result = 0

    def __init__(self):
        pass

    def _sample_or_previous(self, population, size):
        """random.sample wrapper: on ValueError, warn and keep the prior result."""
        try:
            self.result = random.sample(population, size)
        except ValueError:
            print('Sample size exceeded population size.')
        return self.result

    def generate_rand_num_by_range_wo_seed(self, low, high):
        """Uniform random float in [low, high] without seeding."""
        self.result = random.uniform(low, high)
        return self.result

    def generate_rand_num_by_range_w_seed(self, seed, version, low, high):
        """Uniform random float in [low, high], reproducible via *seed*/*version*."""
        random.seed(seed, version)
        self.result = random.uniform(low, high)
        return self.result

    def get_rand_num_list_by_range_w_seed(self, size, seed, version, low, high):
        """Seeded sample of *size* distinct integers drawn from range(low, high)."""
        random.seed(seed, version)
        return self._sample_or_previous(range(low, high), size)

    def set_seed_and_get_rand_from_list(self, seed, version, number_list):
        """Seeded (reproducible) choice of one item from *number_list*."""
        random.seed(seed, version)
        self.result = random.choice(number_list)
        return self.result

    def get_rand_item_from_list(self, number_list):
        """Unseeded choice of one item from *number_list*."""
        self.result = random.choice(number_list)
        return self.result

    def get_rand_num_list_wo_seed(self, size, number_list):
        """Unseeded sample of *size* distinct items from *number_list*."""
        return self._sample_or_previous(number_list, size)

    def get_rand_num_list_w_seed(self, size, seed, version, number_list):
        """Seeded sample of *size* distinct items from *number_list*."""
        random.seed(seed, version)
        return self._sample_or_previous(number_list, size)
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,220
|
nt27web/statistical-calculator
|
refs/heads/main
|
/Calculator/Square.py
|
def square(a):
    """Return ``a`` squared.

    Floats are rounded to 9 decimal places to suppress binary
    floating-point noise; ints are squared exactly.

    Raises:
        TypeError: if ``a`` is neither int nor float.  TypeError is a
            subclass of Exception, so callers catching the original
            bare ``Exception`` are unaffected.
    """
    # Check float first: a bool is also an instance of int and falls
    # through to exact integer squaring, as in the original code.
    if isinstance(a, float):
        return round(float(a) * float(a), 9)
    if isinstance(a, int):
        return int(a) * int(a)
    raise TypeError("Data type not supported for square operation!")
|
{"/Statistics/ZScore.py": ["/Calculator/Subtraction.py", "/Calculator/Division.py", "/Statistics/Mean.py", "/Statistics/StandardDeviation.py"], "/Tests/test_Statistics.py": ["/Statistics/Statistics.py"], "/Statistics/Median.py": ["/Calculator/Division.py", "/Calculator/Addition.py"], "/Statistics/PopulationSampler.py": ["/Statistics/RandomGenerator.py", "/Statistics/Statistics.py"], "/Tests/test_RandomGenerator.py": ["/Statistics/RandomGenerator.py"], "/Statistics/Variance.py": ["/Statistics/Mean.py", "/Calculator/Division.py", "/Calculator/Addition.py", "/Calculator/Square.py", "/Calculator/Subtraction.py"], "/Tests/test_PopulationSampler.py": ["/Statistics/PopulationSampler.py"], "/Statistics/StandardDeviation.py": ["/Statistics/Variance.py", "/Calculator/SquareRoot.py"], "/Statistics/Mean.py": ["/Calculator/Addition.py", "/Calculator/Division.py"], "/Statistics/Statistics.py": ["/Statistics/Mean.py", "/Statistics/Median.py", "/Statistics/Mode.py", "/Statistics/Variance.py", "/Statistics/StandardDeviation.py", "/Statistics/ZScore.py"]}
|
4,224
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/admin.py
|
from django.contrib import admin
from .models import PensionEntity, Giftcard
# Expose the giftcard models in the Django admin site.
admin.site.register(PensionEntity)
admin.site.register(Giftcard)
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,225
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/migrations/0001_initial.py
|
# Generated by Django 2.1.7 on 2019-04-13 10:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the mockdigid app: creates the
    # Participant and PensionFund tables.  Applied migrations are frozen
    # history -- do not edit the operations below.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Participant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('birth_date', models.DateField(blank=True, null=True)),
                ('bio', models.TextField(blank=True, max_length=500)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='PensionFund',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bsn', models.CharField(max_length=100)),
                ('fund_name', models.CharField(choices=[('abp', 'ABP fund'), ('pfzw', 'PFZW Fund'), ('sf', 'Some Fund'), ('gf', 'Gold Fund')], default='abp', max_length=5)),
                ('active', models.BooleanField(default=False)),
                ('ascription', models.CharField(max_length=100, null=True)),
                ('eligible', models.BooleanField(default=True)),
                ('start_date', models.DateField(blank=True, null=True)),
                ('end_date', models.DateField(blank=True, null=True)),
                ('fulltime_salary', models.FloatField(default=0.0)),
                ('entitlements', models.CharField(max_length=1000)),
                ('participant', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='mockdigid.Participant')),
            ],
        ),
    ]
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,226
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/models.py
|
import json
from django.contrib.auth.models import User
from django.db import models
class Participant(models.Model):
    '''
    Participant is the customer. They can link their Digid to the system, view
    projections, etc.
    '''
    # Exactly one participant profile per Django auth user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    birth_date = models.DateField(null=True, blank=True)
    bio = models.TextField(max_length=500, blank=True)
    def __str__(self):
        # Display a participant as the login name of the linked user.
        return self.user.get_username()
class PensionFund(models.Model):
    '''
    Details of the pension fund
    '''
    # Closed set of funds a participant can belong to.
    ABP = 'abp'
    PFZW = 'pfzw'
    SOMEFUND = 'sf'
    GOLDFUND = 'gf'
    FUND_CHOICES = (
        (ABP, 'ABP fund'),
        (PFZW, 'PFZW Fund'),
        (SOMEFUND, 'Some Fund'),
        (GOLDFUND, 'Gold Fund'),
    )
    # Session identifier doubling as the primary key.
    session_id = models.CharField(max_length=100, primary_key=True)
    # Accumulated amount credited to the fund (e.g. via giftcards).
    amount = models.FloatField(default=0.0)
    # Citizen service number of the owner.
    bsn = models.CharField(max_length=100)
    participant = models.ForeignKey(Participant, on_delete=models.CASCADE, null=True)
    fund_name = models.CharField(max_length=5, choices=FUND_CHOICES, default=ABP)
    active = models.BooleanField(default=False)
    ascription = models.CharField(max_length=100, null=True)
    eligible = models.BooleanField(default=True)
    start_date = models.DateField(null=True, blank=True)
    end_date = models.DateField(null=True, blank=True)
    fulltime_salary = models.FloatField(default=0.0)
    # JSON-encoded payload; read/written via the two methods below.
    entitlements = models.CharField(max_length=1000)

    def set_entitlements(self, x):
        """Store *x* as a JSON string in ``entitlements``."""
        self.entitlements = json.dumps(x)

    def get_entitlements(self, x=None):
        """Decode and return the stored entitlements.

        ``x`` was never used by the original implementation; it is kept
        as an optional parameter so any existing positional callers
        keep working.
        """
        return json.loads(self.entitlements)

    def get_web_link(self):
        """Return the public website for this fund (ABP is the fallback)."""
        if self.fund_name == 'abp':
            return "https://www.abp.nl"
        elif self.fund_name == 'pfzw':
            return "https://www.pfzw.nl"
        else:
            return "https://www.abp.nl"

    def __str__(self):
        return '{} - {} - {}'.format(self.session_id, self.fund_name, self.bsn[0:6])
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,227
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/views.py
|
import json
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from mockdigid.models import Participant, PensionFund
from giftcards.models import Giftcard
def _get_participant_pension_funds(participant):
    """Serialize every pension fund linked to *participant* as plain dicts."""
    return [
        {
            'fund_name': fund.fund_name,
            'id': fund.session_id,
            'active': fund.active,
            'start_date': fund.start_date,
            'ascription': fund.ascription,
            'eligible': fund.eligible,
            'fulltime_salary': fund.fulltime_salary,
            'entitlements': fund.entitlements
        }
        for fund in PensionFund.objects.filter(participant=participant)
    ]
@api_view(['POST'])
def authenticate_digid(request):
    '''
    Check if user is valid (Will be replaced with BasicAuth in the future)
    Example call: 127.0.0.1:8000/mockdigid/authenticate?username=TEST&password=TEST
    '''
    # NOTE(review): credentials travel in the query string, where they can
    # end up in server logs -- move them to the request body when this is
    # replaced with BasicAuth.
    user = authenticate(username=request.query_params['username'],
                        password=request.query_params['password'])
    # authenticate() returns None on bad credentials; reject explicitly
    # instead of relying on the Participant lookup to miss.
    if user is None:
        return Response({'msg': 'Username or password is wrong'}, status=status.HTTP_401_UNAUTHORIZED)
    try:
        participant = Participant.objects.get(user=user)
    except Participant.DoesNotExist:
        return Response({'msg': 'Username or password is wrong'}, status=status.HTTP_401_UNAUTHORIZED)
    return Response({
        'first_name' : participant.user.first_name,
        'last_name' : participant.user.last_name,
        'bio' : participant.bio,
        'pension_funds': _get_participant_pension_funds(participant)
    }, status=status.HTTP_200_OK)
@api_view(['PUT'])
def add_to_fund(request):
    '''
    Add giftcard amount to a fund
    '''
    # NOTE(review): the giftcard's ``used`` flag is neither checked nor
    # set here, so the same barcode can be redeemed repeatedly --
    # confirm whether double-spending should be rejected.
    try:
        giftcard = Giftcard.objects.get(barcode=request.query_params['barcode'])
        pension_fund = PensionFund.objects.get(session_id=request.query_params['id'])
    except Giftcard.DoesNotExist:
        return Response({'msg': 'Invalid barcode'}, status=status.HTTP_400_BAD_REQUEST)
    except PensionFund.DoesNotExist:
        return Response({'msg': 'Invalid Pension Fund'}, status=status.HTTP_400_BAD_REQUEST)
    # Credit the card's full value to the fund and persist it.
    pension_fund.amount += giftcard.amount
    pension_fund.save()
    return Response({
        'amount': pension_fund.amount,
        'msg': 'Amount added to pension fund',
        'link': pension_fund.get_web_link()
    }, status=status.HTTP_200_OK)
@api_view(['POST'])
def create_participant(request):
    '''
    Create a new user
    '''
    try:
        user = User.objects.create_user(username=request.query_params['username'],
                                        password=request.query_params['password'])
        Participant.objects.create(user=user)
    # The original bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; Exception is the widest class a request
    # handler should absorb.
    except Exception:
        return Response({'msg': 'Unable to create user'}, status=status.HTTP_400_BAD_REQUEST)
    return Response({'msg': 'Created new user'}, status=status.HTTP_200_OK)
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,228
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/admin.py
|
from django.contrib import admin
from .models import Participant, PensionFund
# Expose the mock DigiD models in the Django admin site.
admin.site.register(Participant)
admin.site.register(PensionFund)
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,229
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/management/commands/create_entities.py
|
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.utils.crypto import get_random_string
from giftcards.models import PensionEntity
class Command(BaseCommand):
    """Management command that seeds the database with test PensionEntity rows."""

    help = 'Create test PensionEntity'

    def add_arguments(self, parser):
        # Single positional argument: how many entities to create.
        parser.add_argument('total', type=int, help='Indicates the number of PensionEntities to be created')

    def handle(self, *args, **kwargs):
        total = kwargs['total']
        for index in range(total):
            # Deterministic credentials so the fixtures are easy to log in with.
            user_fields = {
                'username': 'PE{}'.format(index),
                'password': 'test',
                'first_name': 'PE_{}'.format(index),
                'email': 'test{}@entity.com'.format(index)
            }
            account = User.objects.create_user(**user_fields)
            PensionEntity.objects.create(user=account)
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,230
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/urls.py
|
from django.urls import path
from . import views
# URL routes for the mock DigiD API endpoints.
urlpatterns = [
    path('authenticate', views.authenticate_digid),
    path('addtofund', views.add_to_fund),
    path('createuser', views.create_participant),
]
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,231
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/migrations/0006_auto_20190412_2205.py
|
# Generated by Django 2.1.7 on 2019-04-12 20:05
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the giftcards-local Participant model (its user FK first,
    # then the model itself) -- presumably superseded by
    # mockdigid.Participant; confirm against app history.  Frozen.
    dependencies = [
        ('giftcards', '0005_auto_20190412_2131'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='participant',
            name='user',
        ),
        migrations.DeleteModel(
            name='Participant',
        ),
    ]
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,232
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/migrations/0005_auto_20190412_2131.py
|
# Generated by Django 2.1.7 on 2019-04-12 19:31
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes Giftcard.created and Giftcard.validity nullable.
    # Applied migrations are frozen history -- do not edit.
    dependencies = [
        ('giftcards', '0004_remove_participant_image'),
    ]
    operations = [
        migrations.AlterField(
            model_name='giftcard',
            name='created',
            field=models.DateTimeField(null=True, verbose_name='Date of creation'),
        ),
        migrations.AlterField(
            model_name='giftcard',
            name='validity',
            field=models.DateTimeField(null=True, verbose_name='Valid until'),
        ),
    ]
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,233
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/models.py
|
import uuid
from django.contrib.auth.models import User
from django.db import models
class Giftcard(models.Model):
    '''
    Model describes the Giftcard
    '''
    # Card number, used directly as the primary key.
    barcode = models.CharField(max_length=100, primary_key=True)
    # Entity that issued the card; cards are deleted with their issuer.
    issued_by = models.ForeignKey('PensionEntity', on_delete=models.CASCADE, null=True)
    # Monetary value of the card.
    amount = models.FloatField(default=0.0)
    # NOTE(review): ``used`` is never set anywhere visible in this app,
    # so cards are effectively redeemable more than once -- confirm intent.
    used = models.BooleanField(default=False)
    created = models.DateTimeField('Date of creation', null=True)
    validity = models.DateTimeField('Valid until', null=True)
    def __str__(self):
        return "{} - {}".format(self.barcode, self.amount)
class PensionEntity(models.Model):
    '''
    PensionEntity can be something like "APG"
    '''
    # One entity profile per Django auth user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    bio = models.TextField(max_length=500, blank=True)
    def __str__(self):
        # Display an entity as the login name of the linked user.
        return self.user.get_username()
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,234
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/views.py
|
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import Giftcard
from .serializers import GiftcardsSerializer
@api_view(['GET'])
def giftcard_valid(request):
    '''
    Check if gift card is valid
    Example call: http://127.0.0.1:8000/giftcards/valid?barcode=INVALIDBARCODE
    '''
    # NOTE(review): "valid" currently means only that the barcode exists;
    # the model's ``used`` and ``validity`` fields are not consulted --
    # confirm whether spent or expired cards should be rejected here.
    try:
        giftcard = Giftcard.objects.get(barcode=request.query_params['barcode'])
    except Giftcard.DoesNotExist:
        return Response({'msg': 'Invalid barcode'}, status=status.HTTP_400_BAD_REQUEST)
    return Response({'barcode': giftcard.barcode, 'amount': giftcard.amount}, status=status.HTTP_200_OK)
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,235
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/migrations/0003_auto_20190412_1959.py
|
# Generated by Django 2.1.7 on 2019-04-12 17:59
from django.db import migrations
class Migration(migrations.Migration):
    # Removes the ``email`` column from both Participant and
    # PensionEntity.  Frozen history -- do not edit.
    dependencies = [
        ('giftcards', '0002_auto_20190412_1956'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='participant',
            name='email',
        ),
        migrations.RemoveField(
            model_name='pensionentity',
            name='email',
        ),
    ]
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,236
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/migrations/0003_auto_20190413_1356.py
|
# Generated by Django 2.1.7 on 2019-04-13 11:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Relaxes PensionFund.participant from a OneToOneField to a plain
    # nullable ForeignKey (one participant, many funds).  Frozen.
    dependencies = [
        ('mockdigid', '0002_auto_20190413_1238'),
    ]
    operations = [
        migrations.AlterField(
            model_name='pensionfund',
            name='participant',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mockdigid.Participant'),
        ),
    ]
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,237
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/migrations/0002_auto_20190413_1238.py
|
# Generated by Django 2.1.7 on 2019-04-13 10:38
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Replaces PensionFund's auto ``id`` with a ``session_id`` char
    # primary key.  The ``timezone.now`` default was auto-captured to
    # backfill existing rows (preserve_default=False).  Frozen.
    dependencies = [
        ('mockdigid', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='pensionfund',
            name='id',
        ),
        migrations.AddField(
            model_name='pensionfund',
            name='session_id',
            field=models.CharField(default=django.utils.timezone.now, max_length=100, primary_key=True, serialize=False),
            preserve_default=False,
        ),
    ]
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,238
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/migrations/0002_auto_20190412_1956.py
|
# Generated by Django 2.1.7 on 2019-04-12 17:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Converts Giftcard.barcode from a UUIDField to a CharField while
    # keeping it the primary key.  Frozen history -- do not edit.
    dependencies = [
        ('giftcards', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='giftcard',
            name='barcode',
            field=models.CharField(max_length=100, primary_key=True, serialize=False),
        ),
    ]
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,239
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/serializers.py
|
from rest_framework import serializers
from .models import Participant, PensionFund
class ParticipantsSerializer(serializers.ModelSerializer):
    """Serializer exposing only the linked auth user of a Participant."""
    class Meta:
        model = Participant
        # Bug fix: ``("user")`` is just the string "user" -- DRF iterates
        # ``fields`` and would see the characters 'u','s','e','r'.  A
        # one-element tuple needs the trailing comma.
        fields = ("user",)
class PensionFundsSerializer(serializers.ModelSerializer):
    # Read-only projection of a PensionFund for API responses.
    class Meta:
        model = PensionFund
        fields = ("fund_name", "active", "start_date", "end_date")
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,240
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/tests.py
|
import datetime
from django.test import TestCase
from django.utils import timezone
from rest_framework.test import APIRequestFactory
from .models import Giftcard
from .views import giftcard_valid
class GiftcardModelTests(TestCase):
    """Request-level tests for the giftcard validity endpoint."""

    def test_giftcard_is_invalid(self):
        '''
        Gift card is not valid
        '''
        # An unknown barcode must come back as a 400 with the error message.
        request = APIRequestFactory().get(
            'http://127.0.0.1:8000/giftcards/valid?barcode=INVALIDBARCODE')
        response = giftcard_valid(request)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.data['msg'], 'Invalid barcode')

    def test_giftcard_is_valid(self):
        '''
        Valid giftcard, return amount
        '''
        # A card that exists is echoed back with its barcode and amount.
        barcode, amount = 'abc', 10.0
        Giftcard.objects.create(barcode=barcode, amount=amount)
        url = 'http://127.0.0.1:8000/giftcards/valid?barcode={}'.format(barcode)
        response = giftcard_valid(APIRequestFactory().get(url))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['barcode'], barcode)
        self.assertEqual(response.data['amount'], amount)
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,241
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/management/commands/create_giftcards.py
|
import random
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.utils.crypto import get_random_string
from giftcards.models import Giftcard, PensionEntity
class Command(BaseCommand):
    """Management command that seeds the database with random test giftcards."""

    help = 'Create test giftcards'

    def add_arguments(self, parser):
        # Single positional argument: how many giftcards to create.
        parser.add_argument('total', type=int, help='Indicates the number of users to be created')

    def handle(self, *args, **kwargs):
        total = kwargs['total']
        for _ in range(total):
            # Each card gets a random barcode, a randomly chosen issuer
            # and a random value between 10 and 50.
            card_fields = {
                'barcode': get_random_string(length=10),
                'issued_by': PensionEntity.objects.order_by('?')[0],
                'amount': random.randint(10, 50),
                'created': timezone.now()
            }
            Giftcard.objects.create(**card_fields)
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,242
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/migrations/0001_initial.py
|
# Generated by Django 2.1.7 on 2019-04-12 17:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial schema for the giftcards app: creates the
    # Giftcard, Participant and PensionEntity tables, then wires the
    # Giftcard.issued_by foreign key.  Frozen history -- do not edit.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Giftcard',
            fields=[
                ('barcode', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('amount', models.FloatField(default=0.0)),
                ('created', models.DateTimeField(verbose_name='Date of creation')),
                ('validity', models.DateTimeField(verbose_name='Valid until')),
            ],
        ),
        migrations.CreateModel(
            name='Participant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('birth_date', models.DateField(blank=True, null=True)),
                ('email', models.EmailField(max_length=100)),
                ('image', models.ImageField(upload_to='')),
                ('bio', models.TextField(blank=True, max_length=500)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='PensionEntity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=100)),
                ('bio', models.TextField(blank=True, max_length=500)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='giftcard',
            name='issued_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='giftcards.PensionEntity'),
        ),
    ]
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,243
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/serializers.py
|
from rest_framework import serializers
from .models import Giftcard
class GiftcardsSerializer(serializers.ModelSerializer):
    """Serializer for Giftcard instances.

    Exposes only the public fields (barcode, amount); other model fields
    are intentionally not serialized.
    """

    class Meta:
        model = Giftcard
        fields = ("barcode", "amount")
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,244
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/giftcards/urls.py
|
from django.urls import path
from . import views
# URL routes for the giftcards app: "valid" dispatches to the
# giftcard_valid view (giftcard validity check endpoint).
urlpatterns = [
    path('valid', views.giftcard_valid),
]
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,245
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/apps.py
|
from django.apps import AppConfig
class MockdigidConfig(AppConfig):
    """Django application configuration for the mockdigid app."""
    name = 'mockdigid'
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,246
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/management/commands/create_pension_funds.py
|
import random
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.utils.crypto import get_random_string
from mockdigid.models import Participant, PensionFund
class Command(BaseCommand):
    """Management command that seeds the database with random test PensionFunds.

    Each created fund is attached to a randomly chosen existing Participant,
    so participants must exist beforehand (see the create_users command).
    """

    help = 'Create test pension funds'

    def add_arguments(self, parser):
        """Register the positional `total` argument (number of funds to create)."""
        parser.add_argument('total', type=int, help='Indicates the number of users to be created')

    def handle(self, *args, **kwargs):
        """Create `total` PensionFund rows with randomised test data."""
        total = kwargs['total']
        for i in range(total):
            # Build the constructor arguments in a dedicated dict instead of
            # shadowing the `kwargs` parameter of handle() (previous defect).
            fund_fields = {
                'session_id': 'sess_id_{}'.format(get_random_string(length=5)),
                'bsn': 'BSN_{}'.format(i),
                # order_by('?') picks one random participant per fund.
                'participant': Participant.objects.order_by('?')[0],
                'fund_name': random.choice(['abp', 'pfzw', 'sf', 'gf']),
                'active': random.choice([True, False]),
                'ascription': random.choice(['ABP', 'PFZW', 'Some Fund', 'Gold Fund']),
                'fulltime_salary': random.randint(100, 200)
            }
            PensionFund.objects.create(**fund_fields)
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,247
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/management/commands/create_users.py
|
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.utils.crypto import get_random_string
from mockdigid.models import Participant
class Command(BaseCommand):
    """Management command that seeds the database with test users.

    Creates users test0..testN-1 (password "test") and an empty Participant
    profile linked to each.
    """

    help = 'Create test users'

    def add_arguments(self, parser):
        """Register the positional `total` argument (number of users to create)."""
        parser.add_argument('total', type=int, help='Indicates the number of users to be created')

    def handle(self, *args, **kwargs):
        """Create `total` User rows, each with a blank Participant profile."""
        total = kwargs['total']
        for i in range(total):
            # Use a separate dict rather than shadowing the `kwargs`
            # parameter of handle() (previous defect).
            user_fields = {
                'username': 'test{}'.format(i),
                'password': 'test',
                'first_name': 'first{}'.format(i),
                'last_name': 'last{}'.format(i),
                'email': 'test{}@admin.com'.format(i)
            }
            user = User.objects.create_user(**user_fields)
            Participant.objects.create(user=user)
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,248
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/tests.py
|
import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from rest_framework.test import APIRequestFactory
from .models import Participant
from .views import authenticate_digid
class ParticipantModelTests(TestCase):
    """Integration-style tests for the mock DigiD authentication endpoint."""

    def test_participant_is_invalid(self):
        """An unknown username/password combination is rejected with HTTP 401."""
        request_factory = APIRequestFactory()
        auth_request = request_factory.post(
            'http://127.0.0.1:8000/mockdigid/authenticate?username=TEST&password=TEST'
        )
        response = authenticate_digid(auth_request)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.data['msg'], 'Username or password is wrong')

    def test_participant_is_valid(self):
        """A registered participant can authenticate and gets their details back."""
        username = 'abc'
        user = User.objects.create_user(
            username=username, password=username, first_name='a', last_name='a'
        )
        participant = Participant.objects.create(user=user)
        request_factory = APIRequestFactory()
        auth_request = request_factory.post(
            'http://127.0.0.1:8000/mockdigid/authenticate?username={}&password={}'.format(
                username, username
            )
        )
        response = authenticate_digid(auth_request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['first_name'], 'a')
        self.assertEqual(response.data['last_name'], 'a')
        self.assertEqual(response.data['bio'], '')
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,249
|
odysseyhack/boldchain
|
refs/heads/master
|
/backend/boldapi/mockdigid/migrations/0004_pensionfund_amount.py
|
# Generated by Django 2.1.7 on 2019-04-13 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the `amount` float field (default 0.0) to the PensionFund model."""

    dependencies = [
        ('mockdigid', '0003_auto_20190413_1356'),
    ]
    operations = [
        migrations.AddField(
            model_name='pensionfund',
            name='amount',
            field=models.FloatField(default=0.0),
        ),
    ]
|
{"/backend/boldapi/giftcards/admin.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/admin.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/views.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/serializers.py"], "/backend/boldapi/mockdigid/serializers.py": ["/backend/boldapi/mockdigid/models.py"], "/backend/boldapi/giftcards/tests.py": ["/backend/boldapi/giftcards/models.py", "/backend/boldapi/giftcards/views.py"], "/backend/boldapi/giftcards/serializers.py": ["/backend/boldapi/giftcards/models.py"], "/backend/boldapi/mockdigid/tests.py": ["/backend/boldapi/mockdigid/models.py", "/backend/boldapi/mockdigid/views.py"]}
|
4,279
|
abdulrauf8788uni/AstarPathFinding-mini
|
refs/heads/master
|
/mygame/node.py
|
import pygame
from . import settings
# Node class
# Node class
class Node:
    """A single grid cell for the A* visualisation.

    A node's role (barrier, start, end, path, explored) is encoded purely in
    its colour, so the is_*/make_* helpers compare and assign colours.
    """

    def __init__(self, x, y, width):
        self.x = x                     # column index in the grid
        self.y = y                     # row index in the grid
        self.x_pos = self.x * width    # pixel x position on screen
        self.y_pos = self.y * width    # pixel y position on screen
        self.width = width             # side length of the square cell, in pixels
        self.color = settings.SILVER   # default / unvisited colour
        self.heuristic = float("inf")  # Manhattan distance to goal, set by calc_heuristic
        self.neighbours = []           # walkable adjacent nodes, set by calc_neighbours

    def pos(self):
        """Return the (x, y) grid coordinates of this node."""
        return (self.x, self.y)

    def get_h(self):
        """Return the cached heuristic value (inf until calc_heuristic runs)."""
        return self.heuristic

    def calc_heuristic(self, p2):
        """Compute and store the Manhattan distance from this node to point p2."""
        p1x, p1y = self.x, self.y
        p2x, p2y = p2
        self.heuristic = abs(p2x - p1x) + abs(p2y - p1y)

    # Backwards-compatible alias: existing callers use the misspelled name.
    clac_heuristic = calc_heuristic

    def calc_neighbours(self, grid):
        """Populate self.neighbours with the non-barrier 4-neighbours in grid."""
        if self.x > 0:  # LEFT
            if not grid[self.y][self.x - 1].is_barrier():
                self.neighbours.append(grid[self.y][self.x - 1])
        if self.x < settings.NUM_X - 1:  # RIGHT
            if not grid[self.y][self.x + 1].is_barrier():
                self.neighbours.append(grid[self.y][self.x + 1])
        if self.y > 0:  # UP
            if not grid[self.y - 1][self.x].is_barrier():
                self.neighbours.append(grid[self.y - 1][self.x])
        if self.y < settings.NUM_X - 1:  # DOWN (grid is square: NUM_X rows too)
            if not grid[self.y + 1][self.x].is_barrier():
                self.neighbours.append(grid[self.y + 1][self.x])

    def draw_node(self, win):
        """Draw this node as a filled square on the pygame surface."""
        pygame.draw.rect(win, self.color, (self.x_pos, self.y_pos, self.width, self.width))

    # Is functions
    def is_barrier(self):
        return self.color == settings.BLACK

    def is_start(self):
        return self.color == settings.GREEN

    def is_end(self):
        return self.color == settings.RED

    def is_path(self):
        return self.color == settings.YELLOW

    def is_explored(self):
        # BUGFIX: make_explored() paints nodes DARK_SILVER, but this check
        # previously compared against SKIN and could never return True.
        return self.color == settings.DARK_SILVER

    # Setters
    def make_barrier(self):
        self.color = settings.BLACK

    def reset(self):
        self.color = settings.SILVER

    def make_start(self):
        self.color = settings.GREEN

    def make_end(self):
        self.color = settings.RED

    def make_path(self):
        self.color = settings.YELLOW

    def make_explored(self):
        self.color = settings.DARK_SILVER

    def __str__(self):
        return f"Node at ({self.x}, {self.y})"
|
{"/main.py": ["/mygame/functions.py"], "/mygame/functions.py": ["/mygame/myqueue.py", "/mygame/node.py"]}
|
4,280
|
abdulrauf8788uni/AstarPathFinding-mini
|
refs/heads/master
|
/main.py
|
import pygame
from mygame.functions import makeGrid, drawGrid, getClickedNode, Astar
from mygame import settings
# Pygame settings
# Pygame settings
WIN = pygame.display.set_mode(settings.SCREEN)
pygame.display.set_caption(settings.CAPTION)
# Local variables
run = True          # main-loop flag; cleared when the window is closed
started = False     # set when the user presses SPACE to launch A*
grid = makeGrid()   # 2D list of Node objects
start_node = None   # search start (first left-click)
end_node = None     # search goal (second left-click)
while run:
    WIN.fill(settings.BLACK)
    drawGrid(WIN, grid)
    for event in pygame.event.get():
        # Exit on window close
        if event.type == pygame.QUIT:
            run = False
            print(f"Exiting '{settings.CAPTION}'")
        # Input is only handled while the algorithm is not running
        if not started:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    # ESC clears the board and forgets start/end selections
                    grid = makeGrid()
                    start_node = None
                    end_node = None
                if event.key == pygame.K_SPACE:
                    started = True
            # Mouse left click functionality
            if pygame.mouse.get_pressed()[0]:
                # Start node, end node and barrier node logic:
                # first click sets start, second sets end, further clicks draw barriers
                clicked_node = getClickedNode(grid)
                if not start_node and not end_node:
                    start_node = clicked_node
                    start_node.make_start()
                elif not end_node and clicked_node != start_node:
                    end_node = clicked_node
                    end_node.make_end()
                elif clicked_node != start_node and clicked_node != end_node:
                    clicked_node.make_barrier()
            # Mouse right click functionality: erase the clicked node
            elif pygame.mouse.get_pressed()[2]:
                clicked_node = getClickedNode(grid)
                if clicked_node == end_node:
                    end_node = None
                elif clicked_node == start_node:
                    # NOTE(review): right-clicking the start promotes the end node
                    # to the new start; if no end exists, start_node still points
                    # at the reset node — confirm this is intended.
                    if end_node:
                        end_node.make_start()
                        start_node = end_node
                        end_node = None
                clicked_node.reset()
    # Run the search once per SPACE press
    if started:
        print("Running A* algorithm.")
        Astar(WIN, grid, start_node, end_node)
        started = False
    pygame.display.update()
# pygame.quit()
|
{"/main.py": ["/mygame/functions.py"], "/mygame/functions.py": ["/mygame/myqueue.py", "/mygame/node.py"]}
|
4,281
|
abdulrauf8788uni/AstarPathFinding-mini
|
refs/heads/master
|
/mygame/myqueue.py
|
class myPriorityQueue():
    """A tiny list-backed priority queue of (value, priority) pairs.

    get()/get_least() operate on the *lowest* priority, while
    get_highest_priority()/delete_highest_priority() operate on the highest.
    Ties are broken in insertion order (earliest-inserted pair wins).
    """

    def __init__(self):
        self.queue = []

    def insert(self, value, priority):
        """Append a (value, priority) pair; O(1)."""
        self.queue.append((value, priority))

    def get(self):
        """Remove and return the pair with the lowest priority.

        Returns (None, None) if the queue is empty.
        """
        item = self.get_least()
        # BUGFIX: compare against None explicitly — a falsy-but-valid value
        # (e.g. 0 or an empty list) must still be removed from the queue.
        if item[0] is not None:
            self.queue.remove(item)
        return item

    def get_highest_priority(self):
        """Return (without removing) the value with the highest priority."""
        item = self.get_heighest()
        return item[0]

    def delete_highest_priority(self):
        """Remove the pair with the highest priority, if any."""
        item = self.get_heighest()
        if item[0] is not None:
            self.queue.remove(item)

    # helper functions
    def get_least(self):
        """Return the earliest-inserted pair with the minimum priority."""
        if self.is_empty():
            return None, None
        # min() yields the first pair with the smallest priority, matching the
        # original first-inserted tie-breaking behaviour.
        return min(self.queue, key=lambda pair: pair[1])

    def get_heighest(self):
        """Return the earliest-inserted pair with the maximum priority."""
        if self.is_empty():
            return None, None
        # BUGFIX: the previous implementation started from max_val = 0 and so
        # implicitly returned None when every priority was negative; max()
        # handles any priorities and keeps first-inserted tie-breaking.
        return max(self.queue, key=lambda pair: pair[1])

    def is_empty(self):
        return len(self.queue) == 0
if __name__ == "__main__":
    # Quick manual smoke test of lowest-priority extraction.
    demo_queue = myPriorityQueue()
    demo_queue.insert("Apple", 4)
    demo_queue.insert("Mango", 4)
    demo_queue.insert("grapes", 3)
    demo_queue.insert("banana", 5)
    for _ in range(3):
        print(demo_queue.get())
|
{"/main.py": ["/mygame/functions.py"], "/mygame/functions.py": ["/mygame/myqueue.py", "/mygame/node.py"]}
|
4,282
|
abdulrauf8788uni/AstarPathFinding-mini
|
refs/heads/master
|
/mygame/settings.py
|
# Settings
SCREEN = WIDTH, HEIGHT = 800, 800
# Number of blocks x, and y
NUM_X = 50
# Caption of the window
CAPTION = "Visualiztion tool"
# Colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
SILVER = (194, 194, 194)
DARK_SILVER = (166, 166, 166)
RED = (237, 36, 36)
GREEN = (26, 237, 46)
YELLOW = (237, 224, 36)
SKIN = (227, 194, 129)
|
{"/main.py": ["/mygame/functions.py"], "/mygame/functions.py": ["/mygame/myqueue.py", "/mygame/node.py"]}
|
4,283
|
abdulrauf8788uni/AstarPathFinding-mini
|
refs/heads/master
|
/mygame/functions.py
|
import pygame
from . import settings
import time
from .myqueue import myPriorityQueue
from .node import Node
# All Fuctions
def makeGrid():
    """Build and return a NUM_X x NUM_X grid (list of rows) of Node objects."""
    cell_size = settings.WIDTH // settings.NUM_X
    return [
        [Node(col, row, cell_size) for col in range(settings.NUM_X)]
        for row in range(settings.NUM_X)
    ]
def drawGrid(win, grid):
    """Render every node of the grid onto the given surface."""
    for grid_row in grid:
        for cell in grid_row:
            cell.draw_node(win)
def getClickedNode(grid):
    """Map the current mouse position to the grid node under the cursor."""
    cell_size = settings.WIDTH // settings.NUM_X
    mouse_x, mouse_y = pygame.mouse.get_pos()
    return grid[mouse_y // cell_size][mouse_x // cell_size]
def recalculate_nodes(grid, end_node):
    """Refresh heuristics (towards end_node) and neighbour lists for every node.

    Two full passes: heuristics first, then neighbours, matching the
    original behaviour.
    """
    for grid_row in grid:
        for cell in grid_row:
            cell.clac_heuristic(end_node.pos())
    for grid_row in grid:
        for cell in grid_row:
            cell.calc_neighbours(grid)
def Astar(win, grid, start_node, end_node):
    # Weighted A* search over the node grid, animating exploration as it goes.
    # Whole candidate paths are stored in the queue as (node_list, path_cost)
    # pairs prioritised by f-score; queue.get() pops the lowest f-score first.
    recalculate_nodes(grid, end_node)
    queue = myPriorityQueue()
    explored = []
    # NOTE(review): best_path_item is assigned here but never updated or read.
    best_path_item = ([], float("inf"))
    queue.insert(([start_node], 0), start_node.get_h())
    while not queue.is_empty():
        item, local_f_score = queue.get()
        # The node being expanded is the last node of the popped path.
        exploring = item[0][-1]
        exploring.make_explored()
        exploring.draw_node(win)
        pygame.display.update()
        explored.append(exploring)
        children = exploring.neighbours
        for child in children:
            if not child in explored:
                node_list, path_cost = item
                new_path_cost = path_cost + 1
                new_list = list(node_list)
                # Heuristic of 0 means the child is the goal (Manhattan distance).
                if child.get_h() == 0:
                    # Repaint the found path, excluding the start/end endpoints.
                    for node in new_list:
                        if node != start_node and node != end_node:
                            node.make_path()
                            node.draw_node(win)
                    pygame.display.update()
                    return
                # Heuristic scaled by 4 biases the search toward the goal
                # (weighted A*; may trade optimality for speed).
                f_score = new_path_cost + child.get_h() * 4
                new_list.append(child)
                explored.append(child)
                # print("adding", end=" ")
                # for path in new_list:
                #     print(path.pos(), end=' ')
                # print("with f score", f_score)
                # print(f"inserting {child.pos()}")
                queue.insert((new_list, new_path_cost), f_score)
    print("No possible path found. ")
|
{"/main.py": ["/mygame/functions.py"], "/mygame/functions.py": ["/mygame/myqueue.py", "/mygame/node.py"]}
|
4,290
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/backend/integrations/svn_utils.py
|
# coding=utf-8
import datetime
import os.path
from pathlib import Path
import svn.local
from joblib import Memory
memory = Memory(Path("data"), verbose=1)
@memory.cache
def get_log(branch, from_dt, to_dt):
    """Return the SVN log entries for `branch` between two ISO datetimes.

    Results are cached on disk by joblib's Memory, keyed on the arguments.
    """
    client = svn.local.LocalClient(path_=os.path.abspath(branch))
    entries = client.log_default(
        timestamp_from_dt=datetime.datetime.fromisoformat(from_dt),
        timestamp_to_dt=datetime.datetime.fromisoformat(to_dt),
        changelist=True,
    )
    return list(entries)
@memory.cache()
def get_log_for_revision(branch, revision):
    """Return the SVN log entries for a single revision of `branch` (cached)."""
    client = svn.local.LocalClient(path_=os.path.abspath(branch))
    entries = client.log_default(
        revision_from=revision, revision_to=revision, changelist=True
    )
    return list(entries)
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,291
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/cov_profiler.py
|
# coding=utf-8
import json
import time
import subprocess
import re
import click
# Click command group: entry point of the coverage-profiler CLI.
@click.group()
def cli():
    pass
@cli.command("run")
@click.argument("config_file", type=click.Path(exists=True, readable=True))
def run_profiler_for_config(config_file):
    """
    Run OpenCover profiler for a given configuration file
    :param config_file: path to the configuration file
    """
    # Load config file
    with open(config_file, mode="r") as demo_file:
        config = json.load(demo_file)
    # One profiler run per configured test list, all appended to one log.
    log_path = f"{config['branch']}_log_profiler.txt"
    with open(log_path, mode="a") as log_file:
        for testlist in config["runlists"]:
            run_coverage_profiler(config, testlist, log_file)
def run_coverage_profiler(config, testlist, log_file):
    """
    Run OpenCover profiler for a given list of tests and configuration file
    :param config: path to configuration file
    :param testlist: path to file with list of tests to run
    :param log_file: path to logging file
    """
    def write_log(message):
        # Mirror every message to stdout and to the persistent log file.
        print(message)
        log_file.write(message + "\n")
        log_file.flush()

    write_log(testlist)
    command, testlist_id = get_opencover_args(config, testlist)
    write_log(f"Command: {command} --> Output: {testlist_id}")
    # Run and profile tests with OpenCover, timing the whole run.
    started_at = time.perf_counter()
    subprocess.call(command)
    finished_at = time.perf_counter()
    write_log(f"Run for {testlist_id}: {(finished_at - started_at) / 60} minutes")
def get_opencover_args(config, testlist):
    """
    Builds an OpenCover command according to the configuration file and list of tests provided.
    :param config: path to configuration file
    :param testlist: path to file with list of tests to run
    :return: an OpenCover command and the id of the list of tests
    """
    # Extract the testlist id: the path fragment between runlists_path and ".in".
    testlist_id = re.search(
        re.escape(config["runlists_path"]) + r"(.*).in", testlist
    ).group(1)
    # Assemble the OpenCover invocation: executable, options, output report path.
    command = [config["opencover_exec"]]
    command += [
        f" -target: {config['runner']}",
        f" -targetargs:{' '.join(config['runner_args'])} {testlist}",
        f" -threshold:{config['threshold']} ",
        " -hideskipped:All ",
        " -mergebyhash ",
        # " -skipautoprops ",
        f" -filter:{' '.join(config['filters'])} ",
        f" -coverbytest:{';'.join(config['cover_by_test'])} ",
        f" -searchdirs: {config['searchdirs_path']} ",
        " -register:user ",
    ]
    command.append(f"-output:{config['reports_path']}refactor_{testlist_id}.xml")
    return command, testlist_id
if __name__ == "__main__":
    cli()  # dispatch to the click command group
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,292
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/get_csv_lines.py
|
# coding=utf-8
import pickle
import backend
import os
import click
from backend.evaluation.summary import ResultsSummary
# Click command group: entry point of the CSV-export CLI.
@click.group()
def cli():
    pass
@cli.command("start")
@click.argument("data_dir", type=click.Path(exists=True))
def start(data_dir):
    """Print the CSV summary line of every pickled ResultsSummary in data_dir.

    For each demo batch, collects the files whose name contains
    "<batch>.pickle" and prints each summary's CSV line to stdout.
    """
    for batch in ["demo1", "demo2", "demo3", "demo4"]:
        results = [
            os.path.abspath(os.path.join(data_dir, x))
            for x in os.listdir(data_dir)
            if f"{batch}.pickle" in x  # and len(x) == 24
        ]
        for file in results:
            # BUGFIX: use a context manager so the file handle is closed
            # promptly (the original pickle.load(open(...)) leaked it).
            # NOTE(review): pickle.load executes code on load — only run this
            # against trusted data directories.
            with open(file, mode="rb") as pickle_file:
                summary: ResultsSummary = pickle.load(pickle_file)
            # print(f"{file[-16:-13]}")
            print(f"==Results for {file}\n{summary.export_to_csv_line()}")
if __name__ == "__main__":
    cli()  # dispatch to the click CLI
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,293
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/backend/evaluation/execution_item.py
|
# coding=utf-8
import random
import re
from dataclasses import dataclass
from typing import List
import numpy as np
from faker import Factory
from backend.integrations.database import get_testfails_for_revision
from backend.selection.problem_data import ProblemData
def print_function_values_to_screen(solutions, data):
    # Adapted from JMetalPy: print each solution's objectives plus the number
    # of tests it selects (decoded from its binary variable vector).
    if type(solutions) is not list:
        solutions = [solutions]
    for sol in solutions:
        print(str(solutions.index(sol)) + ": ", sep=" ", end="", flush=True)
        print(sol.objectives, sep=" ", end="", flush=True)
        mask = np.array(sol.variables[0])
        selected_tests = list(data.tests_index[mask == 1])
        print(f" (sol_size: {len(selected_tests)})")
@dataclass
class RevisionResults:
branch: str
rev_id: str
rev_date: str
changelist: list
error_no_changed_items: str
solutions_found: list
score: tuple # (score %, # matched, # expected, # tests)
solution_metrics: list
new_feedback_time: float
computing_time: float
orig_rev_history: set
real_rev_history: set
innocent: bool
def __init__(
self, svn_log_entry, branch, ignored_tests, previous_rev, masked=False
):
self.branch = branch
self.rev_id = svn_log_entry.revision
self.rev_date = str(svn_log_entry.date)
self.changelist = svn_log_entry.changelist
self.masked = masked
self.error_no_changed_items = None
self.innocent = None
self.set_revision_history(previous_rev, ignored_tests)
if masked:
self.fake = Factory.create()
self.solutions_found = []
self.score = (-1, -1, -1, -1)
self.new_feedback_time = 0
self.computing_time = 0
self.solution_metrics = []
def set_revision_history(self, previous: "RevisionResults", ignored: List[str]):
"""
Set revision history values (i.e. lists of failing tests names) for this revision.
:param previous: execution results from the previous revision
:param ignored: list of tests to ignore
"""
# Set original revision history
rev_results = get_testfails_for_revision(revision=self.rev_id)
self.orig_rev_history = set(rev_results.FULLNAME.values)
# If no failing tests returned from the database, use failing tests of previous revision
if len(self.orig_rev_history) == 0:
if previous is not None:
self.orig_rev_history = previous.orig_rev_history
else:
self.orig_rev_history = set()
# Set real revision history to be used
# 1. remove ignored tests based on configuration file
self.real_rev_history = set(
filter(
lambda test: all(x not in test for x in ignored), self.orig_rev_history
)
)
# 2. keep only failing tests not in previous revision
if previous is not None:
self.real_rev_history = set(
filter(
lambda x: x not in previous.orig_rev_history, self.real_rev_history
)
)
def print_results(self, data: ProblemData, fixed_demo=False):
"""
Print execution results to stdout.
:param data: data associated with this execution
:param fixed_demo: flag indicating whether this a random selection or not
"""
def get_fake_filename(file: str) -> str:
"""
Get a fake filename to mask the given file path.
:param file: file path for which a fake name should be generated
:return: a generated fake file path
"""
result = re.search(r"/.*\.(.*)", file)
if result is None:
# this is a directory -> get a random file path with some random file extension
return self.fake.file_path(
depth=random.randint(3, 5),
extension=random.choice(
["cs", "tsx", "json", "oml", "csproj", "xml"]
),
)
else:
# this a file -> get a random filename and keep the file extension
extension = result.group(1)
filename = self.fake.file_path(
depth=random.randint(3, 5), extension=" "
)
filename = "/".join([x.capitalize() for x in filename[:-1].split("/")])
return filename + extension
# Revision Id + Changelist
if self.masked:
fake_changelist = [(x[0], get_fake_filename(x[1])) for x in self.changelist]
changes = "\n\t".join(map(lambda x: str(x), fake_changelist))
else:
changes = "\n\t".join(map(lambda x: str(x), self.changelist))
revision_id = f"rev_id: {self.rev_id} ({self.rev_date})"
print(f"{revision_id}\nchangelist:\n\t{changes}")
# Execution results
if type(self.error_no_changed_items) == str:
# If no changed indexes were extracted, then print the error message
print(f"Revision {self.rev_id} failed due to {self.error_no_changed_items}")
self.print_revision_status()
else:
if fixed_demo:
# For random selections, the solution is stored in self.solutions_found
self.print_revision_status()
self.print_solution_score(0, self.solutions_found)
self.computing_time = 0.1
print(f"Solution Size: {len(self.solutions_found)} tests")
self.new_feedback_time = sum(
[
data.history_test_execution_times[test]
for test in self.solutions_found
]
)
print(
f"Solution Feedback Loop Time: {self.new_feedback_time:.0f} seconds"
)
else:
self.print_revision_status()
self.print_execution_results(data)
self.print_solution_list(data)
self.print_execution_inspection(data)
# separator
print("==========================" * 4)
def print_revision_status(self):
"""
Print status of this revision: pass/fail, number and list of failing tests.
"""
if len(self.orig_rev_history) == 0:
print(f"Revision {self.rev_id} had no failing tests")
else:
failed_tests = f"{len(self.orig_rev_history)} failed tests"
if self.masked:
print(f"Revision {self.rev_id} - {failed_tests}")
else:
joined = "\n\t".join(self.orig_rev_history)
print(f"Revision {self.rev_id} - {failed_tests}:\n\t{joined}")
def print_execution_results(self, data: ProblemData):
"""
Print results of this execution to stdout
- Computing Time
- Objectives values of each solution
- Score of each solution
:param data: data related to this execution
"""
# Computing Time
print("Computing time: " + str(self.computing_time))
# Objectives values of each solution
print_function_values_to_screen(self.solutions_found, data)
# Score of each solution
for i, solution in enumerate(self.solutions_found):
pos = np.array(solution.variables[0])
rev_solution = list(data.tests_index[pos == 1])
self.print_solution_score(i, rev_solution)
def print_execution_inspection(self, data: ProblemData):
"""
Print inspection conclusions over this execution.
Inspection checks if it was possible to select a test given the available data (before/after filters)
:param data: data related to this execution
"""
def inspection_checker(tests_data: np.ndarray):
"""
Check if the provided array of tests contains the failing tests for this revision.
The counts of possible/impossible to find tests are printed.
:param tests_data: array of test names
"""
available, impossible = 0, 0
for test in self.real_rev_history:
if any(x in test for x in tests_data):
# print(f"{test} = Available")
available += 1
else:
print(f"\t{test} = Impossible")
impossible += 1
print(f"Available={available} || Impossible={impossible}")
print(f"Check test availability vs original data - {data.original_tests.shape}")
inspection_checker(data.original_tests)
print(f"Check test availability vs filtered data - {data.tests_index.shape}")
inspection_checker(data.tests_index)
def print_solution_list(self, data: ProblemData):
"""
Print solution results for this execution: solution size, feedback time and list of selected tests
:param data: data related to this execution
"""
def get_fake_test_name() -> str:
"""
Get a generated fake test name
:return: a random test name
"""
test_name = self.fake.file_path(depth=random.randint(3, 5), extension=" ")
test_name = test_name[1:-2].replace("/", ".")
test_name = "Test." + ".".join(
[x.capitalize() for x in test_name.split(".")]
)
return test_name
# Store objectives values of this solution
solution = self.solutions_found[0]
self.solution_metrics = solution.objectives
pos = np.array(solution.variables[0])
rev_solution = list(data.tests_index[pos == 1])
# Solution Size + Feedback Time
print(f"Solution Size: {len(rev_solution)} tests")
self.new_feedback_time = sum(
[data.history_test_execution_times[test] for test in rev_solution]
)
print(f"Solution Feedback Loop Time: {self.new_feedback_time:.0f} seconds")
# Selected Tests
if self.masked:
rev_solution = [get_fake_test_name() for _ in rev_solution]
solution_tests = "\n\t".join(rev_solution)
print(f"\t{solution_tests}")
def print_solution_score(self, i: int, rev_solution: List[str]):
    """
    Print the score (number of failing tests found) of this solution and
    store it in self.score as (recall %, matched, failing, solution size).
    :param i: number id of this solution
    :param rev_solution: list of tests selected by this solution
    """
    size = len(rev_solution)
    label = f"Solution {i} ({size})"

    # Guard: no real failing tests to match against.
    if len(self.real_rev_history) == 0:
        print(f"{label} = only ignored tests")
        self.score = (-1, 0, 0, size)
        return

    # Failing tests covered by (substring-matching) the selected tests.
    matched = [
        failing
        for failing in self.real_rev_history
        if any(sel in failing for sel in rev_solution)
    ]
    recall = (len(matched) / len(self.real_rev_history)) * 100
    ratio = f"{len(matched)}/{len(self.real_rev_history)}"
    if self.masked:
        print(f"{label} = {ratio} ({recall:.0f}%)")
    else:
        # Also print matching test names, if not using masked mode
        print(f"{label} = {ratio} ({recall:.0f}%) -> {matched}")
    self.score = (recall, len(matched), len(self.real_rev_history), size)
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,294
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/testsel_pipeline.py
|
# coding=utf-8
import json
import random
import click
import numpy as np
from jmetal.core.algorithm import Algorithm
import backend.selection.objectives as metrics
from backend.evaluation.execution_item import RevisionResults
from backend.evaluation.summary import ResultsSummary
from backend.integrations.svn_utils import get_log, get_log_for_revision
from backend.selection.problem_data import ProblemData
from backend.selection.test_selection import TestSelection, my_binary_mopso
# Fixed seed for reproducible optimization runs; print arrays in full.
np.random.seed(1234)
np.set_printoptions(threshold=np.inf)

# Maps CLI objective names to their metric functions
# (implemented in backend.selection.objectives, imported above as `metrics`).
OBJECTIVES_MAP = {
    "ddu": metrics.calculate_ddu,
    "n_tests": metrics.calculate_number_of_tests,
    "fails": metrics.calculate_test_fails,
    "exec_times": metrics.calculate_exec_times,
    "norm_coverage": metrics.calculate_norm_coverage,
    "coverage": metrics.calculate_coverage,
}
@click.group()
def cli():
    """Root command group; the "user", "demo" and "random" subcommands attach here."""
    pass
@cli.command("user")
@click.option(
"--objectives",
"-o",
required=True,
type=click.Choice(list(OBJECTIVES_MAP.keys())),
multiple=True,
)
@click.option("--masked", is_flag=True)
@click.argument("swarm_size", type=click.INT)
@click.argument("activity_matrix", type=click.Path(exists=True, readable=True))
@click.argument("demo_config", type=click.Path(exists=True, readable=True))
def run_optimization(objectives, masked, activity_matrix, demo_config, swarm_size):
"""
User input-based execution of the pipeline
"""
with open(demo_config, mode="r") as demo_file:
config = json.load(demo_file)
# Build problem data
data = ProblemData(
activity_matrix,
config["branch"],
config["fails_start_dt"],
config["from_dt"],
config["to_dt"],
ignore_tests=config["ignore_tests"],
)
data.swarm_size = swarm_size
while True:
revision = input("Target Revision Id: ")
log = [log_e for log_e in get_log_for_revision(config["branch_path"], revision)]
if not log:
continue
log_entry = log[0]
print(f"Running pipeline demo with the following objectives: {objectives}")
metrics = [OBJECTIVES_MAP[key] for key in objectives]
# Reset problem data to original matrices
data.reset()
# Run pipeline for revision
revision_results = RevisionResults(
log_entry, data.branch, data.ignore_tests, masked
)
run_pipeline(data, metrics, revision_results, config["ignore_changes"])
revision_results.print_results(data)
@cli.command("demo")
@click.option(
"--objectives",
"-o",
required=True,
type=click.Choice(list(OBJECTIVES_MAP.keys())),
multiple=True,
)
@click.option("--masked", is_flag=True)
@click.argument("swarm_size", type=click.INT)
@click.argument("activity_matrix", type=click.Path(exists=True, readable=True))
@click.argument("demo_config", type=click.Path(exists=True, readable=True))
@click.argument("output_file", type=click.Path())
def run_optimization_for_demo(
activity_matrix, demo_config, objectives, masked, swarm_size, output_file
):
def run_tool_for_revision(revision, data, previous_rev, ignore_changes):
print(f"Running pipeline demo with the following objectives: {objectives}")
metrics = [OBJECTIVES_MAP[key] for key in objectives]
# Reset problem data to original matrices
data.reset()
# Run pipeline for revision
revision_results = RevisionResults(
revision, data.branch, data.ignore_tests, previous_rev, masked
)
if len(revision_results.real_rev_history) > 0:
run_pipeline(data, metrics, revision_results, ignore_changes)
revision_results.print_results(data)
return revision_results
# Get log based on demo config
with open(demo_config, mode="r") as demo_file:
config = json.load(demo_file)
log = get_log(config["branch_path"], config["from_dt"], config["to_dt"])
# Build problem data
data = ProblemData(
activity_matrix,
config["branch"],
config["fails_start_dt"],
config["from_dt"],
config["to_dt"],
ignore_tests=config["ignore_tests"],
)
data.swarm_size = swarm_size
# Run tool for each revision
results = []
previous = None
# for log_e in log[:100]:
for log_e in log:
if not is_ignored_project(log_e.changelist, config["ignore_changes"]):
res = run_tool_for_revision(log_e, data, previous, config["ignore_changes"])
results.append(res)
previous = res
# Build results summary report
summary = ResultsSummary(results, data)
# - print summary to terminal
summary.export_to_text()
# save data to pickle
with open(output_file, mode="wb") as output:
summary.export_to_pickle(output)
@cli.command("random")
@click.option("--fixed", is_flag=True, help="Use a fixed test sample for evaluation")
@click.option(
"--filtered",
is_flag=True,
help="Filter matrix using changelist for evaluation fairness with MOTSD",
)
@click.argument("random_p", type=click.FLOAT)
@click.argument("all_tests", type=click.Path(exists=True, readable=True))
@click.argument("activity_matrix", type=click.Path(exists=True, readable=True))
@click.argument("demo_config", type=click.Path(exists=True, readable=True))
@click.argument("output_file", type=click.Path())
def run_random_demo(
activity_matrix, demo_config, output_file, random_p, all_tests, fixed, filtered
):
def run_tool_for_revision(revision, data, previous_rev, ignore_changes, t_sample):
revision_results = RevisionResults(
revision, data.branch, data.ignore_tests, previous_rev
)
if filtered:
# Running in filtered mode for evaluation fairness with MOTSD, i.e. filter matrix with changelist
# Get indexes for methods changed by a commit
changed_idxs = data.get_changed_indexes_for_changelist(
revision.changelist, ignore_changes
)
# Stop pipeline if no changed indexes were extracted
if type(changed_idxs) == str:
revision_results.error_no_changed_items = changed_idxs
return revision_results
if not fixed:
# Running in not fixed sample mode, i.e. get a new test sample for each commit
t_sample = random.sample(tests, int(random_p * (len(tests))))
if len(revision_results.real_rev_history) > 0:
revision_results.solutions_found = t_sample
revision_results.print_results(data, fixed_demo=True)
return revision_results
# Get log based on demo config
with open(demo_config, mode="r") as demo_file:
config = json.load(demo_file)
log = get_log(config["branch_path"], config["from_dt"], config["to_dt"])
# Read all tests file
with open(all_tests, mode="r") as tests_file:
tests = [test.strip() for test in tests_file.readlines()]
tests_sample = random.sample(tests, int(random_p * (len(tests))))
# Build problem data
data = ProblemData(
activity_matrix,
config["branch"],
config["fails_start_dt"],
config["from_dt"],
config["to_dt"],
ignore_tests=config["ignore_tests"],
)
# Run tool for each revision
results = []
previous = None
# for log_e in log[:100]:
for log_e in log:
if not is_ignored_project(log_e.changelist, config["ignore_changes"]):
res = run_tool_for_revision(
log_e, data, previous, config["ignore_changes"], tests_sample
)
results.append(res)
previous = res
# Build results summary report
summary = ResultsSummary(results, data)
# - print summary to terminal
summary.export_to_text()
# save data to pickle
with open(output_file, mode="wb") as output:
summary.export_to_pickle(output)
def run_pipeline(data, objectives, revision: RevisionResults, ignore_changes):
    """
    Run the selection pipeline for a single revision: extract the changed
    method indexes, filter the activity matrix accordingly, then run the
    optimizer and store the resulting solution front on the revision.
    :param data: ProblemData for the whole run (mutated by filtering)
    :param objectives: list of objective metric functions
    :param revision: results holder for this revision (mutated)
    :param ignore_changes: path patterns whose changes are ignored
    """
    # Get indexes for methods changed by the commit's changelist.
    changed_idxs = data.get_changed_indexes_for_changelist(
        revision.changelist, ignore_changes
    )
    # The extractor returns an error string when nothing usable changed;
    # record it and stop the pipeline for this revision.
    if isinstance(changed_idxs, str):
        revision.error_no_changed_items = changed_idxs
        return
    # Filter matrix and indexes based on the commit.
    data.filter_data_for_commit(changed_idxs)
    # Run the optimizer on the reduced matrix.
    problem = TestSelection(data, objectives)
    solution_front = run_optimizer(my_binary_mopso(problem, data.swarm_size), revision)
    revision.solutions_found = solution_front
def run_optimizer(algorithm: Algorithm, revision: RevisionResults):
    """
    Execute the optimizer, record its runtime on the revision and return
    the result front sorted by the first two objective values (ascending).
    :param algorithm: configured jMetal algorithm instance
    :param revision: results holder for this revision (mutated)
    :return: sorted list of solutions
    """
    algorithm.run()
    front = algorithm.get_result()
    revision.computing_time = algorithm.total_computing_time
    # return sorted(front, key=lambda x: (x.objectives[0]))
    return sorted(front, key=lambda sol: (sol.objectives[0], sol.objectives[1]))
def is_ignored_project(changelist, ignore_changes):
    """
    Return True when every changed path in the changelist matches at least
    one ignore pattern, i.e. the whole commit can be skipped.
    :param changelist: iterable of (action, path) change tuples
    :param ignore_changes: substrings marking paths to ignore
    """
    def is_ignored(change):
        path = change[1]
        return any(pattern in path for pattern in ignore_changes)

    return all(is_ignored(change) for change in changelist)
if __name__ == "__main__":
cli()
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,295
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/backend/evaluation/utils.py
|
# coding=utf-8
from typing import List, Tuple
import numpy as np
from backend.evaluation.execution_item import RevisionResults
def get_metric_stats(data: np.ndarray) -> List[int]:
    """
    Compute basic statistics and percentiles for a metric's data points.
    - Stats: average, min, max, standard deviation
    - Percentiles: 10, 25, 50, 75, 90
    :param data: array of metric values
    :return: stats followed by percentiles, each truncated to int
    """
    summary = [np.average(data), np.min(data), np.max(data), np.std(data)]
    summary.extend(np.percentile(data, p) for p in (10, 25, 50, 75, 90))
    return [int(value) for value in summary]
def get_micro_recall(executions: List[RevisionResults]) -> float:
    """
    Calculate micro-averaged recall: total matched failing tests over total
    failing tests, counting only executions with at least one failing test.
    :param executions: list of RevisionResults objects
    :return: micro-recall value
    """
    scores = [res.score for res in executions if res.score[2] > 0]
    matched_total = sum(score[1] for score in scores)
    failing_total = sum(score[2] for score in scores)
    return matched_total / failing_total
def get_macro_recall(executions: List[RevisionResults]) -> float:
    """
    Calculate macro-averaged recall: the mean of per-commit recalls over
    executions with at least one failing test.
    :param executions: list of RevisionResults objects
    :return: macro-recall value
    """
    recalls = [
        res.score[1] / res.score[2] for res in executions if res.score[2] > 0
    ]
    total = sum(recalls)
    return total / len(recalls)
def get_micro_precision(executions: List[RevisionResults]) -> float:
    """
    Calculate micro-averaged precision: total matched failing tests over
    total selection size, counting only executions with failing tests.
    :param executions: list of RevisionResults objects
    :return: micro-precision value
    """
    scores = [res.score for res in executions if res.score[2] > 0]
    matched_total = sum(score[1] for score in scores)
    selected_total = sum(score[3] for score in scores)
    return matched_total / selected_total
def get_macro_precision(executions: List[RevisionResults]) -> float:
    """
    Calculate macro-averaged precision: the mean of per-commit precisions
    over executions with failing tests and a non-empty selection.
    :param executions: list of RevisionResults objects
    :return: macro-precision value
    """
    precisions = [
        res.score[1] / res.score[3]
        for res in executions
        if res.score[2] > 0 and res.score[3] > 0
    ]
    total = sum(precisions)
    return total / len(precisions)
def get_error_stats(
    pattern: str, executions: List[RevisionResults]
) -> Tuple[List, List]:
    """
    Collect executions whose error message contains the given pattern, once
    for all commits and once restricted to red (failing) commits.
    :param pattern: error message pattern to search for
    :param executions: list of execution results
    :return: (all matching executions, matching executions on red commits)
    """
    matching = [res for res in executions if pattern in res.error_no_changed_items]
    red_only = [res for res in matching if len(res.real_rev_history) > 0]
    return matching, red_only
def get_tool_executions(executions: List[RevisionResults]) -> List[RevisionResults]:
    """
    Keep only the executions where the tool actually ran, i.e. whose error
    field was never set to an error string.
    :param executions: list of execution results
    :return: list of tool executions
    """
    tool_ran = lambda res: type(res.error_no_changed_items) != str
    return list(filter(tool_ran, executions))
def get_tool_no_executions(executions: List[RevisionResults]) -> List[RevisionResults]:
    """
    Keep only the failed executions, i.e. those whose error field holds an
    error string instead of a result.
    :param executions: list of execution results
    :return: list of failed tool executions
    """
    tool_failed = lambda res: type(res.error_no_changed_items) == str
    return list(filter(tool_failed, executions))
def get_total_innocent_reds(executions: List[RevisionResults]) -> int:
    """
    Count "innocent" red commits: tool executions over a red commit whose
    failing tests were all already failing in the previous revision.
    Marks such executions with ``res.innocent = True`` as a side effect.
    :param executions: list of execution results
    :return: number of innocent red commits
    """
    innocent_count = 0
    previous_fails = set()
    for res in executions:
        # Copied up front so later mutation of real_rev_history cannot leak
        # into the next iteration's comparison.
        current_fails = res.real_rev_history.copy()
        is_tool_run = type(res.error_no_changed_items) != str
        if is_tool_run and len(res.real_rev_history) > 0:
            # Every current failure already failed before -> innocent commit.
            if previous_fails.issuperset(res.real_rev_history):
                innocent_count += 1
                res.innocent = True
        previous_fails = current_fails
    return innocent_count
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,296
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/backend/opencover/parser.py
|
# coding=utf-8
import xml.etree.ElementTree as ET
import json
import itertools
import re
import numpy as np
import backend.opencover.utils as utils
def get_modules_from_report(report):
    """
    Extracts maps for tests and code modules names to its XML elements.
    :param report: path to XML coverage report
    :return: 2 maps (tests and code) mapping a module name to its XML element
    """
    # NOTE(review): this unpacking assumes the report root has exactly two
    # children, the second being the modules container -- confirm against
    # the OpenCover report schema.
    _, root_modules = ET.parse(report).getroot()
    test_modules, code_modules = {}, {}
    for child in root_modules:
        name = utils.get_module_name(child)
        code_modules[name] = child
        # Modules whose name contains "Tests" are additionally indexed as
        # test modules (they appear in both maps).
        if "Tests" in name:
            test_modules[name] = child
    return test_modules, code_modules
def get_files_map_from_report(report, branch):
    """
    Get map between file uids and their file path name.
    :param report: path to the XML coverage report
    :param branch: branch name to locate the start of the file path
    :return: map between file uids and file path names
    """
    test_modules, code_modules = get_modules_from_report(report)
    files_map = {}
    for module in code_modules.values():
        for file in utils.get_module_files(module):
            uid, path = file.attrib["uid"], file.attrib["fullPath"]
            # Keep only C# files under the branch directory; the relative
            # path (minus the .cs suffix) is converted to a dotted name.
            re_search = re.search(branch + r"\\(.*)\.cs", path)
            if re_search:
                name = re_search.group(1).replace("\\", ".")
                files_map[uid] = name
    return files_map
def build_tests_map(test_modules_map):
    """
    Map each tracked test method uid to its name, across all test modules.
    :param test_modules_map: map of test module name -> module XML element
    :return: map of test uid -> test name
    """
    tests_uids_map = {}
    for module in test_modules_map.values():
        tracked = utils.get_module_tracked_methods(module)
        tests_uids_map.update(
            (method.attrib["uid"], method.attrib["name"]) for method in tracked
        )
    return tests_uids_map
def build_methods_map(code_modules):
    """
    Assign sequential ids ("m1", "m2", ...) to every method of every class
    in every code module.
    :param code_modules: map of module name -> module XML element
    :return: map of generated method id -> method name
    """
    methods_uids_map = {}
    next_id = itertools.count(1)
    for module in code_modules.values():
        for clazz in utils.get_module_classes(module):
            for method in utils.get_class_methods(clazz):
                key = "m" + str(next(next_id))
                methods_uids_map[key] = utils.get_method_name(method)
    return methods_uids_map
def build_id_activity_matrix(code_modules, methods_uids_map, files_map):
    """
    Build the id-based activity matrix: method id -> list of covering test ids.
    Also rewrites method namespaces in methods_uids_map as a side effect.
    :param code_modules: map of module name -> module XML element
    :param methods_uids_map: map of method uid -> method name (mutated here)
    :param files_map: map of file uid -> file path name
    :return: dict mapping method id to the list of test ids covering it
    """
    # id-activity matrix
    # key - method id
    # value - test id
    def get_method_id(method_name):
        # Linear scan over the live (possibly already-renamed) map; returns
        # the first matching uid, or None implicitly when absent.
        # NOTE(review): O(len(map)) per call -> O(n^2) overall; a reverse
        # lookup map would be faster but must track the renames performed
        # by fix_methods_map_namespace below.
        for (key, value) in methods_uids_map.items():
            if value == method_name:
                return key
    # NOTE(review): dict.fromkeys(..., []) shares ONE list object among all
    # keys; harmless here because values are only reassigned, never mutated.
    activity_matrix = dict.fromkeys(methods_uids_map.keys(), [])
    for module in code_modules.values():
        for clazz in utils.get_module_classes(module):
            for method in utils.get_class_methods(clazz):
                method_name, tests = utils.get_method_coverage(method)
                if method_name is not None:
                    method_id = get_method_id(method_name)
                    activity_matrix[method_id] = tests
                    # Update methods map with namespace fix
                    fix_methods_map_namespace(
                        files_map, method, method_id, method_name, methods_uids_map
                    )
    return activity_matrix
def fix_methods_map_namespace(
    files_map, method, method_id, method_name, methods_uids_map
):
    """
    Replace method namespace with containing file path.
    :param files_map: map of uids to file paths
    :param method: method XML element
    :param method_id: method uid
    :param method_name: method name
    :param methods_uids_map: map of uids to method names
    """
    file_ref = utils.get_method_file_ref(method)
    if file_ref is not None:
        file_ref = file_ref.attrib["uid"]
        # Only rename when the file is one we mapped (i.e. lives on-branch).
        if files_map.get(file_ref) is not None:
            # Split "<return type> <namespace>::<rest>" and rebuild the
            # name with the file-derived namespace in the middle.
            return_type, name = re.search(r"(.* ).*(::.*)", method_name).groups()
            new_namespace = files_map[file_ref]
            new_method_name = "".join([return_type, new_namespace, name])
            methods_uids_map[method_id] = new_method_name
def build_binary_activity_matrix(id_act_matrix, method_uid_map, test_uid_map):
    """
    Convert the id-based activity matrix into a binary matrix (tests x methods).
    :param id_act_matrix: map of method id -> list of covering test ids
    :param method_uid_map: map of method uid -> method name (defines columns)
    :param test_uid_map: map of test uid -> test name (defines rows)
    :return: list-of-lists binary matrix, 1 where a test covers a method
    """
    # Precompute id -> position lookups once: O(1) per cell instead of the
    # original O(n) list.index scan for every test/method pair.
    test_pos_by_id = {uid: pos for pos, uid in enumerate(test_uid_map)}
    method_pos_by_id = {uid: pos for pos, uid in enumerate(method_uid_map)}
    # Start from an all-zeros matrix with one independent row per test.
    binary_activity_matrix = [
        [0] * len(method_pos_by_id) for _ in range(len(test_pos_by_id))
    ]
    # Fill with activity results.
    for method, tests in id_act_matrix.items():
        if method is not None and tests is not None:
            method_pos = method_pos_by_id[method]
            for test in tests:
                # Tests absent from the map are silently skipped (matches
                # the original's swallowed ValueError behaviour).
                test_pos = test_pos_by_id.get(test)
                if test_pos is not None:
                    binary_activity_matrix[test_pos][method_pos] = 1
    return binary_activity_matrix
def filter_activity_matrix(activity_matrix, method_uid_map, test_uid_map):
    """
    Drop methods and tests with no coverage activity from the matrix and
    from the corresponding uid maps.
    :param activity_matrix: binary matrix (tests x methods)
    :param method_uid_map: map of method uid -> method name
    :param test_uid_map: map of test uid -> test name
    :return: (boolean activity matrix, filtered method map, filtered test map)
    """
    matrix = np.array(activity_matrix, dtype=bool)
    tests_index = np.array(list(test_uid_map.keys()))
    methods_index = np.array(list(method_uid_map.keys()))
    print(f"-- Before filters: {matrix.shape}")
    # Drop columns (methods) that no test ever touches.
    active_methods = ~np.all(matrix == 0, axis=0)
    matrix = matrix[:, active_methods]
    methods_index = methods_index[active_methods]
    filtered_method_uid_map = {uid: method_uid_map[uid] for uid in methods_index}
    print(f"-- After methods filter: {matrix.shape}")
    # Drop rows (tests) that touch none of the remaining methods.
    active_tests = ~np.all(matrix == 0, axis=1)
    tests_index = tests_index[active_tests]
    filtered_test_uid_map = {uid: test_uid_map[uid] for uid in tests_index}
    matrix = matrix[active_tests]
    print(f"-- After tests filter: {matrix.shape}")
    return matrix, filtered_method_uid_map, filtered_test_uid_map
def export_data_to_json(output_name, activity_matrix, methods_map, tests_map):
    """
    Exports processed data to json files
    :param output_name: name identifier for the JSON output files
    :param tests_map: map of ids to tests
    :param methods_map: map of ids to methods
    :param activity_matrix: binary activity matrix (test x method)
    """
    # Each artifact is written to data/jsons/<kind>_<output_name>.json.
    with open(f"data/jsons/testids_{output_name}.json", "w") as outfile:
        json.dump(tests_map, outfile, indent=4)
    with open(f"data/jsons/methodids_{output_name}.json", "w") as outfile:
        json.dump(methods_map, outfile, indent=4)
    with open(f"data/jsons/actmatrix_{output_name}.json", "w") as outfile:
        # The boolean numpy matrix is cast to plain ints so json can
        # serialize it.
        json.dump(activity_matrix.astype("int").tolist(), outfile)
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,297
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/backend/integrations/database.py
|
# coding=utf-8
import pandas as pd
import pyodbc
from pathlib import Path
from joblib import Memory

# All database artifacts (SQL query files, connection config, query cache)
# live under this directory.
database_home = "data\\database\\"
# joblib disk cache: repeated identical queries are served from disk.
memory = Memory(Path(f"{database_home}"), verbose=0)
# pyodbc connection string, read from a config file kept out of the repo.
DB_CONFIG = Path(f"{database_home}database.config").read_text()
@memory.cache
def get_test_name_fails(start_date: str, max_date: str) -> pd.DataFrame:
    """
    Query the database for the number of test fails on a given date interval.
    Results are cached on disk by joblib.
    :param start_date: start date
    :param max_date: maximum date
    :return: 2-columns dataframe with the tests names and number of fails
    """
    print("Querying database for test name fails")
    sql = Path(f"{database_home}test_name_fails.sql").read_text()
    conn = pyodbc.connect(DB_CONFIG)
    return pd.read_sql_query(sql, conn, params=[start_date, max_date])
@memory.cache
def get_testfails_for_revision(revision: str) -> pd.DataFrame:
    """
    Query the database for the tests that failed on a given revision.
    Results are cached on disk by joblib.
    :param revision: revision id
    :return: 1-column dataframe with the test names
    """
    print(f"Querying db for test fails for rev {revision}")
    sql = Path(f"{database_home}test_fails_rev.sql").read_text()
    conn = pyodbc.connect(DB_CONFIG)
    return pd.read_sql_query(sql, conn, params=[revision])
@memory.cache
def get_test_execution_times(from_dt: str, to_dt: str) -> pd.DataFrame:
    """
    Query the database for the test execution times on a given date interval.
    Results are cached on disk by joblib.
    :param from_dt: start date
    :param to_dt: end date
    :return: 2-columns dataframe with the tests names and test execution times
    """
    print(f"Querying db for test execution times")
    sql = Path(f"{database_home}test_execution_times.sql").read_text()
    conn = pyodbc.connect(DB_CONFIG)
    return pd.read_sql_query(sql, conn, params=[from_dt, to_dt])
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,298
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/backend/selection/binary_mopso.py
|
# coding=utf-8
import random
from copy import copy
from typing import List, Optional
import numpy
from jmetal.config import store
from jmetal.core.algorithm import ParticleSwarmOptimization
from jmetal.core.problem import BinaryProblem
from jmetal.core.solution import BinarySolution
from jmetal.operator.mutation import BitFlipMutation
from jmetal.util.archive import BoundedArchive, NonDominatedSolutionListArchive
from jmetal.util.comparator import DominanceComparator, EpsilonDominanceComparator
from jmetal.util.solution_list import Evaluator, Generator
from jmetal.util.termination_criterion import TerminationCriterion
class BMOPSO(ParticleSwarmOptimization):
def __init__(
self,
problem: BinaryProblem,
swarm_size: int,
mutation: BitFlipMutation,
leaders: Optional[BoundedArchive],
epsilon: float,
termination_criterion: TerminationCriterion,
swarm_generator: Generator = store.default_generator,
swarm_evaluator: Evaluator = store.default_evaluator,
):
super(BMOPSO, self).__init__(problem=problem, swarm_size=swarm_size)
self.swarm_generator = swarm_generator
self.swarm_evaluator = swarm_evaluator
self.termination_criterion = termination_criterion
self.observable.register(termination_criterion)
self.mutation_operator = mutation
self.leaders = leaders
self.epsilon = epsilon
self.epsilon_archive = NonDominatedSolutionListArchive(
# EpsilonDominanceComparator(epsilon)
DominanceComparator()
)
self.c1_min = 1.5
self.c1_max = 2.0
self.c2_min = 1.5
self.c2_max = 2.0
self.r1_min = 0.0
self.r1_max = 1.0
self.r2_min = 0.0
self.r2_max = 1.0
self.weight_min = 0.1
self.weight_max = 0.5
self.change_velocity1 = -1
self.change_velocity2 = -1
self.dominance_comparator = DominanceComparator()
self.speed = numpy.zeros(
(
self.swarm_size,
self.problem.number_of_variables,
self.problem.number_of_tests,
),
dtype=float,
)
def create_initial_solutions(self) -> List[BinarySolution]:
return [self.swarm_generator.new(self.problem) for _ in range(self.swarm_size)]
def evaluate(self, solution_list: List[BinarySolution]):
return self.swarm_evaluator.evaluate(solution_list, self.problem)
def stopping_condition_is_met(self) -> bool:
return self.termination_criterion.is_met
def initialize_global_best(self, swarm: List[BinarySolution]) -> None:
for particle in swarm:
if self.leaders.add(particle):
self.epsilon_archive.add(copy(particle))
def initialize_particle_best(self, swarm: List[BinarySolution]) -> None:
for particle in swarm:
particle.attributes["local_best"] = copy(particle)
def initialize_velocity(self, swarm: List[BinarySolution]) -> None:
for i in range(self.swarm_size):
for j in range(self.problem.number_of_variables):
self.speed[i][j] = 0.0
def update_velocity(self, swarm: List[BinarySolution]) -> None:
for i in range(self.swarm_size):
best_particle = copy(swarm[i].attributes["local_best"])
best_global = self.select_global_best()
r1 = round(random.uniform(self.r1_min, self.r1_max), 1)
r2 = round(random.uniform(self.r2_min, self.r2_max), 1)
c1 = round(random.uniform(self.c1_min, self.c1_max), 1)
c2 = round(random.uniform(self.c2_min, self.c2_max), 1)
w = round(random.uniform(self.weight_min, self.weight_max), 1)
for var in range(swarm[i].number_of_variables):
best_particle_diff = numpy.subtract(
numpy.array(best_particle.variables[var]),
numpy.array(swarm[i].variables[var]),
dtype=numpy.float32,
)
best_global_diff = numpy.subtract(
numpy.array(best_global.variables[var]),
numpy.array(swarm[i].variables[var]),
dtype=numpy.float32,
)
self.speed[i][var] = (
w * numpy.array(self.speed[i][var])
+ (c1 * r1 * best_particle_diff)
+ (c2 * r2 * best_global_diff)
)
def update_position(self, swarm: List[BinarySolution]) -> None:
for i in range(self.swarm_size):
particle = swarm[i]
for j in range(particle.number_of_variables):
particle.variables[j] = self.compute_position(self.speed[i][j])
def compute_position(self, speed):
updated_positions = (
numpy.random.random_sample(speed.shape) < self._sigmoid(speed)
) * 1
return list(numpy.array(updated_positions, dtype=bool))
def _sigmoid(self, x):
return 1 / (1 + numpy.exp(-x))
def update_global_best(self, swarm: List[BinarySolution]) -> None:
for particle in swarm:
if self.leaders.add(copy(particle)):
self.epsilon_archive.add(copy(particle))
def update_particle_best(self, swarm: List[BinarySolution]) -> None:
for i in range(self.swarm_size):
flag = self.dominance_comparator.compare(
swarm[i], swarm[i].attributes["local_best"]
)
if flag != 1:
swarm[i].attributes["local_best"] = copy(swarm[i])
def perturbation(self, swarm: List[BinarySolution]) -> None:
for i in range(self.swarm_size):
if (i % 6) == 0:
self.mutation_operator.execute(swarm[i])
def select_global_best(self) -> BinarySolution:
leaders = self.leaders.solution_list
if len(leaders) > 2:
particles = random.sample(leaders, 2)
if self.leaders.comparator.compare(particles[0], particles[1]) < 1:
best_global = copy(particles[0])
else:
best_global = copy(particles[1])
else:
best_global = copy(self.leaders.solution_list[0])
return best_global
def init_progress(self) -> None:
self.evaluations = self.swarm_size
self.leaders.compute_density_estimator()
self.initialize_velocity(self.solutions)
self.initialize_particle_best(self.solutions)
self.initialize_global_best(self.solutions)
def update_progress(self) -> None:
self.evaluations += self.swarm_size
self.leaders.compute_density_estimator()
observable_data = self.get_observable_data()
observable_data["SOLUTIONS"] = self.epsilon_archive.solution_list
self.observable.notify_all(**observable_data)
def get_result(self) -> List[BinarySolution]:
    """Return the Pareto-front approximation kept in the epsilon archive."""
    archive = self.epsilon_archive
    return archive.solution_list
def get_name(self) -> str:
    """Human-readable identifier of this algorithm variant."""
    return "my-BMOPSO"
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,299
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/parse_xml.py
|
# coding=utf-8
import itertools
import os
from functools import partial
from multiprocessing import Pool
import click
import numpy as np
from backend.opencover import parser
@click.command("multiple")
@click.argument("reports_path")
@click.argument("output_name")
@click.argument("branch_name")
def process_multiple_xml_reports(reports_path, output_name, branch_name):
    """
    Parse OpenCover's XML coverage reports into an activity matrix and tests/methods name maps
    Assumes that the reports_path is a directory containing multiple coverage reports.
    :param reports_path: path to directory containing the coverage reports
    :param output_name: name of the output files generated for the activity matrix and maps
    :param branch_name: name of the branch used for matching with files in the repository
    """
    # Get coverage reports files from the directory
    report_files = list(
        map(
            lambda report: os.path.abspath(os.path.join(reports_path, report)),
            os.listdir(reports_path),
        )
    )
    # Collect id-activity matrices for each report, in parallel.
    with Pool(processes=2) as pool:
        result = pool.map(
            partial(get_id_activity_matrix, branch=branch_name), report_files
        )
    # Each result row is (id_act_matrix, methods_map, tests_map); the maps
    # are taken from the first row only -- presumably identical across
    # reports; TODO confirm.
    # NOTE(review): np.array over rows of dicts builds an object-dtype
    # array; verify this stays supported by the numpy version in use.
    array_result = np.array(result)
    id_act_matrices = array_result[:, 0]
    methods_map, tests_map = array_result[0, 1], array_result[0, 2]
    # Merge id-activity matrices: for every key seen in any report,
    # concatenate the per-report activity lists.
    x = {
        k: [d.get(k, []) for d in id_act_matrices]
        for k in {k for d in id_act_matrices for k in d}
    }
    merged_id_act_matrices = {k: list(itertools.chain(*x[k])) for k in x}
    # Export merged results
    export_activity_matrix(output_name, methods_map, tests_map, merged_id_act_matrices)
def export_activity_matrix(output_name, methods_map, tests_map, activity_matrix):
    """
    Build+export activity matrix and tests/methods map to JSON files.
    :param output_name: name identifier for the JSON output files
    :param methods_map: methods map
    :param tests_map: tests map
    :param activity_matrix: id-activity matrix
    """
    # Convert id-activity matrix to binary activity matrix
    print("Converting to the binary activity matrix")
    binary_act_matrix = parser.build_binary_activity_matrix(
        activity_matrix, methods_map, tests_map
    )
    # Filter activity matrix to reduce json file output size
    # (fixed typo "activty" in the original log message; dropped the
    # f-prefix on strings with no placeholders)
    print("Filtering methods/tests with no activity from the matrix")
    filter_act_matrix, methods_map, tests_map = parser.filter_activity_matrix(
        binary_act_matrix, methods_map, tests_map
    )
    # Export results to json
    print("Exporting processed data to json files")
    parser.export_data_to_json(output_name, filter_act_matrix, methods_map, tests_map)
    print("Report processing done")
def get_id_activity_matrix(xml_report, branch):
    """
    Parse one OpenCover XML report into an id-activity matrix plus the
    tests/methods uid maps.
    :param xml_report: path to a single coverage report file
    :param branch: branch name used to resolve repository file paths
    :return: tuple (id_act_matrix, methods_map, tests_map)
    """
    # Get files map
    print(f"Getting file map to handle c# namespace issues")
    files_map = parser.get_files_map_from_report(xml_report, branch)
    # Split xml report based on module type (test vs code)
    print(f"Loading xml report {xml_report}")
    test_modules, code_modules = parser.get_modules_from_report(xml_report)
    # Fill uid maps with tests names and methods names
    print(f"Mapping tests and methods uids")
    tests_map = parser.build_tests_map(test_modules)
    methods_map = parser.build_methods_map(code_modules)
    # Build activity matrix based on ids
    print(f"Building the id-activity matrix")
    id_act_matrix = parser.build_id_activity_matrix(
        code_modules, methods_map, files_map
    )
    print(f" {xml_report} -- {len(id_act_matrix)}")
    return id_act_matrix, methods_map, tests_map
# Click entry point: parse CLI arguments and run the report processing.
if __name__ == "__main__":
    process_multiple_xml_reports()
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,300
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/run_combos.py
|
# coding=utf-8
import os
from multiprocessing import Pool
import click
@click.group()
def cli():
    """Root command group; subcommands are attached below."""
    pass
@cli.command("start")
@click.argument("input_list", type=click.Path(exists=True, readable=True))
def start(input_list):
    """Run every command listed in *input_list*, three at a time."""
    with open(input_list, mode="r") as infile:
        combos = infile.readlines()
    # Three worker processes run the commands concurrently.
    with Pool(processes=3) as pool:
        pool.map(run_command, combos)
def run_command(command):
    """Echo and execute one shell command line (blocking).

    NOTE(review): the command string comes straight from the input file
    and is handed to the shell unescaped -- only run trusted input files.
    """
    print("Running command: " + command)
    os.system(command)
# Click entry point: dispatch to the registered subcommands.
if __name__ == "__main__":
    cli()
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,301
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/join_metrics.py
|
# coding=utf-8
import os
import pickle
import re
from itertools import permutations
from itertools import product
import click
from backend.evaluation.summary import ResultsSummary
from generate_tests import COVERAGE_MAP
from generate_tests import HISTORY_MAP
@click.group()
def cli():
    """Root command group; the merge subcommands are attached below."""
    pass
@cli.command("per_size")
@click.argument("data_dir", type=click.Path(exists=True))
def start(data_dir):
    """Print merged results for each swarm-size experiment."""
    # NOTE(review): all three commands in this file reuse the name
    # `start`; click binds each function at decoration time, so only the
    # module attribute is shadowed -- the CLI commands all work.
    sizes = [5, 10, 25, 50, 100, 200, 400]
    for size in [str(x) for x in sizes]:
        print_merged_results(size, data_dir)
@cli.command("per_2combos")
@click.argument("data_dir", type=click.Path(exists=True))
def start(data_dir):
    """Print merged results for every ordered coverage+history pair."""
    all_combos = []
    for (cov, hist) in product(COVERAGE_MAP.items(), HISTORY_MAP.items()):
        # Both orderings of the pair, e.g. "df" and "fd".
        combos = [f"{m1}{m2}" for ((_, m1), (_, m2)) in permutations([cov, hist], 2)]
        all_combos.extend(combos)
    all_combos.sort()
    for name in all_combos:
        print_merged_results(name, data_dir)
@cli.command("per_3combos")
@click.argument("data_dir", type=click.Path(exists=True))
def start(data_dir):
    """Print merged results for every ordered coverage+history+history
    triple, skipping repeated history metrics and duplicate names."""
    all_combos = []
    for (cov, hist1, hist2) in product(
        COVERAGE_MAP.items(), HISTORY_MAP.items(), HISTORY_MAP.items()
    ):
        # The two history metrics must differ.
        if hist1 == hist2:
            continue
        combos = permutations([cov, hist1, hist2], 3)
        for ((_, m1_name), (_, m2_name), (_, m3_name)) in combos:
            metrics_name = f"{m1_name}{m2_name}{m3_name}"
            if metrics_name in all_combos:
                continue
            all_combos.append(metrics_name)
    all_combos.sort()
    for name in all_combos:
        print_merged_results(name, data_dir)
def print_merged_results(key, data_dir):
    """
    Merge the pickled ResultsSummary files matching *key* across the four
    evaluation batches and print one averaged CSV line.
    :param key: metrics-combination identifier embedded in the file names
    :param data_dir: directory containing the pickled result files
    """
    key_results = []
    for batch in ["demo1", "demo2", "demo3", "demo4"]:
        pattern = re.compile(r"_" + key + r"_" + batch + r".pickle")
        results = [
            os.path.abspath(os.path.join(data_dir, x))
            for x in os.listdir(data_dir)
            if re.search(pattern, x) is not None
        ]
        # Load the first summary of the batch, then fold the others into
        # it.  Files are opened with context managers so the descriptors
        # are closed promptly (the original leaked one per file).
        with open(results[0], mode="rb") as first:
            aggregated: ResultsSummary = pickle.load(first)
        for file in results[1:]:
            with open(file, mode="rb") as handle:
                aggregated.merge_same(pickle.load(handle))
        key_results.append(aggregated)
    # Collapse the per-batch summaries into one, then average over the
    # four batches.
    while len(key_results) > 1:
        key_results[0].merge_diff(key_results.pop())
    key_final = key_results.pop()
    key_final.normalize_diff(4)
    print(f"{key_final.export_to_csv_line(prefix=key.upper())}")
# Click entry point: dispatch to per_size / per_2combos / per_3combos.
if __name__ == "__main__":
    cli()
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,302
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/backend/evaluation/summary.py
|
# coding=utf-8
import gc
import pickle
from collections import Counter
from dataclasses import dataclass
from typing import List, BinaryIO
import numpy as np
from backend.evaluation import utils
from backend.evaluation.execution_item import RevisionResults
from backend.selection.problem_data import ProblemData
# Ordered labels for the values produced by utils.get_metric_stats:
# basic stats first, then the percentiles.
STATS_KEYS = ["avg", "min", "max", "std", "P10", "P25", "P50", "P75", "P90"]
@dataclass
class ResultsSummary:
    """Aggregated evaluation metrics for one test-selection experiment run.

    NOTE(review): the explicitly defined __init__ below stops @dataclass
    from generating one; the decorator still supplies __repr__/__eq__.
    """

    data: List[RevisionResults]  # per-revision results (solutions stripped)
    commits: dict  # total/red commit counts
    executions: dict  # tool-execution counts
    errors: dict  # counts per error category
    red_stats: dict  # precision/recall-style metrics over red commits
    solution_size: dict  # stats over selected test-suite sizes
    computing_time: dict  # stats over optimisation run times
    orig_feedback_time: float  # feedback time of the full test suite
    new_feedback_time: dict  # stats over selected-suite feedback times

    def __init__(self, results: List[RevisionResults], data: ProblemData):
        """
        Populate results summary with evaluation metrics values.
        :param results: list of execution results
        :param data: full dataset related to this set of results
        """
        tool_executions = utils.get_tool_executions(results)
        tool_no_exec = utils.get_tool_no_executions(results)
        total_innocent_reds = utils.get_total_innocent_reds(results)
        # Commits -- "red" means the revision has real failing-test history.
        red_commits = [res for res in results if len(res.real_rev_history) > 0]
        self.commits = {
            "total": len(results),
            "red": len(red_commits),
            "red_p": len(red_commits) / len(results),
        }
        # Executions
        red_executions = [
            res for res in tool_executions if len(res.real_rev_history) > 0
        ]
        self.executions = {
            "total": len(tool_executions),
            "total_p": len(tool_executions) / len(results),
            "red": len(red_executions),
            "red_p": len(red_executions) / len(tool_executions),
        }
        # Errors: display label -> message fragment matched by
        # utils.get_error_stats against the non-executed revisions.
        error_cases = {
            "No .cs Files": "no covered .cs files",
            "No Coverage Data": "no coverage data",
            "New Files": "new files or modified",
        }
        self.errors = {}
        for error, pattern in error_cases.items():
            total, red = utils.get_error_stats(pattern, tool_no_exec)
            self.errors[error] = {"total": len(total), "red": len(red)}
        # Red Stats: "yes, at least one", Precision, Recall
        self.set_red_stats(red_executions, total_innocent_reds)
        # Solution Size, Computing Time
        self.set_solution_size(tool_executions)
        self.set_computing_time(tool_executions)
        # Feedback Time (original, new)
        self.orig_feedback_time = sum(data.history_test_execution_times.values())
        self.set_feedback_time(tool_executions)
        # Store data; drop found solutions to keep pickled summaries small.
        self.data = results
        for res in self.data:
            res.solutions_found = []

    def set_red_stats(self, red_execs: List[RevisionResults], total_innocent_reds: int):
        """
        Populate map of values related to red executions, namely Precision and Recall values.
        :param red_execs: list of execution results for red commits
        :param total_innocent_reds: total number of innocent red commits
        """
        # score[0] convention: -1 -> only ignored tests, 0 -> no failing
        # test selected, >0 -> at least one selected -- presumably set by
        # RevisionResults; TODO confirm.
        not_found_red_tests = [res for res in red_execs if res.score[0] == 0]
        red_ignored_tests = [res for res in red_execs if res.score[0] == -1]
        found_red_tests_at_least_one = [res for res in red_execs if res.score[0] > 0]
        self.red_stats = {
            "Innocent Reds": total_innocent_reds,
            "Only Ignored Tests": len(red_ignored_tests),
            "Valid Reds": len(red_execs),
            "No": len(not_found_red_tests) / len(red_execs),
            "At Least One": len(found_red_tests_at_least_one) / len(red_execs),
            "Macro-Precision": utils.get_macro_precision(red_execs),
            "Micro-Precision": utils.get_micro_precision(red_execs),
            "Macro-Recall": utils.get_macro_recall(red_execs),
            "Micro-Recall": utils.get_micro_recall(red_execs),
        }

    def set_solution_size(self, executions: List[RevisionResults]):
        """
        Populate solution size map with stats and percentiles values
        - Stats: average, min, max, standard deviation
        - Percentiles: 10, 25, 50, 75, 90
        :param executions: list of execution results
        """
        # score[3] holds the selected-suite size for the execution.
        sizes = np.array([res.score[3] for res in executions])
        self.solution_size = dict(zip(STATS_KEYS, utils.get_metric_stats(sizes)))

    def set_computing_time(self, executions: List[RevisionResults]):
        """
        Populate computing time map with stats and percentiles values
        - Stats: average, min, max, standard deviation
        - Percentiles: 10, 25, 50, 75, 90
        :param executions: list of execution results
        """
        # Zero/negative times are treated as "not measured" and skipped.
        times = np.array(
            [res.computing_time for res in executions if res.computing_time > 0]
        )
        self.computing_time = dict(zip(STATS_KEYS, utils.get_metric_stats(times)))

    def set_feedback_time(self, executions: List[RevisionResults]):
        """
        Populate feedback time map with stats and percentiles values
        - Stats: average, min, max, standard deviation
        - Percentiles: 10, 25, 50, 75, 90
        :param executions: list of execution results
        """
        feedback_times = np.array(
            [res.new_feedback_time for res in executions if res.new_feedback_time > 0]
        )
        self.new_feedback_time = dict(
            zip(STATS_KEYS, utils.get_metric_stats(feedback_times))
        )

    def recompute_innocent(self):
        """
        Recompute all evaluation metrics in this summary using the innocent commit filter
        """
        results = self.data
        tool_executions = utils.get_tool_executions(results)
        total_innocent_reds = utils.get_total_innocent_reds(results)
        red_executions = [
            res for res in tool_executions if len(res.real_rev_history) > 0
        ]
        # Keep only reds not flagged innocent for the red statistics.
        not_innocent_red_executions = [
            res for res in red_executions if res.innocent is not True
        ]
        self.set_red_stats(not_innocent_red_executions, total_innocent_reds)
        self.set_solution_size(tool_executions)
        self.set_computing_time(tool_executions)
        self.set_feedback_time(tool_executions)

    def export_to_text(self):
        """
        Export the summary in text format to stdout
        """
        commits = list(self.commits.values())
        print(f"# Commits - {commits[0]} (red: {commits[1]} -> {commits[2]*100:.0f}%)")
        execs = list(self.executions.values())
        print(
            f"Tool Executions: {execs[0]} -> {execs[1]*100:.0f}% "
            f" (red: {execs[2]} - {execs[3]*100:.0f}%)"
        )
        # Unpacking each value dict yields its KEYS -- the strings "total"
        # and "red" -- which are then reused as the lookup keys below.
        for error, [total, red] in self.errors.items():
            print(
                f"# {error}: {self.errors[error][total]} (red: {self.errors[error][red]})"
            )
        print("Tool Found Red Test(s) ?")
        # red_stats values in insertion order, printed positionally.
        red_stats = list(self.red_stats.values())
        print(f"Innocent Reds: {red_stats[0]}")
        print(f"Only Ignored Tests: {red_stats[1]}")
        print(f"Score Stats (for actual reds)")
        print(f"Valid Reds: {red_stats[2]}")
        print(f"No: {red_stats[3] * 100:.0f}%")
        print(f"Yes, At least one: {red_stats[4] * 100:.0f}%")
        print(f"Macro-Precision: {red_stats[5] * 100:.0f}%")
        print(f"Micro-Precision: {red_stats[6] * 100:.0f}%")
        print(f"Macro-Recall: {red_stats[7] * 100:.0f}%")
        print(f"Micro-Recall: {red_stats[8] * 100:.0f}%")
        solution_size = list(self.solution_size.values())
        self.print_metric_stats("Solution Size", solution_size)
        computing_time = list(self.computing_time.values())
        self.print_metric_stats("Computing Time", computing_time)
        print(f"Original Feedback Time: {self.orig_feedback_time:.0f}")
        feedback_time = list(self.new_feedback_time.values())
        self.print_metric_stats("New Feedback Time", feedback_time)

    def export_to_pickle(self, file: BinaryIO):
        """
        Exports the summary to a pickle file.
        :param file: output file descriptor
        """
        # Force garbage collection due to memory concerns when handling multiple summaries
        gc.collect()
        pickle.dump(self, file, protocol=pickle.HIGHEST_PROTOCOL)

    def export_to_csv_line(self, only_stats: bool = False, prefix: str = None) -> str:
        """
        Get a single CSV line representation of the summary using "|" (vertical bar) as separator.
        :param only_stats: flag indicating if the line should contain only metrics and stats values
        :param prefix: a custom first element for the line, if needed
        :return: the CSV line as a string
        """
        line = [prefix] if prefix is not None else []
        if not only_stats:
            line.extend(list(self.commits.values()))
            line.extend(list(self.executions.values()))
            # As in export_to_text: [total, red] unpacks the value dict's
            # keys ("total"/"red"), reused for the lookups below.
            for error, [total, red] in self.errors.items():
                line.extend([self.errors[error][total], self.errors[error][red]])
        line.extend(list(self.red_stats.values()))
        line.extend(list(self.solution_size.values()))
        line.extend(list(self.computing_time.values()))
        line.extend([int(self.orig_feedback_time)])
        line.extend(list(self.new_feedback_time.values()))
        # stringify items
        line = [str(x) for x in line]
        return "|".join(line)

    @staticmethod
    def print_metric_stats(name: str, data: List):
        """
        Print avg, min, max, stdev + percentiles (10, 25, 50, 75, 90)
        :param name: name of evaluation metric
        :param data: list of data points
        """

        def unpack(values):
            # Helper function for unpacking the values into the f-string
            return ",".join(str(x) for x in values)

        stats, percentiles = data[0:4], data[4:]
        print(f"{name} (avg, min, max, std): ({unpack(stats)})")
        print(f"{name} Percentiles (10, 25, 50, 75, 90): ({unpack(percentiles)})")

    def merge_same(self, other: "ResultsSummary"):
        """
        Merge the results of two summaries from the same evaluation period.
        Note: this assumes that the summaries are equal except for stats, which are added up
        :param other: the other ResultsSummary object to be merged with
        """
        self.red_stats = add_counter(self.red_stats, other.red_stats)
        self.new_feedback_time = add_counter(
            self.new_feedback_time, other.new_feedback_time
        )

    def merge_diff(self, other: "ResultsSummary"):
        """
        Merge the results of two summaries from different evaluation periods.
        :param other: the other ResultsSummary object to be merged with
        """
        self.commits = add_counter(self.commits, other.commits)
        self.executions = add_counter(self.executions, other.executions)
        for error in self.errors:
            self.errors[error] = add_counter(self.errors[error], other.errors[error])
        self.red_stats = add_counter(self.red_stats, other.red_stats)
        self.solution_size = add_counter(self.solution_size, other.solution_size)
        self.computing_time = add_counter(self.computing_time, other.computing_time)
        self.orig_feedback_time = self.orig_feedback_time + other.orig_feedback_time
        self.new_feedback_time = add_counter(
            self.new_feedback_time, other.new_feedback_time
        )

    def normalize_diff(self, n: int):
        """
        Normalize (average) results in this summary by a number n
        """
        # Only the ratio/stat entries are averaged; raw totals stay summed.
        self.commits["red_p"] = self.commits["red_p"] / n
        self.executions["total_p"] = self.executions["total_p"] / n
        self.executions["red_p"] = self.executions["red_p"] / n
        for k in self.red_stats:
            self.red_stats[k] = self.red_stats[k] / n
        for k in self.solution_size:
            self.solution_size[k] = int(self.solution_size[k] / n)
        for k in self.computing_time:
            self.computing_time[k] = int(self.computing_time[k] / n)
        self.orig_feedback_time = self.orig_feedback_time / n
        for k in self.new_feedback_time:
            self.new_feedback_time[k] = int(self.new_feedback_time[k] / n)
def add_counter(prop1: dict, prop2: dict):
    """
    Add the numeric values of two dicts key-wise without breaking when a key
    exists in only one of them.

    Counter's ``+`` operator silently drops non-positive counts, so every
    key present in either input is first biased by +1, the Counters are
    summed, and the bias is removed again.  (Fix: the original only biased
    ``prop1``'s keys, so a key appearing only in ``prop2`` lost 1 from its
    value and zero-valued ``prop2``-only keys vanished.)

    :param prop1: a dictionary
    :param prop2: another dictionary
    :return: a Counter object with the key-wise sum of the two dicts
    """
    bias = Counter({key: 1 for key in prop1})
    bias.update({key: 1 for key in prop2 if key not in prop1})
    total = bias + Counter(prop1) + Counter(prop2)
    for key in total:
        total[key] -= 1
    return total
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,303
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/backend/opencover/utils.py
|
# coding=utf-8
""" Helper methods to access xml elements """
def get_module_name(module):
    """Text of the first <ModuleName> element found under *module*."""
    name_elem = next(module.iter("ModuleName"))
    return name_elem.text
def get_module_tracked_methods(module):
    # First <TrackedMethods> element under *module*.
    return next(module.iter("TrackedMethods"))
def get_module_classes(module):
    # First <Classes> element under *module*.
    return next(module.iter("Classes"))
def get_module_files(module):
    # First <Files> element under *module*.
    return next(module.iter("Files"))
def get_class_methods(clazz):
    # First <Methods> element under *clazz*.
    return next(clazz.iter("Methods"))
def get_method_name(method):
    # Text of the first <Name> element under *method*.
    return next(method.iter("Name")).text
def get_method_file_ref(method):
    # First <FileRef> element under *method*, or None when absent.
    return next(method.iter("FileRef"), None)
def get_method_coverage(method):
    """Return [method_name, visiting_test_uids] for *method*, or
    [None, None] when no coverage information is present."""
    # No <MethodPoint> at all, or one without children: no coverage info.
    method_point = next(method.iter("MethodPoint"), None)
    if method_point is None or not list(method_point):
        return [None, None]
    # The first child holds the tracked-method references; empty means the
    # first sequence point was never visited.
    tracked_refs = method_point[0]
    if not list(tracked_refs):
        return [None, None]
    # uids of the tests that visited the method's first sequence point.
    tests_uids = [ref.attrib["uid"] for ref in tracked_refs]
    return [get_method_name(method), tests_uids]
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,304
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/generate_tests.py
|
# coding=utf-8
# Short codes used to compose metric-combination names in output files.
COVERAGE_MAP = {"ddu": "d", "norm_coverage": "n"}
HISTORY_MAP = {"exec_times": "e", "fails": "f", "n_tests": "t"}
# One entry per evaluation batch:
# (batch name, tests input file, activity-matrix JSON, pipeline config).
DATA = [
    (
        "demo1",
        "all_trunk_demo1_tests.in",
        "data\\jsons\\actmatrix_v2_trunk_demo1.json",
        "data\\poc_demos\\trunk_demo1.config",
    ),
    (
        "demo2",
        "all_trunk_demo2_tests.in",
        "data\\jsons\\actmatrix_v2_trunk_demo2.json",
        "data\\poc_demos\\trunk_demo2.config",
    ),
    (
        "demo3",
        "all_trunk_demo3_tests.in",
        "data\\jsons\\actmatrix_v2_trunk_demo3.json",
        "data\\poc_demos\\trunk_demo3.config",
    ),
    (
        "demo4",
        "all_trunk_demo4_tests.in",
        "data\\jsons\\actmatrix_v2_trunk_demo4.json",
        "data\\poc_demos\\trunk_demo4.config",
    ),
]
# Command templates and destination directory for the generated scripts.
COMMAND = "python testsel_pipeline.py demo"
RANDOM_COMMAND = "python testsel_pipeline.py random"
OUTPUT_PATH = "data\\results\\thesis"
def print_command(metrics, size, data, config, output):
    """Print one test-selection pipeline invocation line."""
    line = f"{COMMAND} {metrics} {size} {data} {config} {output}.pickle > {output}.out"
    print(line)
def print_random_command(
    tests, data, config, output, random_prob, fixed=False, filtered=False
):
    """Print one random-baseline pipeline invocation line."""
    flags = ("--fixed " if fixed else "") + ("--filtered " if filtered else "")
    print(
        f"{RANDOM_COMMAND} {flags}{random_prob} {tests} {data} {config} {output}.pickle > {output}.out"
    )
def baseline_tests():
    """Print baseline pipeline commands (ddu+fails, swarm size 100).

    Bug fix: DATA entries are 4-tuples (batch, tests, json, config); the
    original unpacked only three values and raised ValueError at runtime.
    """
    base = f"{OUTPUT_PATH}\\baseline\\base_"
    metrics, size = "-o ddu -o fails", 100
    for (batch, _, json_data, config) in DATA:
        name = f"{base}{batch}"
        print_command(metrics, size, json_data, config, name)
    print()
def metrics_2combos_tests():
    """Print commands for every ordered (coverage, history) metric pair."""
    from itertools import permutations, product

    base = f"{OUTPUT_PATH}\\metrics_combos\\mcombos_"
    for (batch, _, json_data, config) in DATA:
        for (cov, hist) in product(COVERAGE_MAP.items(), HISTORY_MAP.items()):
            # Both orderings of the pair, e.g. "-o ddu -o fails" and back.
            combos = permutations([cov, hist], 2)
            for ((m1_key, m1_name), (m2_key, m2_name)) in combos:
                name = f"{base}{m1_name}{m2_name}_{batch}"
                metrics, size = f"-o {m1_key} -o {m2_key}", 100
                print_command(metrics, size, json_data, config, name)
        print()
    print()
def metrics_3combos_tests():
    """Print commands for every ordered (coverage, history, history)
    triple, skipping repeated history metrics and duplicate names."""
    from itertools import permutations, product

    base = f"{OUTPUT_PATH}\\metrics_combos\\mcombos_"
    for (batch, _, json_data, config) in DATA:
        combos_done = []
        for (cov, hist1, hist2) in product(
            COVERAGE_MAP.items(), HISTORY_MAP.items(), HISTORY_MAP.items()
        ):
            # The two history metrics must differ.
            if hist1 == hist2:
                continue
            combos = permutations([cov, hist1, hist2], 3)
            for ((m1_key, m1_name), (m2_key, m2_name), (m3_key, m3_name)) in combos:
                metrics_name = f"{m1_name}{m2_name}{m3_name}"
                if metrics_name in combos_done:
                    continue
                name = f"{base}{metrics_name}_{batch}"
                metrics, size = f"-o {m1_key} -o {m2_key} -o {m3_key}", 100
                print_command(metrics, size, json_data, config, name)
                combos_done.append(metrics_name)
        print()
    print()
def metrics_4combos_tests():
    """Print commands for every ordered 4-tuple of one coverage metric and
    three pairwise-distinct history metrics, deduplicated by name."""
    from itertools import permutations, product

    base = f"{OUTPUT_PATH}\\metrics_combos\\mcombos_"
    for (batch, _, json_data, config) in DATA:
        combos_done = []
        for (cov, hist1, hist2, hist3) in product(
            COVERAGE_MAP.items(),
            HISTORY_MAP.items(),
            HISTORY_MAP.items(),
            HISTORY_MAP.items(),
        ):
            # All three history metrics must be pairwise distinct.
            if hist1 == hist2 or hist1 == hist3 or hist2 == hist3:
                continue
            combos = permutations([cov, hist1, hist2, hist3], 4)
            for (
                (m1_key, m1_name),
                (m2_key, m2_name),
                (m3_key, m3_name),
                (m4_key, m4_name),
            ) in combos:
                metrics_name = f"{m1_name}{m2_name}{m3_name}{m4_name}"
                if metrics_name in combos_done:
                    continue
                name = f"{base}{metrics_name}_{batch}"
                metrics, size = f"-o {m1_key} -o {m2_key} -o {m3_key} -o {m4_key}", 100
                print_command(metrics, size, json_data, config, name)
                combos_done.append(metrics_name)
        print()
    print()
def swarm_size_tests():
    """Print commands sweeping the PSO swarm size for every batch."""
    base = f"{OUTPUT_PATH}\\swarm_size\\swsize_"
    metrics = "-o ddu -o fails"
    for (batch, _, json_data, config) in DATA:
        for size in (5, 10, 25, 50, 100, 200, 400):
            print_command(metrics, size, json_data, config, f"{base}{size}_{batch}")
        print()
def random_fixed_tests():
    """Print commands for the fixed-size random baseline, 10 repetitions
    per (probability, batch) pair."""
    base = f"{OUTPUT_PATH}\\random_fixed\\ranfixed_"
    for (batch, tests, json_data, config) in DATA:
        for prob in (0.10, 0.15, 0.20, 0.25):
            for rep in range(1, 11):
                name = f"{base}{int(prob * 100)}_{rep}_{batch}"
                print_random_command(
                    tests, json_data, config, name, prob, fixed=True, filtered=False
                )
            print()
        print()
def random_dynamic_tests():
    """Print commands for the dynamic-size random baseline, 10 repetitions
    per (probability, batch) pair."""
    base = f"{OUTPUT_PATH}\\random_dynamic\\randynam_"
    for (batch, tests, json_data, config) in DATA:
        random_p = [0.10, 0.15, 0.20, 0.25]
        for prob in random_p:
            for i in range(1, 11):
                name = f"{base}{str(int(prob*100))}_{i}_{batch}"
                print_random_command(
                    tests, json_data, config, name, prob, fixed=False, filtered=False
                )
            print()
        print()
def random_dynamic_filtered_tests():
    """Print commands for the dynamic-size, filtered random baseline, 10
    repetitions per (probability, batch) pair."""
    base = f"{OUTPUT_PATH}\\random_dynamic_filter\\randynamfilter_"
    for (batch, tests, json_data, config) in DATA:
        random_p = [0.10, 0.15, 0.20, 0.25]
        for prob in random_p:
            for i in range(1, 11):
                name = f"{base}{str(int(prob*100))}_{i}_{batch}"
                print_random_command(
                    tests, json_data, config, name, prob, fixed=False, filtered=True
                )
            print()
        print()
def random_fixed_filtered_tests():
    """Print commands for the fixed-size, filtered random baseline, 10
    repetitions per (probability, batch) pair."""
    base = f"{OUTPUT_PATH}\\random_fixed_filter\\ranfixedfilter_"
    for (batch, tests, json_data, config) in DATA:
        random_p = [0.10, 0.15, 0.20, 0.25]
        for prob in random_p:
            for i in range(1, 11):
                name = f"{base}{str(int(prob*100))}_{i}_{batch}"
                print_random_command(
                    tests, json_data, config, name, prob, fixed=True, filtered=True
                )
            print()
        print()
if __name__ == "__main__":
    # Only the random-baseline suites are generated by default; the other
    # experiment generators below are kept, commented out, for reuse.
    random_fixed_tests()
    random_fixed_filtered_tests()
    random_dynamic_tests()
    random_dynamic_filtered_tests()
    # baseline
    # baseline_tests()
    # swarm size
    # swarm_size_tests()
    # metrics 2-combos
    # metrics_2combos_tests()
    # metrics 3-combos
    # metrics_3combos_tests()
    # metrics 4-combos
    # metrics_4combos_tests()
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,305
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/join_randoms.py
|
# coding=utf-8
import os
import pickle
import re
from typing import Optional, Any
import click
from backend.evaluation.summary import ResultsSummary
@click.group()
def cli():
    """Root command group; per_batch and per_prob are attached below."""
    pass
@cli.command("per_batch")
@click.argument("data_dir", type=click.Path(exists=True))
@click.option(
    "--innocent", is_flag=True, help="Recompute each sample using innocent filter"
)
def start(data_dir, innocent):
    """
    Merge the random-baseline repetitions per (batch, probability) pair
    and print each averaged summary as a CSV line.
    :param data_dir: directory containing the pickled ResultsSummary files
    :param innocent: recompute each sample with the innocent-commit filter
    """
    for batch in ["demo1", "demo2", "demo3", "demo4"]:
        for prob in [str(int(x * 100)) for x in [0.10, 0.15, 0.20, 0.25]]:
            # Escaped "." so only real .pickle files match (the original's
            # bare "." matched any character).
            pattern = re.compile(prob + r"_\d+_" + batch + r"\.pickle")
            results = [
                os.path.abspath(os.path.join(data_dir, x))
                for x in os.listdir(data_dir)
                if re.search(pattern, x) is not None
            ]
            # Context managers close the pickle files promptly (the
            # original leaked one descriptor per file).
            with open(results[0], mode="rb") as first:
                aggregated: ResultsSummary = pickle.load(first)
            if innocent:
                aggregated.recompute_innocent()
            for file in results[1:]:
                with open(file, mode="rb") as handle:
                    summary = pickle.load(handle)
                if innocent:
                    summary.recompute_innocent()
                aggregated.merge_same(summary)
            # Average the summed stats over the 10 repetitions.
            # NOTE(review): the divisor 10 is hard-coded; confirm it
            # matches the number of repetition files actually found.
            for k in aggregated.red_stats:
                aggregated.red_stats[k] = aggregated.red_stats[k] / 10
            for k in aggregated.new_feedback_time:
                aggregated.new_feedback_time[k] = aggregated.new_feedback_time[k] / 10
            print(f"{aggregated.export_to_csv_line()}")
@cli.command("per_prob")
@click.argument("data_dir", type=click.Path(exists=True))
@click.option(
"--innocent", is_flag=True, help="Recompute each sample using innocent filter"
)
def start(data_dir, innocent):
for prob in [str(int(x * 100)) for x in [0.10, 0.15, 0.20, 0.25]]:
prob_results = []
for batch in ["demo1", "demo2", "demo3", "demo4"]:
pattern = re.compile(prob + r"_\d+_" + batch + r".pickle")
results = [
os.path.abspath(os.path.join(data_dir, x))
for x in os.listdir(data_dir)
if re.search(pattern, x) is not None
]
aggregated: ResultsSummary = pickle.load(open(results[0], mode="rb"))
if innocent:
aggregated.recompute_innocent()
for file in results[1:]:
summary = pickle.load(open(file, mode="rb"))
if innocent:
summary.recompute_innocent()
aggregated.merge_same(summary)
for k in aggregated.red_stats:
aggregated.red_stats[k] = aggregated.red_stats[k] / 10
for k in aggregated.new_feedback_time:
aggregated.new_feedback_time[k] = aggregated.new_feedback_time[k] / 10
prob_results.append(aggregated)
while len(prob_results) > 1:
prob_results[0].merge_diff(prob_results.pop())
prob_final = prob_results.pop()
prob_final.normalize_diff(4)
print(f"{prob_final.export_to_csv_line()}")
if __name__ == "__main__":
cli()
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,306
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/backend/selection/problem_data.py
|
# coding=utf-8
import json
import re
from typing import List
import pandas as pd
from collections import defaultdict
from dataclasses import dataclass
import numpy as np
from backend.integrations import database
def normalize_test_name(tests: np.ndarray):
    """
    Normalize test names to match the database convention.
    - Replace / with + to support dashboard tests
    - Discard the return type (first whitespace-separated token)
    - Keep only "<namespace>.<method>"
    :param tests: list of raw test names
    :return: lazy map over the normalized names
    """
    def _normalize(raw_name):
        # Second token is "<namespace>::<method>(...)" after the return type.
        qualified = raw_name.replace("/", "+").split(" ")[1]
        namespace, method = re.search(r"(.*)::(.*)\(", qualified).groups()
        return f"{namespace}.{method}"

    return map(_normalize, tests)
def normalize_iterative_test_name(test: str) -> str:
    """
    Normalize an iterative test name, if necessary.

    Iterative tests carry a "+<suffix>" after the qualified name; strip it so
    the name matches the non-iterative database entry.

    :param test: test name
    :return: normalized test name (unchanged when no iterative suffix exists)
    """
    # Evaluate the regex once (the original ran the same match twice).
    match = re.match(r"(.*\..+)\+.+", test)
    return match.group(1) if match else test
def get_historical_metric_map(query_results: pd.DataFrame) -> dict:
    """
    Convert 2-column query results to a dictionary mapping a test name to the
    accumulated historical metric value.
    :param query_results: 2-column pandas dataframe (test name, metric value)
    :return: dictionary mapping normalized test names to summed metric values
    """
    metric_by_test = defaultdict(int)
    for test_name, metric_value in query_results.values:
        # Iterative variants fold into their base test's entry.
        metric_by_test[normalize_iterative_test_name(test_name)] += metric_value
    return metric_by_test
@dataclass
class ProblemData:
    """
    One test-selection problem instance: the binary coverage (activity)
    matrix, the test/method indexes, and historical metrics from the database.
    """

    # Pristine copies of the loaded data, kept so reset() can restore them.
    original_matrix: np.ndarray
    original_tests: np.ndarray
    original_methods: np.ndarray
    # Current working view (may be filtered per commit).
    activity_matrix: np.ndarray
    tests_index: np.ndarray
    methods_index: np.ndarray
    methods_map: dict
    history_test_fails: dict
    history_test_execution_times: dict
    new_files: dict
    branch: str
    ignore_tests: list
    swarm_size: int

    def __init__(
        self,
        activity_matrix_path,
        branch,
        fails_start_date,
        from_date,
        to_date,
        ignore_tests=None,
    ):
        """
        ProblemData initialization.
        - Load JSON data for an activity matrix file
        - Filter tests with no activity (zero rows)
        :param activity_matrix_path: path of the activity matrix JSON file
        :param branch: VCS branch prefix used to match changed file paths
        :param fails_start_date: start date of the historical failures query
        :param from_date: end date for failures / start for execution times
        :param to_date: end date of the execution-times query
        :param ignore_tests: optional list of test names to exclude
        """
        if ignore_tests is None:
            ignore_tests = []
        self.branch = branch
        self.ignore_tests = ignore_tests
        self.load_json_data(activity_matrix_path)
        self.filter_tests_with_no_activity()
        # Load historical data
        self.history_test_fails = get_historical_metric_map(
            database.get_test_name_fails(fails_start_date, from_date)
        )
        self.history_test_execution_times = get_historical_metric_map(
            database.get_test_execution_times(from_date, to_date)
        )
        self.new_files = {}

    def load_json_data(self, activity_matrix):
        """
        Loads JSON data for an activity matrix.
        The loaded JSON data includes:
        - The binary activity matrix itself
        - The tests considered
        - The methods considered
        :param activity_matrix: path of the activity matrix JSON file
        """
        print(f"Loading json data from {activity_matrix}")
        # Find relative path and timestamp to load tests/methods maps
        # NOTE(review): pattern assumes Windows-style backslash paths — confirm.
        actm_pattern = r"(.*)\\actmatrix_(.*)\.json"
        path, timestamp = re.search(actm_pattern, activity_matrix).groups()
        # activity matrix
        with open(activity_matrix) as actm_file:
            self.activity_matrix = np.array(json.load(actm_file), dtype=bool)
        self.original_matrix = self.activity_matrix
        # tests
        with open(f"{path}\\testids_{timestamp}.json") as tests_file:
            tests = np.array(list(json.load(tests_file).values()))
        self.tests_index = np.array(list(normalize_test_name(tests)))
        self.original_tests = self.tests_index
        # methods
        with open(f"{path}\\methodids_{timestamp}.json") as methods_file:
            self.methods_map = json.load(methods_file)
        # print(f"methods map: {len(self.methods_map.keys())}")
        self.methods_index = np.array(list(self.methods_map.values()))
        self.original_methods = self.methods_index

    def reset(self):
        """
        Reset current activity matrix, tests and methods data to the originally loaded data.
        """
        self.activity_matrix = self.original_matrix
        self.tests_index = self.original_tests
        self.methods_index = self.original_methods

    def filter_tests_with_no_activity(self):
        """
        Filter tests with no activity (zero rows).
        """
        active_tests = ~np.all(self.activity_matrix == 0, axis=1)
        self.tests_index = self.tests_index[active_tests]
        self.activity_matrix = self.activity_matrix[active_tests]

    def filter_methods_with_no_activity(self):
        """
        Filter methods with no activity (zero columns)
        """
        active_methods = ~np.all(self.activity_matrix == 0, axis=0)
        self.methods_index = self.methods_index[active_methods]
        self.activity_matrix = self.activity_matrix[:, active_methods]

    def filter_data_for_commit(self, changed_methods):
        """
        Filter matrix and indexes based on commit.
        Also, the changed data is filtered for tests/methods with no activity
        :param changed_methods: indexes of methods changed by the commit
        """
        self.activity_matrix = self.activity_matrix[:, changed_methods]
        self.methods_index = self.methods_index[changed_methods]
        # Filter no activity tests/methods
        self.filter_tests_with_no_activity()
        self.filter_methods_with_no_activity()

    def get_changed_indexes_for_changelist(
        self, changelist: List[List], ignore_changes: List
    ) -> object:
        """
        Get the changed method indexes in the activity matrix based on the changelist
        :param changelist: list of changed files (each element is pair with the type of change and the filename)
        :param ignore_changes: list of file paths to be ignored
        :return: on success, returns a list of changed indexes in the activity matrix.
                 on failure, returns a string describing the error case
        """
        # Filter changelist before processing
        changelist = [
            change
            for change in changelist
            if not any(
                (ignore in change[1]) or (change[1] == "/platform/trunk")
                for ignore in ignore_changes
            )
        ]
        # Process changelist
        new_files = []
        changed_files = []
        cs_pattern = self.branch + r"/(.*)\.cs$"
        xaml_cs_pattern = self.branch + r"/(.*)xaml\.cs"
        for x in changelist:
            # x is a (change_type, path) pair; only C# sources are relevant.
            if re.search(cs_pattern, x[1]):
                # Check if it's not a *.xaml.cs file
                if not re.search(xaml_cs_pattern, x[1]):
                    filename = re.search(cs_pattern, x[1]).group(1)
                    dot_filename = filename.replace("/", ".")
                    changed_files.append(dot_filename)
                    # Check if new file and store in hash table
                    # (123 is a presence sentinel; only the key matters)
                    if x[0] == "A":
                        self.new_files[dot_filename] = 123
                        new_files.append(dot_filename)
                    # Check if modified an already known new file
                    elif self.new_files.get(dot_filename) is not None:
                        new_files.append(dot_filename)
        # Check if no .cs files were changed
        if not changed_files:
            return "[Error] Changelist contains no covered .cs files"
        # Check if only changed new files
        if len(changed_files) == len(new_files):
            return "[Error] Changelist contains only new files or modified new files"
        # Map files to method indexes
        changed_indexes = []
        for method in self.methods_map.values():
            if any(changed in method for changed in changed_files):
                matched_methods = np.where(self.methods_index == method)
                changed_indexes.append(matched_methods[0][0])
        # Check if there are no method indexes to return
        if not changed_indexes:
            return "[Error] The provided activity matrix has no coverage data for the changed files"
        return changed_indexes
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,307
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/backend/selection/test_selection.py
|
# coding=utf-8
import random
from typing import List
from jmetal.core.problem import BinaryProblem
from jmetal.core.solution import BinarySolution
from jmetal.operator import BitFlipMutation
from jmetal.util.archive import CrowdingDistanceArchive
from jmetal.util.termination_criterion import StoppingByEvaluations
from backend.selection.binary_mopso import BMOPSO
from backend.selection.problem_data import ProblemData
class TestSelection(BinaryProblem):
    """jMetal binary problem: choose a subset of tests that optimizes the
    supplied objective functions over the activity matrix."""

    def __init__(self, problem_data: ProblemData, objectives: List):
        """
        :param problem_data: loaded activity matrix plus historical metrics
        :param objectives: objective callables f(problem, solution) -> value
        """
        super(TestSelection, self).__init__()
        self.objectives = objectives
        self.activity_matrix = problem_data.activity_matrix
        self.tests_index = problem_data.tests_index
        self.methods_index = problem_data.methods_index
        self.history_test_fails = problem_data.history_test_fails
        self.history_test_exec_times = problem_data.history_test_execution_times
        # One decision variable: a bit vector with one bit per test (row).
        self.number_of_tests = self.activity_matrix.shape[0]
        # self.number_of_objectives = 2
        self.number_of_objectives = len(objectives)
        self.number_of_variables = 1
        self.number_of_constraints = 0
        # self.obj_directions = [self.MAXIMIZE, self.MAXIMIZE]
        # self.obj_labels = ["DDU", "Total Previous Test Failures"]
        # self.obj_directions = [self.MAXIMIZE, self.MAXIMIZE, self.MINIMIZE]
        # self.obj_labels = ['DDU', '# Test Failures', '# Tests Selected']

    def get_name(self) -> str:
        return "Test Selection Problem"

    def create_solution(self) -> BinarySolution:
        # NOTE(review): reseeding the global RNG on every call makes each
        # created particle's bit vector identical — likely unintended; confirm.
        random.seed(123)
        new_solution = BinarySolution(
            number_of_variables=self.number_of_variables,
            number_of_objectives=self.number_of_objectives,
        )
        new_solution.variables[0] = [
            True if random.randint(0, 1) == 0 else False
            for _ in range(self.number_of_tests)
        ]
        return new_solution

    def evaluate(self, solution: BinarySolution) -> BinarySolution:
        # Each objective callable receives the problem and the candidate.
        solution.objectives = [func(self, solution) for func in self.objectives]
        return solution
def my_binary_mopso(problem: TestSelection, swarm):
    """
    Build a binary multi-objective PSO optimizer for the given problem.

    :param problem: the test selection problem instance
    :param swarm: swarm size (number of particles)
    :return: configured BMOPSO instance
    """
    return BMOPSO(
        problem=problem,
        swarm_size=swarm,
        epsilon=0.075,
        # NOTE(review): probability 0 disables bit-flip mutation entirely —
        # presumably intentional (pure PSO); confirm.
        mutation=BitFlipMutation(probability=0),
        leaders=CrowdingDistanceArchive(100),
        termination_criterion=StoppingByEvaluations(max=2000),
    )
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,308
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/backend/selection/objectives.py
|
# coding=utf-8
import numpy as np
from jmetal.core.solution import BinarySolution
from backend.selection.ddu_metric import ddu
from backend.selection.test_selection import TestSelection
def get_selected_matrix(particle: list, activity_matrix: np.ndarray) -> np.ndarray:
    """
    Get the subset of the activity matrix selected by the particle.
    :param particle: a particle (bit vector) representing a candidate selection
    :param activity_matrix: full activity matrix (tests x methods)
    :return: rows of the activity matrix whose bit is set
    """
    # True/1 bits select the corresponding test rows.
    selection_mask = np.asarray(particle) == 1
    return activity_matrix[selection_mask]
def calculate_ddu(problem: "TestSelection", solution: "BinarySolution") -> float:
    """
    Calculate the DDU metric for a candidate solution.
    Note: the value is negated so maximizing DDU fits the minimization scheme,
    and rounded to 2 decimals.
    :param problem: the test selection problem instance
    :param solution: a candidate solution
    :return: negated, rounded DDU value (0 for an empty selection)
    """
    selected = get_selected_matrix(solution.variables[0], problem.activity_matrix)
    if selected.size == 0:
        return 0
    return round(-1 * ddu(selected), 2)
def calculate_norm_coverage(problem: "TestSelection", solution: "BinarySolution") -> float:
    """
    Calculate normalized coverage for a candidate solution.
    Note: the return value is negated to support objective maximization
    :param problem: the test selection problem instance
    :param solution: a candidate solution
    :return: normalized coverage value (0 for an empty selection)
    """
    # Selection of rows inlined from get_selected_matrix.
    selection_mask = np.array(solution.variables[0]) == 1
    selected_rows = problem.activity_matrix[selection_mask]
    if selected_rows.size == 0:
        return 0
    per_method_hits = np.sum(selected_rows, axis=0)
    per_method_hits[per_method_hits > 0] = 1  # normalize to 1/0
    return -1 * (np.sum(per_method_hits) / selected_rows.shape[1])
def calculate_coverage(problem: "TestSelection", solution: "BinarySolution") -> float:
    """
    Calculate coverage without normalization for a candidate solution.
    Note: the return value is negated to support objective maximization
    :param problem: the test selection problem instance
    :param solution: a candidate solution
    :return: coverage value without normalization (0 for an empty selection)
    """
    # Selection of rows inlined from get_selected_matrix.
    selection_mask = np.array(solution.variables[0]) == 1
    selected_rows = problem.activity_matrix[selection_mask]
    if selected_rows.size == 0:
        return 0
    per_method_hits = np.sum(selected_rows, axis=0)
    return -1 * (np.sum(per_method_hits) / selected_rows.shape[1])
def calculate_number_of_tests(problem: "TestSelection", solution: "BinarySolution") -> int:
    """
    Calculate the total number of tests selected by a candidate solution.
    :param problem: the test selection problem instance
    :param solution: a candidate solution
    :return: number of selected tests, or a large penalty for an empty selection
    """
    selected = problem.tests_index[solution.variables[0]]
    count = len(selected)
    # Empty selections are heavily penalized so the optimizer discards them.
    return 123456 if count == 0 else count
def calculate_test_fails(problem: "TestSelection", solution: "BinarySolution") -> int:
    """
    Calculate total previous test failures for a candidate solution.
    Note: the return value is negated to support objective maximization
    :param problem: the test selection problem instance
    :param solution: a candidate solution
    :return: negated total of previous test failures
    """
    selected_tests = problem.tests_index[solution.variables[0]]
    # History lookup inlined: tests with no history count as zero failures.
    return -1 * sum(problem.history_test_fails.get(name, 0) for name in selected_tests)
def calculate_exec_times(problem: "TestSelection", solution: "BinarySolution") -> float:
    """
    Calculate total execution time for a candidate solution.
    :param problem: the test selection problem instance
    :param solution: a candidate solution
    :return: total execution time of the selected tests
    """
    selected_tests = problem.tests_index[solution.variables[0]]
    # History lookup inlined: tests with no history contribute zero time.
    return sum(problem.history_test_exec_times.get(name, 0) for name in selected_tests)
def _parse_history_to_list(history_results: dict, selected_tests: np.ndarray) -> list:
"""
Helper method to parse an historical metrics map into a list of values based on the selected tests.
:param history_results: map of historical metrics
:param selected_tests: list of selected tests names
:return: list of historical metric values
"""
return [history_results.get(test, 0) for test in selected_tests]
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,309
|
danielcorreia96/MOTSD
|
refs/heads/master
|
/backend/selection/ddu_metric.py
|
# coding=utf-8
import numpy as np
def ddu(matrix: np.ndarray):
    """
    Calculate the DDU metric value for a given activity matrix.
    Reference: Perez, Alexandre, Rui Abreu, and Arie van Deursen. "A test-suite
    diagnosability metric for spectrum-based fault localization approaches."
    Proceedings of the 39th International Conference on Software Engineering.
    IEEE Press, 2017.
    :param matrix: activity matrix
    :return: DDU value (density * diversity * uniqueness)
    """
    density_term = norm_density(matrix)
    diversity_term = diversity(matrix)
    uniqueness_term = uniqueness(matrix)
    return density_term * diversity_term * uniqueness_term
def norm_density(matrix: np.ndarray):
    """
    Calculate normalized density for a given activity matrix.
    Peaks at 1 when half of the cells are active and falls to 0 at the
    all-zero / all-one extremes.
    :param matrix: activity matrix
    :return: normalized density value
    """
    raw_density = np.count_nonzero(matrix) / matrix.size
    return 1 - abs(1 - 2 * raw_density)
def diversity(matrix: np.ndarray):
    """
    Calculate test diversity for a given activity matrix.

    Diversity is 1 minus the ratio of identical-row pairs to all ordered row
    pairs; a matrix whose rows are all distinct scores 1.

    :param matrix: activity matrix
    :return: test diversity value (0 when fewer than two rows exist)
    """
    n_rows = matrix.shape[0]
    pair_total = n_rows * (n_rows - 1)
    if pair_total == 0:
        # Fewer than two tests: no pairs to compare.
        return 0
    # np.unique(..., axis=0) counts identical rows directly, replacing the
    # fragile void-dtype view trick of the original implementation.
    _, counts = np.unique(matrix, return_counts=True, axis=0)
    identical_pairs = sum(c * (c - 1) for c in counts)
    return 1 - identical_pairs / pair_total
def uniqueness(matrix: np.ndarray):
    """
    Calculate uniqueness for a given activity matrix.

    Uniqueness is the fraction of columns (components) whose activity pattern
    across all tests is distinct.

    :param matrix: activity matrix
    :return: uniqueness value (0 when the matrix has no columns)
    """
    transposed = matrix.T
    n_columns = transposed.shape[0]
    if n_columns == 0:
        return 0
    # np.unique(..., axis=0) counts distinct columns directly, replacing the
    # fragile void-dtype view trick of the original implementation.
    distinct_columns = np.unique(transposed, axis=0).shape[0]
    return distinct_columns / n_columns
|
{"/get_csv_lines.py": ["/backend/evaluation/summary.py"], "/backend/evaluation/execution_item.py": ["/backend/integrations/database.py", "/backend/selection/problem_data.py"], "/testsel_pipeline.py": ["/backend/selection/objectives.py", "/backend/evaluation/execution_item.py", "/backend/evaluation/summary.py", "/backend/integrations/svn_utils.py", "/backend/selection/problem_data.py", "/backend/selection/test_selection.py"], "/backend/evaluation/utils.py": ["/backend/evaluation/execution_item.py"], "/backend/opencover/parser.py": ["/backend/opencover/utils.py"], "/join_metrics.py": ["/backend/evaluation/summary.py", "/generate_tests.py"], "/backend/evaluation/summary.py": ["/backend/evaluation/execution_item.py", "/backend/selection/problem_data.py"], "/join_randoms.py": ["/backend/evaluation/summary.py"], "/backend/selection/test_selection.py": ["/backend/selection/binary_mopso.py", "/backend/selection/problem_data.py"], "/backend/selection/objectives.py": ["/backend/selection/ddu_metric.py", "/backend/selection/test_selection.py"]}
|
4,360
|
myousif9/MP2RAGE-wrapper
|
refs/heads/master
|
/create_pipeline_bg_remover.py
|
from bg_remover_wrapper import bgremover
from nipype.interfaces.io import DataGrabber, DataSink
from nipype.interfaces import utility as niu
from nipype.pipeline import Node, MapNode, Workflow
from nipype.interfaces.utility import Function
from bids.layout import BIDSLayout
import sys
import os
def replace_slash_fn(filename):
    """Return *filename* with every '/' separator replaced by an underscore."""
    renamed = str(filename).replace("/", "_")
    return renamed
# Nipype Function node wrapper so replace_slash_fn can run inside a workflow.
replace_slash = Function(input_names=["filename"],
                         output_names=["renamed"],
                         function=replace_slash_fn)
def create_pipeline_bgremover(bids_dir, work_dir, out_dir, subjects, sessions, reg,
                              uni_match_pattern, inv1_match_pattern, inv2_match_pattern):
    """
    Build the MP2RAGE background-removal nipype workflow.

    Collects UNI/INV1/INV2 images from a BIDS dataset, wires them through the
    MATLAB background remover, and sinks the generated T1w images.

    :param bids_dir: root of the BIDS-formatted input dataset
    :param work_dir: nipype working directory
    :param out_dir: output directory (results land in <out_dir>/bg_remover/)
    :param subjects: subject labels to process
    :param sessions: session labels to process ('.*' means all/none filtering)
    :param reg: regularization value forwarded to the background remover
    :param uni_match_pattern: filename glob stem for UNI images
    :param inv1_match_pattern: filename glob stem for INV1 images
    :param inv2_match_pattern: filename glob stem for INV2 images
    :return: configured nipype Workflow

    BUG FIXES vs. the original:
    - the three file-accumulator lists were never initialized (NameError on
      first use);
    - ``list(set(x)).sort()`` sorted a throwaway copy, leaving duplicates and
      arbitrary order — replaced with ``sorted(set(x))``;
    - the inv1/inv2 DataGrabber templates used ``%d``/``%f`` placeholders for
      string substitution — must be ``%s``;
    - the INV queries used the misspelled ``extention=`` keyword instead of
      ``extensions=``;
    - ``wf.config['excecution']`` was a typo for ``'execution'``.
    """
    layout = BIDSLayout(bids_dir)
    first_uni_files = []
    first_inv1_files = []
    first_inv2_files = []
    for subject in subjects:
        if layout.get_sessions(subject=subject) == []:
            if sessions == ['.*']:
                first_uni_files += layout.get(subject=subject, modality='anat',
                                              extensions='.*UNI.*.nii.*')
                first_inv1_files += layout.get(subject=subject, modality='anat',
                                               extensions='.*inv-1.*.nii.*')
                first_inv2_files += layout.get(subject=subject, modality='anat',
                                               extensions='.*inv-2.*.nii.*')
            else:
                print("Warning: Session filter applied, but subject " + subject +
                      " has no bids session information. This subject has been ignored.")
        else:
            for session in sessions:
                first_uni_files += layout.get(subject=subject, session=session, modality='anat',
                                              extensions='.*UNI.*.nii.*')
                first_inv1_files += layout.get(subject=subject, session=session, modality='anat',
                                               extensions='.*inv-1.*.nii.*')
                first_inv2_files += layout.get(subject=subject, session=session, modality='anat',
                                               extensions='.*inv-2.*.nii.*')

    def _relative_dirs(image_files):
        # Directories of the images relative to the BIDS root, deduplicated
        # and sorted so the iterables are deterministic.
        dirs = set()
        for img in image_files:
            full_dirname = os.path.dirname(img.filename)
            dirs.add(full_dirname.replace(bids_dir, '').lstrip(os.sep))
        return sorted(dirs)

    uni_folders = _relative_dirs(first_uni_files)
    inv1_folders = _relative_dirs(first_inv1_files)
    inv2_folders = _relative_dirs(first_inv2_files)

    infosource_uni = Node(niu.IdentityInterface(fields=['uni']), name='infosource_uni')
    infosource_uni.iterables = ('uni', uni_folders)
    infosource_inv1 = Node(niu.IdentityInterface(fields=['inv1']), name='infosource_inv1')
    infosource_inv1.iterables = ('inv1', inv1_folders)
    infosource_inv2 = Node(niu.IdentityInterface(fields=['inv2']), name='infosource_inv2')
    infosource_inv2.iterables = ('inv2', inv2_folders)

    datasource = Node(DataGrabber(infields=['uni', 'inv1', 'inv2'],
                                  outfields=['uni_image', 'inv1_image', 'inv2_image']),
                      name='datasource')
    datasource.inputs.field_template = dict(
        uni_image='%s/' + uni_match_pattern + '.nii*',
        inv1_image='%s/' + inv1_match_pattern + '.nii*',
        inv2_image='%s/' + inv2_match_pattern + '.nii*')
    datasource.inputs.sort_filelist = True
    datasource.inputs.template = "*"
    datasource.inputs.base_directory = bids_dir

    t1w_gen = Node(bgremover(reg=reg), name='background_remover')

    datasink = Node(DataSink(), name='datasink')
    datasink.inputs.base_directory = out_dir + '/bg_remover/'
    datasink.inputs.parameterization = False

    rename_infosource = Node(replace_slash, "rename_infosource")
    rename_t1w = Node(niu.Rename(format_string="%(uni)s-T1w", keep_ext=True), "rename_T1w")

    wf = Workflow(name='bg_remover')
    wf.base_dir = work_dir
    wf.config['execution']['remove_unnecessary_outputs'] = False
    wf.connect([
        (infosource_uni, datasource, [('uni', 'uni')]),
        (infosource_inv1, datasource, [('inv1', 'inv1')]),
        (infosource_inv2, datasource, [('inv2', 'inv2')]),
        (datasource, t1w_gen, [('uni_image', 'uni_in'),
                               ('inv1_image', 'inv1_in'),
                               ('inv2_image', 'inv2_in')]),
        (t1w_gen, datasink, [('out_file', 'in_file')]),
        (infosource_uni, rename_infosource, [('uni', 'filename')]),
        (rename_infosource, rename_t1w, [('renamed', 'uni')]),
        (rename_t1w, datasink, [('out_file', '@')])
    ])
    return wf
|
{"/create_pipeline_bg_remover.py": ["/bg_remover_wrapper.py"], "/run.py": ["/create_pipeline_bg_remover.py"]}
|
4,361
|
myousif9/MP2RAGE-wrapper
|
refs/heads/master
|
/bg_remover_wrapper.py
|
from nipype.interfaces.matlab import MatlabCommand
from nipype.interfaces.base import TraitedSpec, BaseInterface, BaseInterfaceInputSpec,File,traits
import os
from string import Template
# Directory holding the bundled MATLAB scripts (next to this module).
# BUG FIX: the original path segment was 'matlab_script)' — a stray closing
# parenthesis that made every script lookup point to a nonexistent directory.
matlab_script_loc = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'matlab_script')
class bgremover_InputSpec(BaseInterfaceInputSpec):
    """Input traits for the MP2RAGE background-remover interface."""

    uni_in = File(exists=True,
                  desc='input file for UNI image',
                  argstr="%s",
                  mandatory=True)
    # BUG FIX: 'exists' was misspelled as 'exisits' on the INV inputs, so
    # traits silently ignored it and file existence was never validated.
    inv1_in = File(exists=True,
                   desc='input file for INV1 image',
                   argstr="%s",
                   mandatory=True)
    inv2_in = File(exists=True,
                   desc='input file for INV2 image',
                   argstr="%s",
                   mandatory=True)
    # Regularization strength passed through to the MATLAB script.
    reg = traits.Float(desc='input value for regularization')
    out_file = File('t1w_gen.nii.gz',
                    desc='name of output T1w image',
                    genfile=True,
                    usedefault=True)
class bgremover_OutputSpec(TraitedSpec):
    """Output traits for the MP2RAGE background-remover interface."""
    # Absolute path of the generated T1w image.
    out_file=File(desc="path/name of T1w file (if generated)",usedefault=True)
class bgremover(BaseInterface):
    """Nipype interface wrapping the MATLAB MP2RAGE background-noise remover."""

    input_spec = bgremover_InputSpec
    output_spec = bgremover_OutputSpec

    def _run_interface(self, runtime):
        # Fill the MATLAB demo-script template with the concrete inputs, then
        # execute it through nipype's MatlabCommand.
        with open(os.path.join(matlab_script_loc,'DemoRemoveBackgroundNoise.m'),'r') as script_file:
            script_content = script_file.read()
        # NOTE(review): str.format assumes the .m template uses {uni}, {inv1},
        # {inv2}, {denoise} and {reg} placeholders and contains no other
        # braces — confirm against DemoRemoveBackgroundNoise.m.
        script = script_content.format(uni = self.inputs.uni_in,
                                       inv1 = self.inputs.inv1_in,
                                       inv2 = self.inputs.inv2_in,
                                       denoise = self.inputs.out_file,
                                       reg = self.inputs.reg)
        mlab = MatlabCommand(script=script, mfile=True)
        # MATLAB path entries for the combination routine and the bundled
        # NIfTI I/O helpers the script depends on.
        mlab.inputs.paths = [os.path.join(matlab_script_loc,'func/RobustCombination.m'),
                             os.path.join(matlab_script_loc,'nii_func/load_nii_ext.m'),
                             os.path.join(matlab_script_loc,'nii_func/load_nii_hdr.m'),
                             os.path.join(matlab_script_loc,'nii_func/load_untouch0_nii_hdr.m'),
                             os.path.join(matlab_script_loc,'nii_func/load_untouch_nii.m'),
                             os.path.join(matlab_script_loc,'nii_func/load_untouch_nii_hdr.m'),
                             os.path.join(matlab_script_loc,'nii_func/load_untouch_nii_img.m'),
                             os.path.join(matlab_script_loc,'nii_func/save_nii_ext.m'),
                             os.path.join(matlab_script_loc,'nii_func/save_untouch0_nii_hdr.m'),
                             os.path.join(matlab_script_loc,'nii_func/save_untouch_nii.m'),
                             os.path.join(matlab_script_loc,'nii_func/save_untouch_nii_hdr.m'),
                             os.path.join(matlab_script_loc,'nii_func/verify_nii_ext.m')]
        result = mlab.run()
        return result.runtime

    def _list_outputs(self):
        # Report the generated T1w image as an absolute path.
        outputs = self._outputs().get()
        outputs['out_file'] = os.path.abspath(self.inputs.out_file)
        return outputs
|
{"/create_pipeline_bg_remover.py": ["/bg_remover_wrapper.py"], "/run.py": ["/create_pipeline_bg_remover.py"]}
|
4,362
|
myousif9/MP2RAGE-wrapper
|
refs/heads/master
|
/run.py
|
from create_pipeline_bg_remover import create_pipeline_bgremover
import os
if __name__=="__main__":
    # BIDS-app entry point: parse CLI arguments, configure nipype logging
    # and crash-dump directories, build the background-remover workflow and
    # run it with the requested execution plugin.
    from argparse import ArgumentParser, RawTextHelpFormatter
    from nipype import config, logging
    defstr = ' (default %(default)s)'  # NOTE(review): defined but never used
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument('bids_dir',help='the directory with the input dataset formatted according to the BIDS standard.')
    parser.add_argument('output_dir', help='The directory where the output files '
                        'should be stored. If you are running group level analysis '
                        'this folder should be prepopulated with the results of the'
                        'participant level analysis.')
    parser.add_argument('--participant_label', help='The label(s) of the participant(s) that should be analyzed. The label '
                        'corresponds to sub-<participant_label> from the BIDS spec '
                        '(so it does not include "sub-"). If this parameter is not '
                        'provided all subjects should be analyzed. Multiple '
                        'participants can be specified with a space separated list.',
                        default=['.*'],
                        nargs="+")
    parser.add_argument('--session_label', help='The label(s) of the session(s) that should be analyzed. The label '
                        'corresponds to ses-<session_label> from the BIDS spec '
                        '(so it does not include "ses-"). If this parameter is not '
                        'provided all sessions should be analyzed. Multiple '
                        'sessions can be specified with a space separated list.',
                        default=['.*'],
                        nargs="+")
    parser.add_argument("-w", "--work_dir", dest="work_dir",
                        help="Work directory. Defaults to <output_dir>/scratch")
    parser.add_argument("-l", "--log_dir", dest="log_dir",
                        help="Nipype output log directory. Defaults to <output_dir>/log")
    parser.add_argument("-c", "--crash_dir", dest="crash_dir",
                        help="Nipype crash dump directory. Defaults to <output_dir>/crash_dump")
    parser.add_argument("-p", "--plugin", dest="plugin",
                        default='Linear',
                        help="Plugin to use")
    parser.add_argument("--plugin_args", dest="plugin_args",
                        help="Plugin arguments")
    parser.add_argument("--keep_unnecessary_outputs", dest="keep_unnecessary_outputs",
                        action='store_true',default=False,
                        help="keep all nipype node outputs, even if unused")
    parser.add_argument('--uni_match_pattern', dest="uni_match_pattern",
                        default='*UNI*',
                        help='Pattern used to match UNI images and json files '
                        'in anat folder (leave extension out of pattern). The '
                        'pattern may contain simple shell-style wildcards a la '
                        'fnmatch. However, unlike fnmatch, filenames starting with '
                        'a dot are special cases that are not matched by \'*\' and '
                        '\'?\' patterns. Example usage: *acq-uni*')
    parser.add_argument('--inv1_match_pattern', dest="inv1_match_pattern",
                        default='*inv-1*',
                        help='Pattern used to match inv1 images and json files '
                        'in anat folder (leave extension out of pattern). The '
                        'pattern may contain simple shell-style wildcards a la '
                        'fnmatch. However, unlike fnmatch, filenames starting with '
                        'a dot are special cases that are not matched by \'*\' and '
                        '\'?\' patterns. Example usage: *inv-1*')
    # NOTE(review): flag name is '--inv2_pattern' while the others are
    # '--<x>_match_pattern' — confirm whether this asymmetry is intentional.
    parser.add_argument('--inv2_pattern', dest="inv2_match_pattern",
                        default='*inv-2*',
                        help='Pattern used to match inv2 images and json files '
                        'in anat folder (leave extension out of pattern). The '
                        'pattern may contain simple shell-style wildcards a la '
                        'fnmatch. However, unlike fnmatch, filenames starting with '
                        'a dot are special cases that are not matched by \'*\' and '
                        '\'?\' patterns. Example usage: *inv-2*')
    parser.add_argument("--regularization", dest="regularization",
                        default=10,
                        help="regularization parameter")
    args = parser.parse_args()
    bids_dir=args.bids_dir
    out_dir=args.output_dir
    uni_match_pattern=args.uni_match_pattern
    inv1_match_pattern=args.inv1_match_pattern
    inv2_match_pattern=args.inv2_match_pattern
    subjects=args.participant_label
    sessions=args.session_label
    if args.work_dir:
        work_dir = os.path.abspath(args.work_dir)
    else:
        work_dir = os.path.join(out_dir, 'scratch')
    if args.log_dir:
        log_dir = os.path.abspath(args.log_dir)
    else:
        # Build the log dir name from the subject/session filters; '.*' and
        # '*' are unfriendly in paths, so map them to 'all'/'star'.
        tmp="log-"+"_".join(subjects)+'-'+"_".join(sessions)
        tmp=tmp.replace(".*","all").replace("*","star")
        log_dir = os.path.join(out_dir, 'logs',tmp)
    if args.crash_dir:
        crash_dir = os.path.abspath(args.crash_dir)
    else:
        crash_dir = os.path.join(out_dir, 'crash_dump')
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    config.update_config({'logging': {
                              'log_directory': log_dir,
                              'log_to_file': True,
                          },
                          'execution': {
                              'crashdump_dir': crash_dir,
                              'crashfile_format': 'txt',
                          }})
    logging.update_logging(config)
    # NOTE(review): plugin/plugin_args/keep_unnecessary_outputs are bound
    # here but the run below reads args.* directly, and
    # keep_unnecessary_outputs is never passed to the workflow — confirm.
    plugin=args.plugin
    plugin_args=args.plugin_args
    keep_unnecessary_outputs=args.keep_unnecessary_outputs
    regularization = float(args.regularization)
    wf_bg_remover = create_pipeline_bgremover(bids_dir=bids_dir,
                                              work_dir=work_dir,
                                              out_dir=out_dir,
                                              subjects=subjects,
                                              sessions=sessions,
                                              reg=regularization,
                                              uni_match_pattern=uni_match_pattern,
                                              inv1_match_pattern=inv1_match_pattern,
                                              inv2_match_pattern=inv2_match_pattern)
    if args.plugin_args:
        # SECURITY NOTE(review): eval() on a CLI string executes arbitrary
        # code; acceptable only because the caller already controls the
        # process, but ast.literal_eval would be safer.
        exec_bg_remover=wf_bg_remover.run(args.plugin, plugin_args=eval(args.plugin_args))
    else:
        exec_bg_remover=wf_bg_remover.run(args.plugin)
|
{"/create_pipeline_bg_remover.py": ["/bg_remover_wrapper.py"], "/run.py": ["/create_pipeline_bg_remover.py"]}
|
4,363
|
myousif9/MP2RAGE-wrapper
|
refs/heads/master
|
/MP2RAGE-wrapper/__init__.py
|
from .t1w_wrapper import T1wgen
|
{"/create_pipeline_bg_remover.py": ["/bg_remover_wrapper.py"], "/run.py": ["/create_pipeline_bg_remover.py"]}
|
4,372
|
Mironova66/module_test
|
refs/heads/main
|
/test_module3.py
|
import unittest
import module3
class Test1(unittest.TestCase):
    """module3: digit sums of small positive integers."""
    def test1(self):
        self.assertEqual(module3.module3(3111), 6)
    def test2(self):
        self.assertEqual(module3.module3(16), 7)

class Test2(unittest.TestCase):
    """module3: negative input uses the absolute value."""
    def test1(self):
        self.assertEqual(module3.module3(-101235), 12)
    def test2(self):
        self.assertEqual(module3.module3(456), 15)

class Test3(unittest.TestCase):
    """module3: larger negative input and non-int rejection."""
    def test1(self):
        self.assertEqual(module3.module3(-123456), 21)
    def test2(self):
        self.assertEqual(module3.module3("4vfdf"), "Input Error")

class Test4(unittest.TestCase):
    """module3: single digit and non-numeric string."""
    def test1(self):
        self.assertEqual(module3.module3(1), 1)
    def test2(self):
        self.assertEqual(module3.module3("----"), "Input Error")

if __name__ == "__main__":
    unittest.main()
|
{"/test_module3.py": ["/module3.py"], "/test_module4.py": ["/module4.py"], "/test_module2.py": ["/module2.py"], "/test_module1.py": ["/module1.py"]}
|
4,373
|
Mironova66/module_test
|
refs/heads/main
|
/module4.py
|
# Написать функцию,которая заключает целое число в рамку из символов char и возвращает данную строку
# Пример:
# frame(16, '+') ==>
# ++++++
# + 16 +
# ++++++
def module4(num, char):
    """Frame the integer *num* with a border drawn from *char*.

    Returns a three-line string: border, "char num char", border.
    Non-int input (including bool) yields the string "Input Error".
    """
    if type(num) != int:
        return "Input Error"
    text = str(num)
    border = char * (len(text) + 4)
    middle = "{0} {1} {0}".format(char, text)
    return "\n".join([border, middle, border])

print(module4(-56, '/'))
|
{"/test_module3.py": ["/module3.py"], "/test_module4.py": ["/module4.py"], "/test_module2.py": ["/module2.py"], "/test_module1.py": ["/module1.py"]}
|
4,374
|
Mironova66/module_test
|
refs/heads/main
|
/test_module4.py
|
import unittest
import module4
class Test1(unittest.TestCase):
    """module4: basic framing and non-int rejection."""
    def test1(self):
        self.assertEqual(module4.module4(16, '+'), '++++++\n+ 16 +\n++++++')
    def test2(self):
        self.assertEqual(module4.module4("vgrve", '+'), "Input Error")

class Test2(unittest.TestCase):
    """module4: long number and string input rejection."""
    def test1(self):
        self.assertEqual(module4.module4(1234567, '-'), '-----------\n- 1234567 -\n-----------')
    def test2(self):
        self.assertEqual(module4.module4("...........", '+'), "Input Error")

class Test3(unittest.TestCase):
    """module4: negative number keeps its sign inside the frame."""
    def test1(self):
        self.assertEqual(module4.module4(-89, '/'), '///////\n/ -89 /\n///////')
    def test2(self):
        self.assertEqual(module4.module4("fff44", '.'), "Input Error")

class Test4(unittest.TestCase):
    """module4: another negative number and mixed-string rejection."""
    def test1(self):
        self.assertEqual(module4.module4(-56, '/'), '///////\n/ -56 /\n///////')
    def test2(self):
        self.assertEqual(module4.module4("56ggd", '+'), "Input Error")

if __name__ == "__main__":
    unittest.main()
|
{"/test_module3.py": ["/module3.py"], "/test_module4.py": ["/module4.py"], "/test_module2.py": ["/module2.py"], "/test_module1.py": ["/module1.py"]}
|
4,375
|
Mironova66/module_test
|
refs/heads/main
|
/test_module2.py
|
import unittest
import module2
class Test1(unittest.TestCase):
    """module2: non-6-digit input rejected, lucky number accepted."""
    def test1(self):
        self.assertEqual(module2.module2(3111), "Input Error")
    def test2(self):
        self.assertEqual(module2.module2(713524), True)

class Test2(unittest.TestCase):
    """module2: unlucky 6-digit number, short input rejected."""
    def test1(self):
        self.assertEqual(module2.module2(101235), False)
    def test2(self):
        self.assertEqual(module2.module2(456), "Input Error")

class Test3(unittest.TestCase):
    """module2: unlucky number and string rejection."""
    def test1(self):
        self.assertEqual(module2.module2(123456), False)
    def test2(self):
        self.assertEqual(module2.module2("vgrer4w4"), "Input Error")

class Test4(unittest.TestCase):
    """module2: lucky number and punctuation-string rejection."""
    def test1(self):
        self.assertEqual(module2.module2(185536), True)
    def test2(self):
        self.assertEqual(module2.module2("/////////"), "Input Error")

if __name__ == "__main__":
    unittest.main()
|
{"/test_module3.py": ["/module3.py"], "/test_module4.py": ["/module4.py"], "/test_module2.py": ["/module2.py"], "/test_module1.py": ["/module1.py"]}
|
4,376
|
Mironova66/module_test
|
refs/heads/main
|
/test_module1.py
|
import unittest
import module1
class Test1(unittest.TestCase):
    """module1: even numbers."""
    def test1(self):
        self.assertEqual(module1.module1(156), True)
    def test2(self):
        self.assertEqual(module1.module1(34), True)

class Test2(unittest.TestCase):
    """module1: odd number and string rejection."""
    def test1(self):
        self.assertEqual(module1.module1(1), False)
    def test2(self):
        self.assertEqual(module1.module1("vgrvd"), "Input Error")

class Test3(unittest.TestCase):
    """module1: non-ASCII string rejection and large odd number."""
    def test1(self):
        self.assertEqual(module1.module1("ккккк"), "Input Error")
    def test2(self):
        self.assertEqual(module1.module1(4444444447), False)

class Test4(unittest.TestCase):
    """module1: mixed string rejection and negative even number."""
    def test1(self):
        self.assertEqual(module1.module1("..не62"), "Input Error")
    def test2(self):
        self.assertEqual(module1.module1(-98), True)

if __name__ == "__main__":
    unittest.main()
|
{"/test_module3.py": ["/module3.py"], "/test_module4.py": ["/module4.py"], "/test_module2.py": ["/module2.py"], "/test_module1.py": ["/module1.py"]}
|
4,377
|
Mironova66/module_test
|
refs/heads/main
|
/module3.py
|
# Написать функцию, которая считает сумму всех цифр целого числа
def module3(num):
    """Return the sum of the decimal digits of the integer *num*.

    The sign is ignored (digits of abs(num) are summed).  Any non-int input
    — including bool, which the exact type check rejects — returns the
    string "Input Error".

    The original loop iterated len(str(num)) + 1 times, performing one
    wasted pass after the number was already reduced to 0; summing over the
    digit string removes that redundancy.
    """
    if type(num) != int:
        return "Input Error"
    return sum(int(digit) for digit in str(abs(num)))
|
{"/test_module3.py": ["/module3.py"], "/test_module4.py": ["/module4.py"], "/test_module2.py": ["/module2.py"], "/test_module1.py": ["/module1.py"]}
|
4,378
|
Mironova66/module_test
|
refs/heads/main
|
/module1.py
|
# Написать функцию, которая определяет является ли целое число четным
# При ошибочном вводе вывести "Input Error"
def module1(num):
    """Return True when the integer *num* is even, False when odd.

    Non-int input (including bool, rejected by the exact type check)
    returns the string "Input Error".

    The original took abs(num) first, which is redundant: Python's %
    already yields 0 for every even number, negative or not.
    """
    if type(num) != int:
        return "Input Error"
    return num % 2 == 0
|
{"/test_module3.py": ["/module3.py"], "/test_module4.py": ["/module4.py"], "/test_module2.py": ["/module2.py"], "/test_module1.py": ["/module1.py"]}
|
4,379
|
Mironova66/module_test
|
refs/heads/main
|
/module2.py
|
# Написать функцию, которая определяет является ли шестизначное число "счастливым"
# (сумма первых трех цифр равна сумме последних трех цифр)
# При ошибочном вводе вывести "Input Error"
def module2(number):
    """Return True when the six-digit *number* is "lucky": the sum of its
    first three digits equals the sum of its last three.

    Inputs that are not int, or whose str() form is not exactly six
    characters long, return the string "Input Error".  Digit extraction is
    done with floor division / modulo, so negative six-character inputs
    (e.g. -12345) are evaluated with Python's floored semantics, exactly as
    the original did.
    """
    if type(number) != int:
        return "Input Error"
    if len(str(number)) != 6:
        return "Input Error"
    first_half = number // 100000 + number // 10000 % 10 + number // 1000 % 10
    second_half = number % 1000 // 100 + number % 100 // 10 + number % 10
    return first_half == second_half
|
{"/test_module3.py": ["/module3.py"], "/test_module4.py": ["/module4.py"], "/test_module2.py": ["/module2.py"], "/test_module1.py": ["/module1.py"]}
|
4,389
|
obiito007/HungBK
|
refs/heads/master
|
/VCS/migrations/0001_initial.py
|
# Generated by Django 2.2.5 on 2019-11-03 12:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial schema for the VCS app (Django 2.2.5).

    Creates: List (registered public keys per teacher), Taokhoa
    (key-generation input), Teach (signing input), Teacher (uploaded
    images) and Xac (verification input).  Applied migrations should not
    be hand-edited; add a new migration instead.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='List',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Giaovien', models.CharField(max_length=100)),
                ('Pubkey', models.CharField(max_length=100)),
                ('PubN', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Taokhoa',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('a', models.FloatField()),
                ('b', models.FloatField()),
                ('Ten', models.CharField(choices=[('Nguyễn Văn A', 'Nguyễn Văn A'), ('Trần Văn B', 'Trần Văn B'), ('Phạm Thị C', 'Phạm Thị C')], max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Teach',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('KhoaE', models.FloatField()),
                ('KhoaN', models.FloatField()),
                ('Link', models.TextField(max_length=1000)),
                ('Ghichu', models.CharField(max_length=100)),
                ('MSSV', models.CharField(default='', max_length=25)),
            ],
        ),
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Giaovien', models.CharField(choices=[('Nguyễn Văn A', 'Nguyễn Văn A'), ('Trần Văn B', 'Trần Văn B'), ('Phạm Thị C', 'Phạm Thị C')], max_length=1000)),
                ('image', models.ImageField(blank=True, null=True, upload_to='images/%Y/%m/%d/')),
                ('date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Xac',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Giaovien', models.CharField(choices=[('Nguyễn Văn A', 'Nguyễn Văn A'), ('Trần Văn B', 'Trần Văn B'), ('Phạm Thị C', 'Phạm Thị C')], max_length=1000)),
            ],
        ),
    ]
|
{"/VCS/forms.py": ["/VCS/models.py"], "/VCS/views.py": ["/VCS/forms.py", "/VCS/models.py"], "/VCS/admin.py": ["/VCS/models.py"]}
|
4,390
|
obiito007/HungBK
|
refs/heads/master
|
/VCS/forms.py
|
from django import forms
from django.forms import ModelForm,Textarea
from .models import Taokhoa,Teach,Teacher,List,Xac
import re
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
class TeachForm(forms.ModelForm):
    """Signing form: private key (KhoaE/KhoaN), note, student id and the
    path of the image to sign (rendered as a one-row textarea)."""
    class Meta:
        model = Teach
        fields=['KhoaE','KhoaN','Ghichu','MSSV','Link']
        widgets = {
            'Link':Textarea(attrs={'cols':80,'rows':1})
        }
        labels = {
            'KhoaE': ('Khóa bí mật'),
            'KhoaN': ('Khóa N'),
            'Link':('Path Image'),
        }
class TaokhoaForm(forms.ModelForm):
    """Key-generation form: two primes (a, b), the teacher name and MSCB."""
    class Meta:
        model = Taokhoa
        fields=['a','b','Ten','MSCB']

    def clean_Ten(self):
        """Reject a teacher name that already has a registered key in List.

        Fixes in this revision: the original read cleaned_data['Ten'] twice
        and fetched the whole queryset just to take len(); .exists() asks
        the database for a cheap existence check instead.  The original
        error message was a copy-paste of the password message ('Mật khẩu
        không hợp lệ') — this check is about a duplicate teacher, so the
        message now says so.
        """
        Ten = self.cleaned_data['Ten']
        if not List.objects.filter(Giaovien=Ten).exists():
            return Ten
        raise forms.ValidationError('Giáo viên đã tồn tại')
class UpForm(forms.ModelForm):
    """Upload form: a signed image plus the owner's MSCB."""
    class Meta:
        model = Teacher
        fields=['image','MSCB']

class ListForm(forms.ModelForm):
    """Verification form: the MSCB whose uploads should be checked."""
    class Meta:
        model = Xac
        fields=['MSCB']
class dangnhapForm(forms.Form):
    """Registration form: username, email and password with confirmation."""
    username = forms.CharField(label='Taikhoan', max_length=30)
    email = forms.EmailField(label='Email')
    password1 = forms.CharField(label='Mật Khẩu',widget=forms.PasswordInput())
    password2 = forms.CharField(label='Nhập lại Mật Khẩu',widget=forms.PasswordInput())

    def clean_password2(self):
        """Require both password fields to match and be non-empty."""
        if 'password1' in self.cleaned_data:
            password1 = self.cleaned_data['password1']
            password2 = self.cleaned_data['password2']
            if password1==password2 and password1:
                return password2
        raise forms.ValidationError('Mật khẩu không hợp lệ')

    def clean_username(self):
        """Reject usernames containing non-word characters or that already
        exist (EAFP lookup against the User table)."""
        username = self.cleaned_data['username']
        if not re.search(r'^\w+$',username):
            raise forms.ValidationError("Tên tài khoản có kí tự đặc biệt")
        try:
            User.objects.get(username=username)
        except ObjectDoesNotExist:
            return username
        raise forms.ValidationError('Tài khoản đã tồn tại')

    def save(self):
        # Creates the user; returns None.  register() assigns the result to
        # `form` but never uses it, so that is harmless.
        User.objects.create_user(username=self.cleaned_data['username'], email=self.cleaned_data['email'], password=self.cleaned_data['password1'])
|
{"/VCS/forms.py": ["/VCS/models.py"], "/VCS/views.py": ["/VCS/forms.py", "/VCS/models.py"], "/VCS/admin.py": ["/VCS/models.py"]}
|
4,391
|
obiito007/HungBK
|
refs/heads/master
|
/VCS/views.py
|
from django.shortcuts import render,redirect
import math
from django.template import RequestContext
from django.http import HttpResponse
import random
from hashlib import sha1
import hashlib
from .forms import TeachForm
from .forms import TaokhoaForm,UpForm,ListForm,dangnhapForm
from .models import Teach,Teacher,List,Thongtin
import cv2
import numpy as np
import sys
from django.template import loader
from django.contrib.auth import authenticate,decorators,login
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import user_passes_test
import xlwt
from django.contrib.auth.models import User
# Create your views here.
#@decorators.login_required(login_url= '/login/')
def staff_required(login_url=None):
    """Decorator factory: restrict a view to staff users."""
    return user_passes_test(lambda u: u.is_staff, login_url=login_url)

def index(request):
    """Render the static home page."""
    return render(request,'VCS/home.html')

def danh(request):
    """List every verification result (Thongtin row) recorded so far."""
    allaccs= Thongtin.objects.all()
    context= {'allaccs': allaccs}
    return render(request,'VCS/linhtinh3.html',context)
def coprime(o, p):
    """Greatest common divisor of o and p via Euclid's algorithm.

    Despite the name, this returns the gcd itself; callers test the result
    against 1 to decide coprimality.
    """
    if p == 0:
        return o
    return coprime(p, o % p)
def extended_gcd(aa, bb):
    """Extended Euclidean algorithm.

    Returns (g, x, y) where g = gcd(|aa|, |bb|) and aa*x + bb*y == g.
    The Bezout coefficients are computed on the absolute values and then
    sign-adjusted to match the original arguments.
    """
    old_r, r = abs(aa), abs(bb)
    old_x, x = 1, 0
    old_y, y = 0, 1
    while r:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_x, x = x, old_x - q * x
        old_y, y = y, old_y - q * y
    return old_r, old_x * (-1 if aa < 0 else 1), old_y * (-1 if bb < 0 else 1)
def modinv(a, m):
    """Return the modular inverse of a modulo m.

    Raises Exception when gcd(a, m) != 1, i.e. when no inverse exists.
    """
    g, x, _ = extended_gcd(a, m)
    if g == 1:
        return x % m
    raise Exception('Modular inverse does not exist')
def is_prime(num):
    """Trial-division primality test.

    Handles 2 explicitly, rejects everything below 2 and all other even
    numbers, then tries only odd divisors up to (and including) sqrt(num).
    """
    if num < 2:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    return all(num % divisor for divisor in range(3, int(num**0.5) + 2, 2))
def generate_keypair(p, q):
    """Generate a textbook-RSA keypair from two distinct primes p and q.

    Returns the 5-tuple (e, d, n, (e, n), (d, n)), matching the original's
    ((e), (d), n, pri, pub) layout.  The exponent e is drawn from the
    module-level `random` generator (not cryptographically secure) until it
    is coprime with phi; d is its modular inverse.
    """
    if not (is_prime(p) and is_prime(q)):
        raise ValueError('Both numbers must be prime.')
    elif p == q:
        raise ValueError('p and q cannot be equal')
    n = p * q
    phi = (p-1) * (q-1)
    # Redraw until e is coprime with phi; same draw sequence as the
    # original draw-then-loop form.
    while True:
        e = random.randrange(1, phi)
        if coprime(e, phi) == 1:
            break
    d = modinv(e, phi)
    return (e, d, n, (e, n), (d, n))
def hashFunction(message):
    """Return the SHA-1 hex digest of the UTF-8 encoding of *message*."""
    return sha1(message.encode("UTF-8")).hexdigest()
def encrypt(privatek, plaintext):
    """Textbook-RSA "sign": raise each character's code point to the key.

    privatek is a (key, n) pair; both components are passed through int()
    so string/float values from the database also work.  Returns the list
    [pow(ord(c), key, n) for each character of *plaintext*].

    Fix: the original also built an unused `numberRepr` list of ord()
    values — a wasted full pass over the input — which is removed here.
    """
    key, n = privatek
    key = int(key)
    n = int(n)
    return [pow(ord(char), key, n) for char in plaintext]
def decrypt(publick, ciphertext):
    """Textbook-RSA "verify": invert encrypt() with the (key, n) pair.

    Each integer of *ciphertext* is raised to `key` mod `n` and mapped back
    to a character; the characters are joined into a single string.  Both
    key components go through int() so string/float values also work.

    Fix: the original computed `numberRepr` — a second, unused list doing
    the exact same modular exponentiation — doubling the work; a single
    pass now produces the result directly.
    """
    key, n = publick
    key = int(key)
    n = int(n)
    return ''.join(chr(pow(char, key, n)) for char in ciphertext)
def hash_file(filename):
    """Return the SHA-1 hex digest of the file's contents.

    The file is read in binary mode in 1 KiB chunks so arbitrarily large
    files hash without loading into memory.
    """
    digest = hashlib.sha1()
    with open(filename, 'rb') as file:
        # iter() with a b'' sentinel stops exactly when read() returns EOF.
        for chunk in iter(lambda: file.read(1024), b''):
            digest.update(chunk)
    return digest.hexdigest()
def taokhoa(form):
    """Generate an RSA keypair from the form's primes and register the
    public half in the List table.

    *form* is an unsaved Taokhoa instance (save(commit=False) in index1).
    Renders linhtinh.html showing public/private/n.
    """
    a = form.a
    b = form.b
    Ten = form.Ten
    MSCB = form.MSCB
    public, private,n,t,y= generate_keypair(a, b)
    #User = get_user_model()
    #user = User.objects.get(id=self.user.id)
    # NOTE(review): Tennguoiky is a module-level global set by index1();
    # this is not safe under concurrent requests — confirm single-user use.
    user =Tennguoiky
    c = List(Giaovien=Ten,Pubkey = int(public),PubN= int(n),MSCB=MSCB,UserRegis=user)
    c.save()
    template = loader.get_template('VCS/linhtinh.html')
    context = {
        'public': int(public),'private': int(private),'n': int(n)
    }
    #response=HttpResponse()
    #response.writelines( "<h3>Khoa bi mat bang=%s </h3><br/>" %(public))
    #response.writelines( "<h3>khoa cong khai bang=%s</h3><br/>" %(private))
    #response.writelines( "<h3>n bằng tích của hai số nguyên tố:%s </h3><br/>" %(n))
    #return response
    return HttpResponse(template.render(context))
@staff_required(login_url= '/login/')
def teach(request):
    """Staff-only signing page: on a valid POST, save the Teach row and
    hand it to Ky() which signs and watermarks the referenced image."""
    form = TeachForm(request.POST or None)
    if form.is_valid():
        form=form.save()
        return Ky(form)
    return render(request,'VCS/teach.html', {'form': form})
@staff_required(login_url= '/login/')
def index1(request):
    """Staff-only key-generation page."""
    form = TaokhoaForm(request.POST or None)
    user = User.objects.get(username=request.user.username)
    # NOTE(review): the current user is passed to taokhoa() through a
    # module-level global — not request-safe; confirm single-user use.
    global Tennguoiky
    Tennguoiky = user
    if request.method == 'POST':
        if form.is_valid():
            # commit=False: taokhoa() only reads the values; the Taokhoa
            # row itself is never persisted here.
            form = form.save(commit=False)
            return taokhoa(form)
    else:
        form = TaokhoaForm()
    return render(request,'VCS/khoa.html', {'form': form})
def Ky(form):
    """Sign the image referenced by a saved Teach row.

    SHA-1 hashes the grayscale image, RSA-encrypts the hex digest with the
    private key (KhoaE, KhoaN), embeds the result plus MSSV/Ghichu into the
    image via nhung(), and writes "<name>-sign.bmp" next to the original.
    Renders a success page, or a failure page when the image lacks
    embedding capacity.
    """
    d=form.KhoaE
    n=form.KhoaN
    a = form.Link
    a1=form.MSSV
    a2=form.Ghichu
    y=(d,n)
    np.set_printoptions(threshold=sys.maxsize)
    img = cv2.imread(a,cv2.IMREAD_GRAYSCALE)
    # Hash the pixel buffer, not the file bytes (hash_file(a) is computed
    # but unused below).
    hash_object = hashlib.sha1(img)
    hex_dig = hash_object.hexdigest()
    s=hash_file(a)
    print(hex_dig)
    #print(s)
    #encrypted_msg = encrypt(y, s)
    encrypted_msg = encrypt(y, hex_dig)
    encrypted_msg1 =str( encrypted_msg)
    #print(encrypted_msg1)
    z = nhung(a,encrypted_msg,a1,a2)
    #print(z)
    # nhung() returns this sentinel string when the image cannot hold the
    # payload; otherwise it returns the watermarked array.
    if not z=='Hình này không phù hợp cho việc nhúng':
        # Output name: strip the 4-character extension, append "-sign.bmp".
        a1 ='-sign'
        a2=a[:len(a)-4]
        #a3 = a[len(a)-4:len(a)]
        a3='.bmp'
        a4 = a2+a1+a3
        cv2.imwrite(a4,z)
        template = loader.get_template('VCS/linhtinh2.html')
        return HttpResponse(template.render())
        #response=HttpResponse()
        #response.writelines( "<h3>khoa bi mat bang=%s </h3><br/>" %(encrypted_msg1))
        #return response
    else:
        template = loader.get_template('VCS/linhtinh4.html')
        return HttpResponse(template.render())
def list(request):
    """Show every uploaded Teacher image."""
    # NOTE(review): this view shadows the builtin `list`; renaming would
    # touch the URL conf, so it is only flagged here.
    Teachers = Teacher.objects.all()
    return render(request,'VCS/list.html',{'Teachers':Teachers})
#def Poss(request, id):
    #post = Teacher.objects.get(id=id)
    #return render(request,'VCS/post.html', {'post' : post})

def upload_file(request):
    """Upload a signed image (Teacher row: image + MSCB)."""
    if request.method == 'POST':
        form = UpForm(request.POST , files = request.FILES )
        if form.is_valid():
            form = form.save()
            return redirect('upload_file')
    else:
        form = UpForm()
    return render(request,'VCS/up.html',{'form':form})

def bang(request):
    """Show the table of registered public keys."""
    allaccs= List.objects.all()
    context= {'allaccs': allaccs}
    return render(request, 'VCS/bang.html', context)

def xac(request):
    """Ask for an MSCB and run verification of all its uploads."""
    form = ListForm(request.POST or None)
    if form.is_valid():
        form = form.save()
        return xacthuc(form)
    return render(request,'VCS/xacthuc.html', {'form': form})
def xacthuc(form):
    """Verify every uploaded image registered under the form's MSCB.

    For each Teacher image: extract the embedded signature/note/student id
    with giainhung(), RSA-decrypt the signature with the stored public key
    and compare it against the SHA-1 of the restored image.  One Thongtin
    row is recorded per image; any exception during extraction is treated
    as a tampered file.  Renders the results table.
    """
    b=form.MSCB
    a=Teacher.objects.filter(MSCB=b)
    c = List.objects.get(MSCB=b)
    c1=c.Pubkey
    c2=c.PubN
    # Keys are stored as CharFields; float() here, int() inside decrypt().
    pub=(float(c1),float(c2))
    for i in range(0,len(a)):
        try:
            [encrypted_msg,Ghichu,MSSV,im] = giainhung(a[i].image.path)
            date=a[i].date
            #print(encrypted_msg)
            d=decrypt(pub,encrypted_msg)
            hash_object = hashlib.sha1(im)
            hex_dig = hash_object.hexdigest()
            s=hex_dig
            #print('Giai mã chữ ký ra=',d)
            #print(encrypted_msg)
            #print('hash file gốc=',s)
            Ghichu=str(Ghichu)
            # NOTE(review): hard-coded Windows debug path — breaks on any
            # other machine; confirm whether this write is still needed.
            cv2.imwrite(r'D:\luanvan 11-12\filegoc.bmp',im)
            # d = decrypted signature, s = hash of the restored image.
            if s==d:
                Trangthai='File đc xác thực'
            else:
                Trangthai='File đã bị thay đổi'
            p=List.objects.get(MSCB=b)
            c = Thongtin(Giaovien=str(p.Giaovien),MSSV = str(MSSV),Trangthai=str(Trangthai) ,Ghichu=str(Ghichu),MSCB=b,date=str(date))
            c.save()
        except Exception:
            # Extraction or decryption failed: record the file as modified.
            p=List.objects.get(MSCB=b)
            Giaovien=str(p.Giaovien)
            MSSV='None'
            MSCB=b
            Ghichu='None'
            date=a[i].date
            Trangthai ='File đã bị thay đổi'
            p=List.objects.get(MSCB=b)
            c = Thongtin(Giaovien=str(p.Giaovien),MSSV = str(MSSV),Trangthai=str(Trangthai) ,Ghichu=str(Ghichu),MSCB=b,date=str(date))
            c.save()
    #(encrypted_msg,Ghichu,MSSV,im) = giainhung(a[i].image.path)
    #d=decrypt(pub,encrypted_msg)
    #hash_object = hashlib.sha1(im)
    #hex_dig = hash_object.hexdigest()
    #s=hex_dig
    #Ghichu=str(Ghichu)
    #if s==d:
        #Trangthai='File đc xác thực'
        #p=List.objects.get(MSCB=b)
        #c = Thongtin(Giaovien=str(p.Giaovien),MSSV = str(MSSV),Trangthai=str(Trangthai) ,Ghichu=str(Ghichu),MSCB=b)
        #c.save()
    template = loader.get_template('VCS/linhtinh3.html')
    allaccs= Thongtin.objects.all()
    context= {'allaccs': allaccs}
    return HttpResponse(template.render(context))
    #response=HttpResponse()
    #if s==d:
        #response.writelines( "<h4>file đã đc xác thực <br/>" )
    #else:
        #response.writelines( "<h4>file đã bị thay đổi <br/>" )
    #response.writelines( "<h3>hash=%s </h3><br/>" %(d) )
    #response.writelines( "<h3>khoa bi mat bang=%s </h3><br/>" %(c1) )
    #response.writelines( "<h3>khoa bi mat bang=%s </h3><br/>" %(c2) )
    #response.writelines( "<h3>khoa bi mat bang=%s </h3><br/>" %(p) )
    #return response
#@decorators.login_required(login_url= '/login/')
def register(request):
    """Create a new user account via dangnhapForm."""
    form = dangnhapForm()
    if request.method == 'POST':
        form = dangnhapForm(request.POST)
        if form.is_valid():
            form=form.save()
            return HttpResponseRedirect('/')
    return render(request,'VCS/register.html',{'form':form})

def export_users_xls(request):
    """Export all Thongtin rows as an .xls attachment (via xlwt)."""
    response = HttpResponse(content_type='application/ms-excel')
    response['Content-Disposition'] = 'attachment; filename="Danhsach.xls"'
    wb = xlwt.Workbook(encoding='utf-8')
    ws = wb.add_sheet('Users')
    # Sheet header, first row (bold font)
    row_num = 0
    font_style = xlwt.XFStyle()
    font_style.font.bold = True
    columns = ['MSCB','Giáo viên', 'MSSV', 'Trạng thái', 'Ghi chú', ]
    for col_num in range(len(columns)):
        ws.write(row_num, col_num, columns[col_num], font_style)
    # Sheet body, remaining rows (plain font)
    font_style = xlwt.XFStyle()
    rows = Thongtin.objects.all().values_list('MSCB','Giaovien', 'MSSV', 'Trangthai', 'Ghichu')
    for row in rows:
        row_num += 1
        for col_num in range(len(row)):
            ws.write(row_num, col_num, row[col_num], font_style)
    wb.save(response)
    return response
def nhung(link,sign,MSSV,Ghichu):
    """Embed a signature, student id and note into a grayscale image using
    reversible histogram-shifting watermarking.

    The payload bit-string holds side information needed for exact
    reversal (low bits of the 8 reserved pixels, the two histogram minima
    and their pixel positions) followed by zero padding, the 7-bit-encoded
    MSSV and Ghichu, and 40 signature values of 14 bits each.  Returns the
    watermarked image array, or the sentinel string
    'Hình này không phù hợp cho việc nhúng' when capacity is insufficient.
    """
    np.set_printoptions(threshold=sys.maxsize)
    img = cv2.imread(link,cv2.IMREAD_GRAYSCALE)
    im = cv2.imread(link,cv2.IMREAD_GRAYSCALE)
    size = im.shape
    histogram = [0] * 256
    #q=np.histogram(im,bins=1,range=None)
    for row in range(size[0]): # traverse by row (y-axis)
        for col in range(size[1]): # traverse by column (x-axis)
            histogram[im[row, col]] += 1
    a = histogram
    b = np.arange(256)
    #print(histogram)
    #print(b)
    # The first eight pixels of row 0 are reserved for bookkeeping; remove
    # them from the histogram counts.
    j1 = im[0,0];
    j2 = im[0,1];
    j3 = im[0,2];
    j4 = im[0,3];
    j5 = im[0,4];
    j6 = im[0,5];
    j7 = im[0,6];
    j8 = im[0,7];
    a[j1]=a[j1]-1;
    a[j2]=a[j2]-1;
    a[j3]=a[j3]-1;
    a[j4]=a[j4]-1;
    a[j5]=a[j5]-1;
    a[j6]=a[j6]-1;
    a[j7]=a[j7]-1;
    a[j8]=a[j8]-1;
    #print(a)
    # Find the peak (adjacent histogram pair with the largest joint count).
    # NOTE(review): if the maximum pair is the initial (a[0], a[1]), `peak`
    # is never bound and the code below raises NameError — confirm images
    # in practice never hit this.
    max = a[0] + a[1]
    for i in range(1,255):
        if max<(a[i] + a[i+1]):
            max = (a[i]+a[i+1])
            peak = i
            giatripeak=a[i]
            giatriketiep=a[i+1]
    #print(peak)
    # Split the histogram into the regions left and right of the peak.
    sub1 = a[0:peak+1]
    sub2=a[peak:256]
    # Find the minima (value and position) on each side of the peak.
    sub11 = np.flip(sub1)
    q1 = min(sub11)
    w1 = np.argmin(sub11)
    w1 = len(sub11)-w1-1
    q2 = min(sub2)
    w2 = np.argmin(sub2)
    # Binary representation of the reserved pixels (region I1).
    i1 = '{0:08b}'.format(im[0,0])
    i2 = '{0:08b}'.format(im[0,1])
    i3 = '{0:08b}'.format(im[0,2])
    i4 = '{0:08b}'.format(im[0,3])
    i5 = '{0:08b}'.format(im[0,4])
    i6 = '{0:08b}'.format(im[0,5])
    i7 = '{0:08b}'.format(im[0,6])
    i8 = '{0:08b}'.format(im[0,7])
    # Side information:
    # V = the 8 low bits of the reserved pixels (they will be overwritten
    # with the peak value, so the originals must travel in the payload).
    V = i1[7]+i2[7]+i3[7]+i4[7]+i5[7]+i6[7]+i7[7]+i8[7]
    #print(V)
    # Binary value of the minimum left of the peak.
    minL = '{0:08b}'.format(w1)
    #print(minL)
    # Count of pixels at the left minimum.
    CL = '{0:08b}'.format(q1)
    #print(CL)
    # Positions of the pixels holding the left-minimum value; the reserved
    # pixels are masked out first so they are not matched.
    # NOTE(review): 257 does not fit in uint8 — presumably it wraps; the
    # reserved pixels are restored from `img` further below. Confirm dtype.
    for i in range(0,8):
        im[0][i] = 257
    ML = np.argwhere(im == w1)
    #print(ML)
    # Binary value of the minimum right of the peak.
    minR = '{0:08b}'.format(w2+peak)
    #print(minR)
    # Count of pixels at the right minimum.
    CR = '{0:08b}'.format(q2)
    #print(CR)
    # Positions of the pixels holding the right-minimum value.
    MR = np.argwhere(im == w2+peak)
    #print(MR)
    # 9-bit big-endian encoder for pixel coordinates.
    bin9 = lambda x : ''.join(reversed( [str((x >> i) & 1) for i in range(9)] ) )
    chieudaiB = giatripeak + giatriketiep
    xixo1 = ''
    xixo2 = ''
    for i in range(1,len(ML)+1):
        xixo1 = xixo1 + bin9(ML[i-1,0]) + bin9(ML[i-1,1])
    for i in range(1,len(MR)+1):
        xixo2 = xixo2 + bin9(MR[i-1,0]) + bin9(MR[i-1,1])
    MLL = xixo1
    MRR = xixo2
    #print(MLL,MRR)
    thongtinphu= V + minL + CL + MLL + minR + CR + MRR
    # Capacity left for the actual message after the side information.
    chieudainhung = chieudaiB - len(thongtinphu)
    # 750 > 560 signature bits + 49 MSSV bits + 10 length bits + note.
    if chieudainhung > 750:
        chuki=sign
        MSSV=MSSV
        Ghichu=Ghichu
        bin7 = lambda x : ''.join(reversed( [str((x >> i) & 1) for i in range(7)] ) )
        bin10 = lambda x : ''.join(reversed( [str((x >> i) & 1) for i in range(10)] ) )
        # MSSV: 7 bits per character.
        MSSV=[ord(c) for c in MSSV]
        MSSV=[(bin7(a)) for a in MSSV]
        MSSV=''.join(MSSV)
        # Ghichu: 7 bits per character followed by its 10-bit bit-length.
        a= [ord(c) for c in Ghichu]
        a=[(bin7(a)) for a in a]
        a=''.join(a)
        b=len(a)
        b1=b
        b=bin10(b)
        a=a+b
        Ghichu=a
        bin14 = lambda x : ''.join(reversed( [str((x >> i) & 1) for i in range(14)] ) )
        # Signature: exactly 40 values of 14 bits each (560 bits).
        W=''
        for i in range(0,40):
            W = W + bin14(chuki[i])
        # Left-pad with zeros to fill the remaining capacity exactly.
        zeros = [0] * (chieudainhung-560-b1-49-10)
        zeros = ''.join(str(e) for e in zeros)
        #print(MSSV)
        #print(Ghichu)
        W = zeros +MSSV+Ghichu+ W
        #print(W)
        B= thongtinphu + W
        #print(thongtinphu)
        # Create the embedding pairs (peak, peak-1) and (peak+1, peak+2) by
        # shifting the histogram away from the peak on both sides.
        for i in range(0,size[0]):
            for j in range(0,size[1]):
                if im[i,j] in range(w1+1,peak):
                    im[i,j] = im[i,j]-1
        for i in range(0,size[0]):
            for j in range(0,size[1]):
                if im[i,j] in range(peak+2,w2+peak):
                    im[i,j] = im[i,j]+1
        # Embed the bit string B into pixels valued peak or peak+1.
        k=-1
        for i in range(0,size[0]):
            for j in range(0,size[1]):
                if im[i,j] in range(peak,peak+2):
                    k = k+1
                    #print(k)
                    if int(B[k]) == 0:
                        im[i,j] = im[i,j]
                    elif int(B[k]) == 1:
                        if im[i,j] == peak:
                            im[i,j] = im[i,j]-1
                        else:
                            im[i,j] = im[i,j]+1
        # Restore the reserved pixels from the untouched copy...
        for i in range(0,8):
            im[0,i] = img[0,i]
        # ...then store the 8-bit peak value in their least-significant bits.
        mask = 1 << 0
        peak = '{0:08b}'.format(peak)
        for i in range(0,8):
            im[0,i] = (im[0,i] & ~mask) | ((int(peak[i]) << 0) & mask)
        #cv2.imwrite('lan2.tiff',im)
        #print(size)
        #print(len(B))
        #cv2.imshow('image',im)
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()
        return im
    else:
        a='Hình này không phù hợp cho việc nhúng'
        return a
def giainhung(anh):
    """Extract the payload embedded by nhung() and restore the image.

    Reads the peak from the LSBs of the 8 reserved pixels, rebuilds the
    embedded bit string from the pixels around the peak, parses the side
    information (V, minL/CL/ML, minR/CR/MR), undoes the histogram shifts,
    restores the minima and reserved pixels, and finally decodes the
    40-value signature, the note and the student id.

    Returns (chuki, Ghichu, MSSV, im): the signature as a list of ints,
    the decoded note and student-id strings, and the restored image.
    """
    np.set_printoptions(threshold=sys.maxsize)
    img = cv2.imread(anh,cv2.IMREAD_GRAYSCALE)
    im = cv2.imread(anh,cv2.IMREAD_GRAYSCALE)
    size = im.shape
    # Step 2: recover the peak value from the LSBs of the reserved pixels.
    i1 = '{0:08b}'.format(im[0,0])
    i2 = '{0:08b}'.format(im[0,1])
    i3 = '{0:08b}'.format(im[0,2])
    i4 = '{0:08b}'.format(im[0,3])
    i5 = '{0:08b}'.format(im[0,4])
    i6 = '{0:08b}'.format(im[0,5])
    i7 = '{0:08b}'.format(im[0,6])
    i8 = '{0:08b}'.format(im[0,7])
    peak = i1[7] + i2[7] + i3[7] + i4[7] + i5[7] + i6[7] + i7[7] + i8[7]
    peak = int(peak,2)
    #print(peak)
    # Mask the reserved pixels so the scan below skips them.
    # NOTE(review): 256 overflows uint8 — presumably wraps to 0; they are
    # restored from `img` at the end. Confirm dtype.
    for i in range(0,8):
        im[0,i] = 256
    # Step 3: rebuild the embedded bit string B from the peak-area pixels:
    # peak / peak+1 decode to '0', peak-1 / peak+2 decode to '1'.
    B=''
    k=-1
    for i in range(0,size[0]):
        for j in range(0,size[1]):
            if im[i,j] in range(peak-1,peak+3):
                k = k+1
                if im[i,j] in range(peak,peak+2):
                    B = B + '0'
                else:
                    B = B + '1'
    #print('day nhung B la',B)
    # Step 4: parse the side information, field by field.
    # V: the 8 original LSBs of the reserved pixels.
    V=''
    for i in range(0,8):
        V=V + B[i]
    #print('Gia tri của V',V)
    # minL: histogram minimum left of the peak.
    minL=''
    for i in range(8,16):
        minL=minL + B[i]
    #print('Gia tri của minL',minL)
    # CL: number of pixels at the left minimum.
    CL=''
    for i in range(16,24):
        CL=CL + B[i]
    #print('Gia tri của CL',CL)
    # ML: their coordinates, 2 x 9 bits per pixel.
    lem = 9*2*int(CL,2)
    ML =''
    for i in range(24,24+lem):
        ML=ML + B[i]
    #print('Gia tri của ML',ML)
    # The remainder of B after ML.
    BL=''
    for i in range(24+lem,len(B)):
        BL=BL + B[i]
    # minR: histogram minimum right of the peak.
    minR = ''
    for i in range(0,8):
        minR=minR + BL[i]
    #print('Gia tri của minR',minR)
    # CR: number of pixels at the right minimum.
    CR = ''
    for i in range(8,16):
        CR=CR + BL[i]
    #print('Gia tri của CR',CR)
    # MR: their coordinates, 2 x 9 bits per pixel.
    lemm = 9*2*int(CR,2)
    MR =''
    for i in range(16,16+lemm):
        MR=MR + BL[i]
    #print('Gia tri của MR',MR)
    #print(len(MR))
    # W: the message payload (padding + MSSV + Ghichu + signature).
    W=''
    for i in range(16+lemm,len(BL)):
        W=W + BL[i]
    #print(len(W))
    minL = int(minL,2)
    minR = int(minR,2)
    #print(len(W))
    # Step 5.2: undo the histogram shift between the two minima.
    # NOTE(review): 999 overflows uint8 as well — same masking trick.
    for i in range(0,8):
        im[0,i] =999
    for i in range(0,size[0]):
        for j in range(0,size[1]):
            if im[i,j] in range(minL+1,minR):
                if im[i,j] < peak:
                    im[i,j] = im[i,j] +1
                elif im[i,j] > (peak +1):
                    im[i,j] = im[i,j] -1
    # Decode the left-minimum coordinate list into integer pairs.
    MLL = ''
    LM1 = ','
    LM=''
    for i in range(0,len(ML),9):
        MLL1 =MLL + ML[i]+ML[i+1]+ML[i+2]+ML[i+3]+ML[i+4]+ML[i+5]+ML[i+6]+ML[i+7]+ML[i+8]
        LM = LM +LM1 + str(int(MLL1,2))
    LM = LM[1:]
    LM = LM.split(',')
    #print(LM)
    # Decode the right-minimum coordinate list into integer pairs.
    MRR = ''
    RM1 = ','
    RM = ''
    for i in range(0,len(MR),9):
        MRR1 =MRR + MR[i]+MR[i+1]+MR[i+2]+MR[i+3]+MR[i+4]+MR[i+5]+MR[i+6]+MR[i+7]+MR[i+8]
        RM = RM + RM1 + str(int(MRR1,2))
    RM = RM[1:]
    RM = RM.split(',')
    #print(RM)
    # Step 5.3: restore pixels that originally held the minL value.
    tet = np.argwhere(im == minL)
    #print(tet)
    for i in range(0,len(tet)):
        im[tet[i,0],tet[i,1]] = im[tet[i,0],tet[i,1]]+1
    if LM != ['']:
        for i in range(0,len(LM),2):
            im[int(LM[i]),int(LM[i+1])] = im[int(LM[i]),int(LM[i+1])]-1
    # Step 5.3: restore pixels that originally held the minR value.
    tet = np.argwhere(im == minR)
    #print(tet)
    #print(len(tet))
    for i in range(0,len(tet)):
        im[tet[i,0],tet[i,1]] = im[tet[i,0],tet[i,1]]-1
    if RM != ['']:
        for i in range(0,len(RM),2):
            im[int(RM[i]),int(RM[i+1])] = im[int(RM[i]),int(RM[i+1])]+1
    tet = np.argwhere(im == minR)
    #print(tet)
    #print(len(tet))
    # Restore the reserved pixels and put back their original LSBs (V).
    for i in range(0,8):
        im[0,i] = img[0,i]
    mask = 1 << 0
    for i in range(0,8):
        im[0,i] = (im[0,i] & ~mask) | ((int(V[i]) << 0) & mask)
    # Split W from the end: 560 signature bits, then a 10-bit note length,
    # then the note, then 49 bits (7 chars) of MSSV.
    sign=W[len(W)-560:]
    lenG=W[len(W)-560-10:len(W)-560]
    lenG=int(lenG,2)
    #print(lenG)
    Ghichu=W[len(W)-560-10-lenG:len(W)-560-10]
    MSSV=W[len(W)-560-10-lenG-49:len(W)-560-10-lenG]
    W1=Ghichu
    WWW=MSSV
    #print(len(W1))
    #print(WWW)
    W=sign
    # Signature: forty 14-bit integers.
    e=''
    p=''
    q=','
    for i in range(0,560,14):
        e = W[i]+W[i+1]+W[i+2]+W[i+3]+W[i+4]+W[i+5]+W[i+6]+W[i+7]+W[i+8]+W[i+9]+W[i+10]+W[i+11]+W[i+12]+W[i+13]
        p = p + str(int(e,2))+q
    p = p.split(',')
    p = p[:40]
    p= [int(i) for i in p]
    chuki=p
    #print(p)
    # Note: 7-bit characters back to text.
    e=''
    p=''
    q=','
    for i in range(0,len(W1),7):
        e = W1[i]+W1[i+1]+W1[i+2]+W1[i+3]+W1[i+4]+W1[i+5]+W1[i+6]
        p = p + str(int(e,2))+q
    p = p.split(',')
    p = p[:int(len(W1)/7)]
    p= [int(i) for i in p]
    p= [chr(p) for p in p]
    e=''
    for i in range(0,len(p)):
        e=e+str(p[i])
    Ghichu=e
    # Student id: 7-bit characters back to text.
    e=''
    p=''
    q=','
    for i in range(0,len(WWW),7):
        e = WWW[i]+WWW[i+1]+WWW[i+2]+WWW[i+3]+WWW[i+4]+WWW[i+5]+WWW[i+6]
        p = p + str(int(e,2))+q
    p = p.split(',')
    p = p[:int(len(WWW)/7)]
    p= [int(i) for i in p]
    p= [chr(p) for p in p]
    e=''
    for i in range(0,len(p)):
        e=e+str(p[i])
    MSSV=e
    #cv2.imwrite(r'D:\luanvan 11-12\hinh4trich.bmp',im)
    #cv2.imshow('image',im)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    return (chuki,Ghichu,MSSV,im)
def ok(request):
    # Minimal view: always renders the login page, regardless of request
    # state. ``render`` is presumably imported from django.shortcuts at the
    # top of this file (not visible in this chunk) — confirm.
    return render(request,'VCS/login.html')
|
{"/VCS/forms.py": ["/VCS/models.py"], "/VCS/views.py": ["/VCS/forms.py", "/VCS/models.py"], "/VCS/admin.py": ["/VCS/models.py"]}
|
4,392
|
obiito007/HungBK
|
refs/heads/master
|
/VCS/migrations/0003_teach_file.py
|
# Generated by Django 2.2.5 on 2019-11-05 12:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.5): adds an optional image-upload field
    # ``File`` to the ``Teach`` model.

    dependencies = [
        ('VCS', '0002_thongtin'),
    ]
    operations = [
        migrations.AddField(
            model_name='teach',
            name='File',
            # null=True keeps existing rows valid; upload_to='' stores the
            # uploads directly under MEDIA_ROOT (migration 0005 later moves
            # them into 'Taokhoa/').
            field=models.ImageField(null=True, upload_to=''),
        ),
    ]
|
{"/VCS/forms.py": ["/VCS/models.py"], "/VCS/views.py": ["/VCS/forms.py", "/VCS/models.py"], "/VCS/admin.py": ["/VCS/models.py"]}
|
4,393
|
obiito007/HungBK
|
refs/heads/master
|
/VCS/migrations/0005_auto_20191106_1411.py
|
# Generated by Django 2.2.5 on 2019-11-06 07:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.5): relocates ``Teach.File`` uploads from
    # the MEDIA_ROOT top level into the 'Taokhoa/' subdirectory.

    dependencies = [
        ('VCS', '0004_auto_20191105_2024'),
    ]
    operations = [
        migrations.AlterField(
            model_name='teach',
            name='File',
            field=models.ImageField(null=True, upload_to='Taokhoa/'),
        ),
    ]
|
{"/VCS/forms.py": ["/VCS/models.py"], "/VCS/views.py": ["/VCS/forms.py", "/VCS/models.py"], "/VCS/admin.py": ["/VCS/models.py"]}
|
4,394
|
obiito007/HungBK
|
refs/heads/master
|
/VCS/models.py
|
from django.db import models
# Create your models here.
class Taokhoa(models.Model):
    """Key-generation input record ("Tạo khóa" ≈ "create key" — presumed
    from the name).

    Holds two numeric parameters ``a``/``b`` and identifies a teacher by
    display name (``Ten``) and staff code (``MSCB``). What ``a`` and ``b``
    mean exactly (e.g. primes for a key pair) is not visible in this model —
    confirm against the views that consume it.
    """

    # Fixed set of selectable teachers as (stored value, display label).
    teach_choice = (
        ('Nguyễn Văn A','Nguyễn Văn A'),
        ('Trần Văn B','Trần Văn B'),
        ('Phạm Thị C','Phạm Thị C'),
    )
    a = models.FloatField()
    b = models.FloatField()
    # Teacher name, restricted to teach_choice.
    Ten = models.CharField(max_length=100,choices=teach_choice)
    # Staff code; nullable.
    MSCB = models.CharField(null=True,max_length=7)
class Teach(models.Model):
    """A signing record.

    ``KhoaE``/``KhoaN`` look like a public-key pair (exponent/modulus —
    presumed from the names "khóa" = "key"; confirm in the views), plus a
    link, a note (``Ghichu``), a student id (``MSSV``) and an image upload.
    """

    KhoaE = models.FloatField()
    KhoaN = models.FloatField()
    Link = models.TextField(max_length=1000)
    Ghichu=models.CharField(max_length=100)
    MSSV = models.CharField(max_length=25,default='')
    # Uploads are stored under MEDIA_ROOT/Taokhoa/ (see migration 0005).
    File = models.ImageField(null=True,upload_to='Taokhoa/')
class Teacher(models.Model):
    """A registered teacher ("Giáo viên") with an optional photo.

    ``MSCB`` (staff code) is nullable, which made the original ``__str__``
    unsafe: ``__str__`` must return ``str``, and returning ``None`` raises
    ``TypeError`` anywhere Django stringifies the object (admin change
    list, templates, logging).
    """

    # Fixed set of selectable teachers as (stored value, display label).
    teach_choice = (
        ('Nguyễn Văn A','Nguyễn Văn A'),
        ('Trần Văn B','Trần Văn B'),
        ('Phạm Thị C','Phạm Thị C'),
    )
    Giaovien = models.CharField(max_length=1000,choices=teach_choice)
    # Photo uploads are sharded by date under MEDIA_ROOT/images/.
    image= models.ImageField(upload_to='images/%Y/%m/%d/', blank=True, null=True )
    # Set once, when the row is first created.
    date=models.DateTimeField(auto_now_add=True)
    MSCB = models.CharField(max_length=7,null=True)

    def __str__(self):
        # BUG FIX: MSCB may be NULL; fall back to '' so __str__ always
        # returns a str instead of crashing with TypeError.
        return self.MSCB or ''
class List(models.Model):
    """Directory entry of a teacher's published public key.

    ``Pubkey``/``PubN`` are stored as strings; ``UserRegis`` presumably
    records which user account registered the entry (inferred from the
    name — confirm against the views).

    NOTE: the class name shadows the builtin ``list`` if imported unqualified.
    """

    Giaovien = models.CharField(max_length= 100)
    Pubkey = models.CharField(max_length=100)
    PubN = models.CharField(max_length= 100)
    MSCB = models.CharField(max_length=7,null=True)
    UserRegis = models.CharField(max_length=100,null=True)
    def __str__(self):
        # Giaovien is non-nullable, so this always returns a str.
        return self.Giaovien
class Xac(models.Model):
    """Verification request ("Xác thực" ≈ "verify" — presumed from the
    app's 'xacthuc/' URL): selects a teacher by name and staff code."""

    # Same fixed teacher list as the other models in this file.
    teach_choice = (
        ('Nguyễn Văn A','Nguyễn Văn A'),
        ('Trần Văn B','Trần Văn B'),
        ('Phạm Thị C','Phạm Thị C'),
    )
    Giaovien = models.CharField(max_length=1000,choices=teach_choice)
    MSCB = models.CharField(max_length=7,null=True)
class Thongtin(models.Model):
    """Status/log row ("Thông tin" = "information") for a signing action.

    NOTE(review): ``Ghichu`` is capped at 7 chars here while ``Teach.Ghichu``
    allows 100 — looks inconsistent; confirm which limit is intended.
    ``date`` is stored as free text rather than a DateTimeField, so it
    cannot be reliably sorted or filtered by time.
    """

    Giaovien=models.CharField(max_length=100)
    MSSV = models.CharField(max_length=100)
    Trangthai =models.CharField(max_length=20)
    Ghichu = models.CharField(max_length=7)
    MSCB = models.CharField(max_length=7,null=True)
    date = models.CharField(max_length=100,null=True)
|
{"/VCS/forms.py": ["/VCS/models.py"], "/VCS/views.py": ["/VCS/forms.py", "/VCS/models.py"], "/VCS/admin.py": ["/VCS/models.py"]}
|
4,395
|
obiito007/HungBK
|
refs/heads/master
|
/VCS/urls.py
|
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
# URL routes for the VCS app. Auth views reuse Django's built-in
# LoginView/LogoutView; everything else maps to functions in views.py.
urlpatterns = [
    path('', views.index),
    path('kianh/',views.teach),
    path('tao/',views.index1),
    path('list/',views.list),
    #path('VCS/<int:id>/',views.Poss),
    path('up/',views.upload_file, name='upload_file'),
    path('bang/',views.bang),
    path('xacthuc/',views.xac),
    path('login/',auth_views.LoginView.as_view(template_name="VCS/login.html"), name="login"),
    path('logout/',auth_views.LogoutView.as_view(next_page='/'),name='logout'),
    path('register/',views.register,name='register'),
    path('danh/',views.danh),
    # BUG FIX: the trailing '$' was regex syntax left over from url();
    # path() matches it literally, so the export view was only reachable at
    # '/export/xls/$'. The reverse name is unchanged, so {% url %} callers
    # are unaffected.
    path('export/xls/', views.export_users_xls, name='export_users_xls'),
]
|
{"/VCS/forms.py": ["/VCS/models.py"], "/VCS/views.py": ["/VCS/forms.py", "/VCS/models.py"], "/VCS/admin.py": ["/VCS/models.py"]}
|
4,396
|
obiito007/HungBK
|
refs/heads/master
|
/VCS/migrations/0004_auto_20191105_2024.py
|
# Generated by Django 2.2.5 on 2019-11-05 13:24
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.5): narrows two ``Thongtin`` text columns
    # (Ghichu 100 -> 7 chars, Trangthai 100 -> 20 chars). Existing values
    # longer than the new limits may be truncated/rejected by the backend.

    dependencies = [
        ('VCS', '0003_teach_file'),
    ]
    operations = [
        migrations.AlterField(
            model_name='thongtin',
            name='Ghichu',
            field=models.CharField(max_length=7),
        ),
        migrations.AlterField(
            model_name='thongtin',
            name='Trangthai',
            field=models.CharField(max_length=20),
        ),
    ]
|
{"/VCS/forms.py": ["/VCS/models.py"], "/VCS/views.py": ["/VCS/forms.py", "/VCS/models.py"], "/VCS/admin.py": ["/VCS/models.py"]}
|
4,397
|
obiito007/HungBK
|
refs/heads/master
|
/VCS/migrations/0010_list_userregis.py
|
# Generated by Django 2.2.5 on 2019-11-16 03:29
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.5): adds the nullable ``UserRegis`` column
    # to the ``List`` model (name suggests it records the registering user —
    # confirm against the views).

    dependencies = [
        ('VCS', '0009_auto_20191106_1556'),
    ]
    operations = [
        migrations.AddField(
            model_name='list',
            name='UserRegis',
            field=models.CharField(max_length=100, null=True),
        ),
    ]
|
{"/VCS/forms.py": ["/VCS/models.py"], "/VCS/views.py": ["/VCS/forms.py", "/VCS/models.py"], "/VCS/admin.py": ["/VCS/models.py"]}
|
4,398
|
obiito007/HungBK
|
refs/heads/master
|
/VCS/migrations/0002_thongtin.py
|
# Generated by Django 2.2.5 on 2019-11-05 02:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.5): creates the ``Thongtin`` table with its
    # original wide (max_length=100) text columns; migration 0004 later
    # narrows Ghichu and Trangthai.

    dependencies = [
        ('VCS', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Thongtin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Giaovien', models.CharField(max_length=100)),
                ('MSSV', models.CharField(max_length=100)),
                ('Trangthai', models.CharField(max_length=100)),
                ('Ghichu', models.CharField(max_length=100)),
            ],
        ),
    ]
|
{"/VCS/forms.py": ["/VCS/models.py"], "/VCS/views.py": ["/VCS/forms.py", "/VCS/models.py"], "/VCS/admin.py": ["/VCS/models.py"]}
|
4,399
|
obiito007/HungBK
|
refs/heads/master
|
/VCS/admin.py
|
from django.contrib import admin
from .models import Taokhoa
from .models import Teach,Teacher,List,Thongtin
# Register your models here.
# Teacher, List and Thongtin are editable through the Django admin site.
# Teach and Taokhoa are left unregistered (commented out below) —
# presumably managed through the app's own views instead; confirm before
# re-enabling.
#admin.site.register(Teach)
#admin.site.register(Taokhoa)
admin.site.register(Teacher)
admin.site.register(List)
admin.site.register(Thongtin)
|
{"/VCS/forms.py": ["/VCS/models.py"], "/VCS/views.py": ["/VCS/forms.py", "/VCS/models.py"], "/VCS/admin.py": ["/VCS/models.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.