code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
"""
Utilities
=========
This module holds all the core utility functions used throughout the library.
These functions are intended to simplify common tasks and to make their
output and functionality consistent where needed.
"""
import json
import os
from typing import Dict, List
import numpy as np # type: ignore
from comtypes.client import CreateObject # type: ignore
from _ctypes import COMError # type: ignore
from frewpy.models.exceptions import FrewError, NodeError
def _check_frew_path(file_path) -> None:
    """Validate that *file_path* points to an existing `.fwd` Frew model.

    Raises FrewError when the path is not a string, does not exist on
    disk, or does not have a `.fwd` extension.
    """
    if not isinstance(file_path, str):
        raise FrewError("The path must be a string.")
    path_exists = os.path.exists(file_path)
    if not path_exists:
        raise FrewError("Path to Frew model does not exist.")
    is_fwd_model = file_path.lower().endswith(".fwd")
    if not is_fwd_model:
        raise FrewError("Path must be to a valid Frew model.")
def model_to_json(file_path) -> str:
    """ Converts a `.fwd` Frew model to a `.json` Frew model.

    Parameters
    ----------
    file_path : str
        Absolute file path to the '.fwd' Frew model.

    Returns
    -------
    json_path : str
        The new file path of the json file.

    Raises
    ------
    FrewError
        If the COM object cannot be created or the model cannot be opened.
    """
    _check_frew_path(file_path)
    json_path: str = f'{file_path.rsplit(".", 1)[0]}.json'
    try:
        model = CreateObject("frewLib.FrewComAuto")
    except OSError:
        # NOTE(review): this deletes the *input* .fwd model when the COM
        # object cannot be created — confirm this destructive cleanup is
        # really intended before relying on it.
        os.remove(file_path)
        raise FrewError("Failed to create a COM object.")
    try:
        model.Open(file_path)
    except COMError:
        # Close the COM object before bailing out so a Frew instance is
        # not left running in the background (leak in the original code).
        model.Close()
        raise FrewError("Failed to open the Frew model.")
    model.SaveAs(json_path)
    model.Close()
    return json_path
def check_json_path(file_path: str) -> None:
    """ Checks whether the path is a valid json path.

    Parameters
    ----------
    file_path : str
        Absolute file path to the Frew model.

    Raises
    ------
    FrewError
        If the path is not a string, doesn't exist or is not to a json file.
    """
    if not isinstance(file_path, str):
        raise FrewError(
            f"""
            File path must be a string. Value {file_path} of type {type(file_path)} given.
            """
        )
    if not os.path.exists(file_path):
        # Message grammar fixed ("does not exists" -> "does not exist")
        raise FrewError("Frew model file path does not exist.")
    if not file_path.lower().endswith(".json"):
        raise FrewError(
            """
            File extension must be a .json. Please use model_to_json to
            convert it. Import this function from frewpy.utils.
            """
        )
def load_data(file_path: str) -> Dict[str, list]:
    """Read the json model file and return its contents as a dictionary.

    Parameters
    ----------
    file_path : str
        Absolute file path to the Frew model.

    Returns
    -------
    Dict[str, list]
        The model data parsed from the json file.
    """
    with open(file_path) as model_file:
        return json.load(model_file)
def clear_results(json_data: dict) -> dict:
    """Strip any existing 'Frew Results' entry so the model can be
    re-analysed through the COM interface.

    Parameters
    ----------
    json_data : dict
        A Python dictionary of the data held within the json model file.

    Returns
    -------
    dict
        The same dictionary, modified in place, without the results.
    """
    # Truthiness guard mirrors the original .get(..., False): a
    # present-but-empty results entry is deliberately left alone.
    if json_data.get("Frew Results"):
        json_data.pop("Frew Results")
    return json_data
def get_titles(json_data: dict) -> Dict[str, str]:
    """Return the project title block stored in the model's OasysHeader.

    Parameters
    ----------
    json_data : dict
        A Python dictionary of the data held within the json model file.

    Returns
    -------
    Dict[str, str]
        Job Number, Job Title, Sub Title, Calculation Heading, Initials
        and Notes.

    Raises
    ------
    FrewError
        When the header or its titles entry is missing.
    """
    try:
        header = json_data["OasysHeader"][0]
        return header["Titles"][0]
    except (KeyError, IndexError):
        raise FrewError("Unable to retreive title information.")
def get_file_history(json_data: dict) -> List[Dict[str, str]]:
    """Return the open/save audit trail recorded in the Frew model.

    Parameters
    ----------
    json_data : dict
        A Python dictionary of the data held within the json model file.

    Returns
    -------
    List[Dict[str, str]]
        Records of when the file has been opened in Frew and by whom.

    Raises
    ------
    FrewError
        When the model has no 'File history' entry.
    """
    if "File history" not in json_data:
        raise FrewError("Unable to retreive file history.")
    return json_data["File history"]
def get_file_version(json_data: Dict[str, list]) -> str:
    """Return the exact Frew build that wrote the model file.

    Parameters
    ----------
    json_data : Dict[str, list]
        A Python dictionary of the data held within the json model file.

    Returns
    -------
    str
        The file version string from the model header.

    Raises
    ------
    FrewError
        When the header or program-title entry is missing.
    """
    try:
        program_title = json_data["OasysHeader"][0]["Program title"][0]
        return program_title["FileVersion"]
    except (KeyError, IndexError):
        raise FrewError("Unable to retreive file version.")
def get_frew_version(json_data: dict) -> str:
    """Return the overall Frew version in which the model was created.

    Parameters
    ----------
    json_data : dict
        A Python dictionary of the data held within the json model file.

    Returns
    -------
    str
        The Frew version string from the model header.

    Raises
    ------
    FrewError
        When the header or program-title entry is missing.
    """
    try:
        program_title = json_data["OasysHeader"][0]["Program title"][0]
        return program_title["Version"]
    except (KeyError, IndexError):
        raise FrewError("Unable to retreive Frew model version.")
def get_num_stages(json_data: dict) -> int:
    """Return how many construction stages the Frew model defines.

    Parameters
    ----------
    json_data : dict
        A Python dictionary of the data held within the json model file.

    Returns
    -------
    int
        The number of stages in the Frew model.

    Raises
    ------
    FrewError
        When the model has no 'Stages' entry.
    """
    try:
        stages = json_data["Stages"]
    except KeyError:
        raise FrewError("Unable to retrieve the number of stages.")
    return len(stages)
def get_stage_names(json_data: dict) -> List[str]:
    """Return the name of every stage, in stage order.

    Parameters
    ----------
    json_data : dict
        A Python dictionary of the data held within the json model file.

    Returns
    -------
    List[str]
        A list of the names of stages within the Frew model.
    """
    names: List[str] = []
    for stage_idx in range(get_num_stages(json_data)):
        names.append(json_data["Stages"][stage_idx]["Name"])
    return names
def get_num_nodes(json_data: dict) -> int:
    """ Returns the number of nodes in the Frew model.

    Parameters
    ----------
    json_data : dict
        A Python dictionary of the data held within the json model file.

    Returns
    -------
    num_nodes : int
        The number of nodes present in each stage. This will always
        just be 1 integer, and the function will raise an error if it is not
        the same for every stage. Returns 0 when any stage has no
        'GeoFrewNodes' entry.

    Raises
    ------
    NodeError
        If the node count differs between stages (or there are no stages).
    """
    num_stages = get_num_stages(json_data)
    num_nodes: List[int] = []
    for stage in range(num_stages):
        if not json_data["Stages"][stage].get("GeoFrewNodes", False):
            return 0
        num_nodes.append(len(json_data["Stages"][stage]["GeoFrewNodes"]))
    # A plain set suffices for the uniqueness check; this also returns a
    # built-in int (the original numpy.unique path returned numpy.int64,
    # contradicting the annotated return type).
    if len(set(num_nodes)) == 1:
        return num_nodes[0]
    raise NodeError("Number of nodes is not unique for every stage.")
def get_num_design_cases(json_data: dict) -> int:
    """Return how many design cases the analysed model contains.

    Parameters
    ----------
    json_data : dict
        A Python dictionary of the data held within the json model file.

    Returns
    -------
    int
        The number of design cases in the Frew model.
    """
    check_results_present(json_data)
    design_cases = json_data["Frew Results"]
    return len(design_cases)
def get_design_case_names(json_data: dict) -> List[str]:
    """Return the partial-factor-set name of each design case.

    Parameters
    ----------
    json_data : dict
        A Python dictionary of the data held within the json model file.

    Returns
    -------
    List[str]
        The names of the design cases in the Frew model.
    """
    check_results_present(json_data)
    names = []
    for design_case in json_data["Frew Results"]:
        names.append(design_case["GeoPartialFactorSet"]["Name"])
    return names
def check_results_present(json_data: dict):
    # Guard used by the results-based getters: fail fast when the model
    # has not been analysed yet (no truthy 'Frew Results' entry).
    if json_data.get("Frew Results"):
        return
    raise FrewError(
        """
            No results in the model, please analyse the model first.
            """
    )
|
[
"os.remove",
"frewpy.models.exceptions.FrewError",
"os.path.exists",
"numpy.array",
"comtypes.client.CreateObject",
"frewpy.models.exceptions.NodeError"
] |
[((7631, 7690), 'frewpy.models.exceptions.NodeError', 'NodeError', (['"""Number of nodes is not unique for every stage."""'], {}), "('Number of nodes is not unique for every stage.')\n", (7640, 7690), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((580, 619), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""The path must be a string."""'], {}), "('The path must be a string.')\n", (589, 619), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((631, 656), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (645, 656), False, 'import os\n'), ((672, 719), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Path to Frew model does not exist."""'], {}), "('Path to Frew model does not exist.')\n", (681, 719), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((781, 829), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Path must be to a valid Frew model."""'], {}), "('Path must be to a valid Frew model.')\n", (790, 829), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((1249, 1284), 'comtypes.client.CreateObject', 'CreateObject', (['"""frewLib.FrewComAuto"""'], {}), "('frewLib.FrewComAuto')\n", (1261, 1284), False, 'from comtypes.client import CreateObject\n'), ((2117, 2142), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (2131, 2142), False, 'import os\n'), ((2158, 2208), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Frew model file path does not exists."""'], {}), "('Frew model file path does not exists.')\n", (2167, 2208), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((2271, 2443), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""\n File extension must be a .json. Please use model_to_json to\n convert it. Import this function from frewpy.utils.\n """'], {}), '(\n """\n File extension must be a .json. Please use model_to_json to\n convert it. 
Import this function from frewpy.utils.\n """\n )\n', (2280, 2443), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((8773, 8878), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""\n No results in the model, please analyse the model first.\n """'], {}), '(\n """\n No results in the model, please analyse the model first.\n """\n )\n', (8782, 8878), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((1313, 1333), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (1322, 1333), False, 'import os\n'), ((1348, 1391), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Failed to create a COM object."""'], {}), "('Failed to create a COM object.')\n", (1357, 1391), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((1466, 1509), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Failed to open the Frew model."""'], {}), "('Failed to open the Frew model.')\n", (1475, 1509), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((3914, 3964), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Unable to retreive title information."""'], {}), "('Unable to retreive title information.')\n", (3923, 3964), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((4002, 4052), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Unable to retreive title information."""'], {}), "('Unable to retreive title information.')\n", (4011, 4052), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((4530, 4575), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Unable to retreive file history."""'], {}), "('Unable to retreive file history.')\n", (4539, 4575), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((5062, 5107), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Unable to retreive file version."""'], {}), "('Unable to retreive file version.')\n", (5071, 5107), False, 'from 
frewpy.models.exceptions import FrewError, NodeError\n'), ((5145, 5190), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Unable to retreive file version."""'], {}), "('Unable to retreive file version.')\n", (5154, 5190), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((5666, 5717), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Unable to retreive Frew model version."""'], {}), "('Unable to retreive Frew model version.')\n", (5675, 5717), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((5755, 5806), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Unable to retreive Frew model version."""'], {}), "('Unable to retreive Frew model version.')\n", (5764, 5806), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((6216, 6269), 'frewpy.models.exceptions.FrewError', 'FrewError', (['"""Unable to retrieve the number of stages."""'], {}), "('Unable to retrieve the number of stages.')\n", (6225, 6269), False, 'from frewpy.models.exceptions import FrewError, NodeError\n'), ((7530, 7549), 'numpy.array', 'np.array', (['num_nodes'], {}), '(num_nodes)\n', (7538, 7549), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# Author: <NAME> (<EMAIL>)
##########################
# Plotting configuration
##########################
from XtDac.DivideAndConquer import matplotlibConfig
# Use a matplotlib backend which does not show plots to the user
# (they will be saved in files)
import matplotlib
matplotlib.use("Agg")
# Apply the library-wide matplotlib defaults key by key. Use dict.items()
# rather than the Python-2-only .iteritems() so this module also imports
# cleanly under Python 3 (items() behaves identically here on Python 2).
rcParams = matplotlibConfig.getConfig()
for k, v in rcParams.items():
    matplotlib.rcParams[k] = v
###########################
import argparse
import os
import sys
import functools
import numpy
import warnings
import math
import logging
import multiprocessing
import scipy.stats
import time as timemod
try:
import astropy.io.fits as pyfits
except:
# If this fail there is no way out
import pyfits
pass
from XtDac.DivideAndConquer import GridGen
from XtDac.DivideAndConquer import HardwareUnit
from XtDac.DivideAndConquer import InterestingRegion
from XtDac.DivideAndConquer import Results
from XtDac.DivideAndConquer import TimeIntervalConsolidator
from XtDac.DivideAndConquer import XMMWCS
from XtDac.DivideAndConquer import Box
from XtDac.FixedBinSearch import Likelihood
from XtDac.FixedBinSearch import fitsRegions
# Event columns and header shared as module-level globals: filled in by
# go() after reading the event file, and read by trueWorker() (and by the
# commented-out multiprocessing initializer above).
time, X, Y, tstart, tstop, event_header = (None, None, None, None, None, None)
# Set up the logger (its verbosity level will be changed later on
# using the value provided by the user)
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("XtDac")
def validFITSfile(arg):
    """Argparse ``type=`` validator: check *arg* is an existing FITS file.

    Returns the path unchanged on success; terminates the process with a
    non-zero exit status on failure (the original exited with status 0,
    which hides the error from calling scripts).
    """
    if not os.path.isfile(arg):
        log.error("The file %s does not exist!" % arg)
        sys.exit(1)  # non-zero so shells/pipelines can detect the failure
    # Try to open it to see if it is a FITS file
    try:
        fits_file = pyfits.open(arg)
    except Exception:
        # Narrowed from a bare except: a bare except would also swallow
        # KeyboardInterrupt/SystemExit. pyfits may raise several error
        # types on a malformed file, so Exception is the safe net here.
        log.error("The file %s is not a valid FITS file" % arg)
        sys.exit(1)
    else:
        fits_file.close()
    return arg
# def _initProcess(count, eventFile):
#
# global counter
# global time, X, Y, tstart, tstop
#
# counter = count
#
# # Read these here, so they will be read only one time for each of the
# # processes spawn by multiprocessing
# with pyfits.open(eventFile) as fitsFile:
#
# time = fitsFile['EVENTS'].data.field("TIME")
# X = fitsFile['EVENTS'].data.field("X")
# Y = fitsFile['EVENTS'].data.field("Y")
#
# # Sort by time
# idx = numpy.argsort(time)
# time = time[idx]
# X = X[idx]
# Y = Y[idx]
#
# tstart = fitsFile['EVENTS'].header.get("TSTART")
# tstop = fitsFile['EVENTS'].header.get("TSTOP")
#
def trueWorker(box_def, eventFile, nullHypProb, totRegions, bkgIdx=None):
    """Analyse one grid region with the Bayesian Blocks search.

    Builds a Box from *box_def*, feeds it the pre-loaded module-level
    event arrays (time, X, Y, tstart, tstop) and returns the list of
    interval edges found, or an empty list for regions that are empty,
    nearly empty, or mostly outside the detector.
    """
    region = Box.Box(*box_def)
    region.setEventfile(eventFile)
    region.readEvents(True, time, X, Y, tstart, tstop)
    if bkgIdx is not None:
        # Use custom background (usually when looking in regions close to
        # bright sources)
        region.setBackground(bkgIdx)
    # Short-circuit order matters: isEmpty() is checked before the
    # region statistics are inspected.
    not_worth_searching = (region.isEmpty()
                           or region.nInRegion < 2
                           or region.filledArea < 0.5)
    return [] if not_worth_searching else region.findExcesses(nullHypProb)
def array2string(array):
    """Render *array* as a comma-separated string using %s formatting."""
    return ",".join("%s" % item for item in array)
# from memory_profiler import profile
def _process_regions(eventfile, typeIerror, regions, n_cpus, log):
    """Run trueWorker over every region, serially or in a process pool.

    Parameters
    ----------
    eventfile : str
        Path to the event file each worker will analyse.
    typeIerror : float
        Type I error probability for the Bayesian Blocks search.
    regions : list
        Box definitions produced by the grid generator.
    n_cpus : int
        Number of worker processes; 1 selects the serial code path.
    log : logging.Logger
        Logger used for progress reporting.

    Returns
    -------
    list
        One result (list of interval edges) per region, in input order.
    """
    # Define a wrapper for the trueWorker to avoid duplicating all arguments
    workerWrapper = functools.partial(trueWorker,
                                      eventFile=eventfile,
                                      nullHypProb=typeIerror,
                                      totRegions=len(regions),
                                      bkgIdx=None)
    # Send each region to one worker to be processed,
    # and collect the results
    n_regions = len(regions)
    results = []
    # This is the unit for reporting progress
    chunk_size = 30
    # NOTE(review): under Python 3 `n_regions / 20` is a float, so
    # progress_unit can be a float; the modulo test below still works,
    # but confirm the intended reporting cadence when porting.
    progress_unit = max(chunk_size * n_cpus, n_regions / 20)
    #progress_unit = max(n_cpus, int(float(n_regions) / 10.0))
    if n_cpus > 1:
        # Parallel version
        log.debug("Starting a pool of %s python processes" % n_cpus)
        pool = multiprocessing.Pool(n_cpus,
                                    # initializer=_initProcess,
                                    # initargs=(counter, args.eventfile)
                                    )
        log.debug("Feeding the regions to the processes...")
        # imap preserves input order, so results[i] matches regions[i]
        for i, result in enumerate(pool.imap(workerWrapper, regions, chunksize=chunk_size)):
            if (i + 1) % progress_unit == 0 or (i + 1) == n_regions:
                log.info("%s out of %s regions completed (%.0f percent)" % (i + 1, n_regions,
                                                                            (i + 1) / float(n_regions) * 100))
            results.append(result)
        pool.close()
        pool.join()
    else:
        # Serial version
        log.debug("Using serial version (only 1 CPU)")
        for i, region in enumerate(regions):
            results.append(workerWrapper(region))
            if (i + 1) % progress_unit == 0 or (i + 1) == n_regions:
                log.info("%s out of %s regions completed (%.0f percent)" % (i + 1, n_regions,
                                                                            (i + 1) / float(n_regions) * 100))
    assert len(results) == n_regions, "Something went wrong in the computation. The number of results doesn't match " \
                                      "the number of regions"
    return results
# @profile
def go(args):
    """Run the full Divide-and-Conquer transient search on one event file.

    Pipeline: read and time-sort the events, build a grid of overlapping
    regions (PSF-adaptive for Chandra/ACIS), run the Bayesian Blocks search
    in each region (serially or in a process pool), consolidate overlapping
    candidate intervals and, when ``args.sigmaThreshold > 0``, compute a
    final likelihood-based significance for each candidate. Results are
    written to ``<root>_res.txt`` and, unless disabled, ``<root>_res.html``.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line options (see the parser in ``__main__``).
        Also reads the module-level global ``jobStart`` set in ``__main__``.
    """
    global time, X, Y, tstart, tstop, event_header
    # Set up the logger
    levels = {'info': logging.INFO, 'debug': logging.DEBUG}
    log.level = levels[args.verbosity]
    log.debug("Command line: %s" % (" ".join(sys.argv)))
    # Instantiate the HardwareUnit class
    log.debug("Probing Hardware Unit...")
    hwu = HardwareUnit.hardwareUnitFactory(args.eventfile)
    log.debug("probed %s" % hwu.getName())
    log.debug("done")
    log.debug("Reading event file...")
    # Remember: X, Y, tstart, tstop, time, event_header are global variables (module-level)
    with pyfits.open(args.eventfile) as fitsFile:
        # Get events
        X_ = fitsFile['EVENTS'].data.field("X")
        Y_ = fitsFile['EVENTS'].data.field("Y")
        time_ = fitsFile['EVENTS'].data.field("TIME")
        # Order them by time
        # Note that this is advanced slicing, hence it returns a COPY of X_, Y_ and time_.
        # That's why we delete them explicitly immediately after, to save memory
        idx = numpy.argsort(time_)
        time = time_[idx]
        X = X_[idx]
        Y = Y_[idx]
        event_header = fitsFile['EVENTS'].header
        # Get the start of the first GTI and the stop of the last one
        gti_starts = []
        gti_stops = []
        for ext in fitsFile[1:]:
            if ext.header['EXTNAME'] == 'GTI':
                gti_starts.append(ext.data.field("START").min())
                gti_stops.append(ext.data.field("STOP").max())
        # Since we might have randomized the times (in the Chandra pipeline, in the xtc_filter_event_file script),
        # we need to make sure that the tstart is either the beginning of the first GTI or the time of the first event
        tstart = min(min(gti_starts), time_.min() - 1e-3)
        tstop = max(max(gti_stops), time_.max() + 1e-3)
        # Now make arrays read-only so they will never be copied
        X.setflags(write=False)
        Y.setflags(write=False)
        time.setflags(write=False)
        # Save memory
        del X_, Y_, time_
    log.debug("done")
    # Instantiate the GridGen class and generate the grid
    # of regions
    log.debug("Generating grid...")
    if hwu.getName().find("ACIS")==0:
        # Chandra. Automatically compute the step size, as the average
        # size of the PSF in the detector
        try:
            import psf
            import caldb4
            from astropy.coordinates import SkyCoord
            import astropy.units as u
        except ImportError:
            raise RuntimeError("Cannot import psf and/or caldb4 and or astropy module from CIAO. "
                               "Is CIAO python configured?")
        cdb = caldb4.Caldb(telescope="CHANDRA", product="REEF")
        reef = cdb.search[0]
        extno = cdb.extno()
        # Replace the trailing '[..]' block number specifier
        reef = reef.split('[')[0] + "[{}]".format(extno + 1)
        pdata = psf.psfInit(reef)
        # Get the coordinates of the events at the corners of the CCD
        # (don't use the minimum and maximum of X and Y because the CCD is rotated
        # with respect to sky coordinates)
        # Get the corners
        minx = X.min()
        maxx = X.max()
        miny = Y.min()
        maxy = Y.max()
        corner_1 = [minx, Y[X == minx].max()]
        corner_2 = [X[Y == maxy].max(), maxy]
        corner_3 = [X[Y == miny].max(), miny]
        corner_4 = [maxx, Y[X == maxx].max()]
        # Get the aim point
        ra_pointing = event_header.get('RA_PNT')
        dec_pointing = event_header.get('DEC_PNT')
        system = event_header.get("RADECSYS")
        if system is None:
            system = 'ICRS'
        wcs = XMMWCS.XMMWCS(args.eventfile, X, Y)
        x_pointing, y_pointing = wcs.sky2xy([[ra_pointing, dec_pointing]])[0]
        # Computing maximum distance between corners and the pointing
        distances_to_corners = numpy.linalg.norm(numpy.array([x_pointing, y_pointing]) -
                                                 numpy.array([corner_1, corner_2, corner_3, corner_4]),
                                                 axis=1)
        max_distance = distances_to_corners.max()
        pointing = SkyCoord(ra=ra_pointing * u.degree, dec=dec_pointing * u.degree, frame=system.lower())
        def get_theta(x_, y_):
            # Off-axis angle (arcmin) of sky position (x_, y_) from the aim point
            point = wcs.xy2sky([[x_, y_]])[0]
            c1 = SkyCoord(ra=point[0] * u.degree, dec=point[1] * u.degree, frame=system.lower())
            this_theta = c1.separation(pointing)
            # Return it in arcmin
            return this_theta.to(u.arcmin).value
        def get_psf_size(x_, y_):
            # PSF size at 1.5 keV (68 percent enclosed fraction) at this position
            theta = get_theta(x_, y_)
            return psf.psfSize(pdata, 1.5, theta, 0.0, 0.68)
        gg = GridGen.GridGenChandra(hwu)
        gg.makeGrid(x_pointing, y_pointing, get_psf_size, max_distance)
    else:
        # Something else. Use provided step size
        gg = GridGen.GridGen(hwu)
        gg.makeGrid(args.regionsize, 'sky', args.multiplicity)
    log.debug("done")
    # Get the boxes definitions
    regions = gg.getBoxes()
    # with open('test_variable_size.reg','w+') as f:
    #
    #     for spec in regions:
    #
    #         reg = Box.Box(*spec)
    #
    #         f.write("%s" % "".join(reg.getDs9Region()))
    #sys.exit(0)
    n_events = time.shape[0]
    log.info("Processing interval %s - %s (duration: %s s, %s events)"
             % (tstart, tstop, tstop - tstart, n_events))
    results = _process_regions(args.eventfile, args.typeIerror, regions, args.ncpus, log)
    log.debug("done")
    log.debug("Selecting interesting regions with more than one interval...")
    interesting_regions = []
    for i, intervals in enumerate(results):
        # Remember: intervals contains the edges of the intervals
        if len(intervals) > 2:
            box = Box.Box(*regions[i])
            box.setEventfile(args.eventfile)
            box.readEvents(preLoaded=True, time=time, X=X, Y=Y, tstart=tstart, tstop=tstop)
            #if args.writeRegionFiles == 'yes':
            #    box.writeRegion("%s_exc%i.reg" % (root_filename, i + 1))
            # box.writeRegion("%s_exc%i.reg" % (root_filename, i + 1))
            log.debug("Region %i has %i intervals" % (i+1, len(intervals)-1))
            box.writeRegion("interesting_region_%i.reg" % (i+1))
            for j,(t1,t2) in enumerate(zip(intervals[:-1],intervals[1:])):
                log.debug("  %i : %s - %s (%s s long)" % (j+1, t1, t2, t2-t1))
            thisInterestingRegion = InterestingRegion.InterestingRegion(box, intervals,
                                                                        time, X, Y, tstart, tstop)
            interesting_regions.append(thisInterestingRegion)
    log.debug("done")
    log.debug("Removing overlapping intervals...")
    consolidator = TimeIntervalConsolidator.TimeIntervalConsolidator(interesting_regions)
    cleanedIntervals = consolidator.consolidate()
    log.debug("done")
    log.info("Kept %s intervals" % len(cleanedIntervals))
    if args.sigmaThreshold > 0:
        # Import here to avoid to override the .use directive in xtdac
        import matplotlib.pyplot as plt
        # Now for each cleaned interval perform a likelihood analysis on a region
        # larger than the box. This is needed otherwise it is too difficult to distinguish
        # a PSF-like excess and a flat-like excess
        log.info("Computing final significance...")
        finalCandidates = []
        for i, interval in enumerate(cleanedIntervals):
            log.info("Processing interval %s of %s" % (i + 1, len(cleanedIntervals)))
            this_interval_duration = interval.tstop - interval.tstart
            log.info("duration: %.1f s" % this_interval_duration)
            if this_interval_duration > args.max_duration:
                log.info("Duration is longer than the maximum allowed of %.1f" % (args.max_duration))
                continue
            if interval.nEvents < args.min_number_of_events:
                log.info("Less than %s events, discarding candidate with %s events" % (args.min_number_of_events,
                                                                                      interval.nEvents))
                continue
            boxx, boxy = interval.interestingRegion.getCenterPhysicalCoordinates()
            boxw, boxh = interval.interestingRegion.box.width, interval.interestingRegion.box.height
            # Search region is 25 arcsec of radius
            if hwu.getName().find("ACIS")==0:
                # For Chandra, let's adapt to the size of the PSF (but keep a minimum size of
                # at least 50 px)
                box_size_arcsec = max([boxw, boxh]) * hwu.getPixelScale()
                radius = max(20.0, (box_size_arcsec /2.0) * 1.5 / hwu.getPixelScale())
                log.info("Radius for search region: %s px" % radius)
            else:
                radius = 40.0 / hwu.getPixelScale()
            searchRegionStr = 'physical;circle(%s,%s,%s)' % (boxx, boxy, radius)
            log.info("Search region string: %s" % searchRegionStr)
            idx = (time >= interval.tstart) & (time <= interval.tstop)
            assert X[idx].shape[0] > 0
            assert Y[idx].shape[0] > 0
            with warnings.catch_warnings():
                # Cause all warnings to be ignored
                warnings.simplefilter("ignore")
                searchRegionDef = fitsRegions.FitsRegion(X[idx], Y[idx], time[idx],
                                                         event_header, searchRegionStr)
            # This is actually a loop of only one iteration
            for x, y, t, regfilter, reg in searchRegionDef.iteritems():
                xmin = boxx - radius
                xmax = boxx + radius
                ymin = boxy - radius
                ymax = boxy + radius
                if hwu.getName().find("ACIS") == 0:
                    # For Chandra, let's adapt to the size of the PSF, but make at least
                    # twice the size of the pixel (remember, this is in pixels)
                    r_binsize = max(radius / 60.0, 2.0)
                else:
                    r_binsize = 40.0 / hwu.getPixelScale() / 10.0
                assert (xmax-xmin) / r_binsize > 3.0, "Not enough bins for likelihood!"
                searchRegion = Likelihood.Region(xmin, xmax,
                                                 ymin, ymax,
                                                 r_binsize, regfilter,
                                                 args.expomap, args.eventfile)
                ls = Likelihood.Likelihood(x, y, searchRegion)
                # Build the likelihood model
                # Bkg model (isotropic)
                iso = Likelihood.Isotropic("bkg", 1.0)
                m = Likelihood.GlobalModel("likeModel") + iso
                try:
                    ls.setModel(m)
                except UnboundLocalError:
                    # NOTE(review): debugger left in the error path — remove
                    # before running unattended
                    import pdb;pdb.set_trace()
                # Minimize the mlogLike to get the background level
                like0 = ls.minimize(verbose=0)
                # Small TS map to figure out the source position
                trial_x = numpy.linspace(boxx - boxw / 2.0, boxx + boxw / 2.0, 16)
                trial_y = numpy.linspace(boxy - boxh / 2.0, boxy + boxh / 2.0, 16)
                ra, dec = interval.interestingRegion.getCenterSkyCoordinates()
                maxTS = 0
                _best_x = None
                _best_y = None
                TSmap = numpy.zeros([trial_y.shape[0], trial_x.shape[0]])
                # import cProfile, pstats, StringIO
                # pr = cProfile.Profile()
                # pr.enable()
                for k, srcx in enumerate(trial_x):
                    for h, srcy in enumerate(trial_y):
                        sys.stderr.write(".")
                        # Now fit adding a source at the position of the maximum of the
                        # image of this region
                        # srcx, srcy = ls.getMaximumPosition()
                        if h == 0:
                            # This is the very first loop. Make the image of the PSF.
                            # We assume that the PSF does not change much within the region, so we
                            # will use the same image for all the next iterations
                            thisSrc = Likelihood.PointSource("testSrc", srcx, srcy, args.eventfile, hwu)
                            psffile = thisSrc.outfile
                        else:
                            # Re-use the psf file already computed
                            thisSrc = Likelihood.PointSource("testSrc", srcx, srcy,
                                                             args.eventfile, hwu,
                                                             psffile)
                            pass
                        m += thisSrc
                        ls.setModel(m)
                        like1 = ls.minimize(verbose=0)
                        TS = 2 * (like0 - like1)
                        TSmap[h, k] = TS
                        if TS > maxTS:
                            maxTS = TS
                            _best_x = srcx
                            _best_y = srcy
                with warnings.catch_warnings():
                    # Cause all warnings to be ignored
                    warnings.simplefilter("ignore")
                    this_fig = ls.plot()
                    this_fig.savefig("c_%.4f_%.4f_%i.png" % (ra,dec,i))
                    plt.close(this_fig)
                m.removeSource("testSrc")
                # pr.disable()
                # s = StringIO.StringIO()
                # sortby = 'cumulative'
                # ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
                # ps.print_stats()
                # print s.getvalue()
                # sys.exit(0)
                sys.stderr.write("\n")
                significance = math.sqrt(max(TSmap.max(), 0))
                log.info("number of events: %s" % interval.nEvents)
                log.info("Region @ (RA,Dec) = (%.4f,%.4f) (%.3f - %.3f s) "
                         "-> %.1f sigma" % (ra, dec,
                                            interval.tstart,
                                            interval.tstop,
                                            significance))
                if significance >= args.sigmaThreshold:
                    log.debug("Keeping candidate")
                    # One-sided probability of a n sigma effect
                    prob = scipy.stats.norm().sf(significance) / 2.0
                    interval.probability = prob
                    interval._best_localization = (_best_x, _best_y)
                    finalCandidates.append(interval)
                    plt.imshow(TSmap, interpolation='none', origin='lower')
                    plt.colorbar()
                    plt.savefig("exc%s_tsmap.png" % i, tight_layout=True)
                    plt.close()
                else:
                    log.debug("Discarding candidate")
                # plt.imshow(TSmap, interpolation='none', origin='lower')
                # plt.savefig("exc%s.png" % i, tight_layout=True)
            log.debug("done")
    else:
        log.debug("Threshold for final significance is <= 0, no likelihood analysis will be performed")
        finalCandidates = cleanedIntervals
        # The probability has not been computed, so let's assign -1
        for c in finalCandidates:
            c.probability = -1
        pass
    log.debug("Preparing the summary Ascii file...")
    root_filename = ".".join(os.path.basename(args.eventfile).split(".")[:-1])
    # I need to read the WCS from the event file, if transient_pos is true
    wcs = None
    if args.transient_pos:
        wcs = XMMWCS.XMMWCS(args.eventfile, X, Y)
    with open("%s_res.txt" % root_filename, "w+") as f:
        f.write("#RA Dec Tstart Tstop Probability\n")
        for i, interval in enumerate(finalCandidates):
            if args.transient_pos:
                # Write the position of the transient
                points = wcs.xy2sky([list(interval._best_localization)])
                ra, dec = points[0]
            else:
                # Write the position of the center of the region containing the transient
                ra, dec = interval.interestingRegion.getCenterSkyCoordinates()
            # Find the first and last event in the interval
            idx = (time >= interval.tstart) & (time <= interval.tstop)
            first_event_timestamp = time[idx].min()
            last_event_timestamp = time[idx].max()
            f.write("%.4f %.4f %.3f %.3f %.3g\n" % (ra, dec, first_event_timestamp - 1e-2, last_event_timestamp + 1e-2,
                                                    interval.probability))
            if args.writeRegionFiles == 'yes':
                interval.interestingRegion.box.writeRegion("%s_candidate_%i.reg" % (root_filename, i + 1))
    log.debug("done")
    jobStop = timemod.time()
    if args.html_summary:
        log.debug("Preparing the summary HTML file...")
        # Summarize the results in a HTML file
        resultsSummary = Results.Summary()
        resultsSummary.addJobInfo(jobStop - jobStart, args.ncpus, args.typeIerror)
        resultsSummary.addObsInfo(args.eventfile, hwu.getName(), tstart, tstop, n_events)
        resultsSummary.addWholeHWUlightCurve(time, tstart, tstop, figsize=(8, 8))
        resultsSummary.addResults(map(lambda interval: interval.interestingRegion, finalCandidates))
        root_filename = ".".join(os.path.basename(args.eventfile).split(".")[:-1])
        resultsSummary.write("%s_res.html" % root_filename)
        log.debug("done")
    pass
if __name__ == "__main__":
    # Wall-clock start time; read as a module-level global by go() when it
    # builds the HTML summary (jobStop - jobStart).
    jobStart = timemod.time()
    # Define and parse input arguments
    desc = '''EXTraS Divide-and-Conquer algorithm to find
    transients.'''
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("-e", "--eventfile",
                        help="Event file to use (already cleaned and selected in" +
                             " energy and quadrant/CCD)",
                        required=True, type=validFITSfile)
    parser.add_argument("-x", "--expomap",
                        help="Exposure map for the whole observation. It is only" +
                             " needed to figure out gaps and masked pixels",
                        required=True, type=validFITSfile)
    parser.add_argument("-b", "--backgroundRegion", help="Custom background circle. To be" +
                        " specified as ra,dec,radius. For example '-b 187.5," +
                        "-23.2,10' corresponds to a circle with 10 arcsec " +
                        "radius centered on R.A., Dec. = (187.5, -23.2)",
                        required=False, default=None)
    parser.add_argument("-m", "--multiplicity", help="Control the overlap of the regions." +
                        " A multiplicity of 2 means the centers of the regions are" +
                        " shifted by 1/2 of the region size (they overlap by 50 percent)," +
                        " a multiplicity of 4 means they are shifted by 1/4 of " +
                        " their size (they overlap by 75 percent), and so on.",
                        required=False, default=2.0, type=float)
    parser.add_argument("-c", "--ncpus", help="Number of CPUs to use (default=1)",
                        type=int, default=1, required=False)
    parser.add_argument("-p", "--typeIerror",
                        help="Type I error probability for the Bayesian Blocks " +
                             "algorithm.",
                        type=float,
                        default=1e-5,
                        required=False)
    parser.add_argument("-s", "--sigmaThreshold",
                        help="Threshold for the final significance. All intervals found " +
                             "by the bayesian blocks " +
                             "algorithm which does not surpass this threshold will not be saved in the " +
                             "final file.",
                        type=float,
                        default=5.0,
                        required=False)
    parser.add_argument("-r", "--regionsize",
                        help="Approximate side for the square regions in which the" +
                             " search will be performed (Default: 60 arcsec)",
                        type=float,
                        default=60,
                        required=False)
    parser.add_argument("--max_duration",
                        help="Do not consider candidate transients with a duration longer then max_duration",
                        type=float,
                        default=1e9,
                        required=False)
    parser.add_argument("--min_number_of_events",
                        help="Do not consider candidate transients with less than this number of events",
                        type=int,
                        default=0,
                        required=False)
    parser.add_argument("-w", "--writeRegionFiles",
                        help="Write a ds9 region file for each region with excesses?",
                        type=str,
                        default='yes',
                        required=False,
                        choices=['yes', 'no'])
    # --transient_pos reports the best-fit transient position instead of the
    # region center (only meaningful when the likelihood step runs)
    parser.add_argument('--transient_pos', dest='transient_pos', action='store_true')
    parser.set_defaults(transient_pos=False)
    parser.add_argument('--no-html', dest='html_summary', action='store_false')
    parser.set_defaults(html_summary=True)
    parser.add_argument("-v", "--verbosity",
                        required=False,
                        default='info',
                        choices=['info', 'debug'])
    args = parser.parse_args()
    go(args)
|
[
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"XtDac.FixedBinSearch.Likelihood.PointSource",
"numpy.argsort",
"XtDac.DivideAndConquer.XMMWCS.XMMWCS",
"os.path.isfile",
"XtDac.DivideAndConquer.TimeIntervalConsolidator.TimeIntervalConsolidator",
"XtDac.DivideAndConquer.Results.Summary",
"XtDac.DivideAndConquer.GridGen.GridGenChandra",
"XtDac.FixedBinSearch.Likelihood.GlobalModel",
"warnings.simplefilter",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"caldb4.Caldb",
"XtDac.DivideAndConquer.HardwareUnit.hardwareUnitFactory",
"warnings.catch_warnings",
"numpy.linspace",
"XtDac.DivideAndConquer.InterestingRegion.InterestingRegion",
"XtDac.DivideAndConquer.Box.Box",
"psf.psfSize",
"XtDac.DivideAndConquer.matplotlibConfig.getConfig",
"XtDac.DivideAndConquer.GridGen.GridGen",
"os.path.basename",
"pyfits.open",
"XtDac.FixedBinSearch.fitsRegions.FitsRegion",
"XtDac.FixedBinSearch.Likelihood.Region",
"matplotlib.use",
"multiprocessing.Pool",
"sys.exit",
"logging.basicConfig",
"XtDac.FixedBinSearch.Likelihood.Isotropic",
"numpy.zeros",
"time.time",
"psf.psfInit",
"numpy.array",
"XtDac.FixedBinSearch.Likelihood.Likelihood",
"pdb.set_trace",
"sys.stderr.write",
"logging.getLogger"
] |
[((300, 321), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (314, 321), False, 'import matplotlib\n'), ((334, 362), 'XtDac.DivideAndConquer.matplotlibConfig.getConfig', 'matplotlibConfig.getConfig', ([], {}), '()\n', (360, 362), False, 'from XtDac.DivideAndConquer import matplotlibConfig\n'), ((1353, 1393), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (1372, 1393), False, 'import logging\n'), ((1400, 1426), 'logging.getLogger', 'logging.getLogger', (['"""XtDac"""'], {}), "('XtDac')\n", (1417, 1426), False, 'import logging\n'), ((2596, 2613), 'XtDac.DivideAndConquer.Box.Box', 'Box.Box', (['*box_def'], {}), '(*box_def)\n', (2603, 2613), False, 'from XtDac.DivideAndConquer import Box\n'), ((5862, 5910), 'XtDac.DivideAndConquer.HardwareUnit.hardwareUnitFactory', 'HardwareUnit.hardwareUnitFactory', (['args.eventfile'], {}), '(args.eventfile)\n', (5894, 5910), False, 'from XtDac.DivideAndConquer import HardwareUnit\n'), ((12414, 12484), 'XtDac.DivideAndConquer.TimeIntervalConsolidator.TimeIntervalConsolidator', 'TimeIntervalConsolidator.TimeIntervalConsolidator', (['interesting_regions'], {}), '(interesting_regions)\n', (12463, 12484), False, 'from XtDac.DivideAndConquer import TimeIntervalConsolidator\n'), ((22482, 22496), 'time.time', 'timemod.time', ([], {}), '()\n', (22494, 22496), True, 'import time as timemod\n'), ((23246, 23260), 'time.time', 'timemod.time', ([], {}), '()\n', (23258, 23260), True, 'import time as timemod\n'), ((23425, 23466), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc'}), '(description=desc)\n', (23448, 23466), False, 'import argparse\n'), ((1465, 1484), 'os.path.isfile', 'os.path.isfile', (['arg'], {}), '(arg)\n', (1479, 1484), False, 'import os\n'), ((1550, 1560), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1558, 1560), False, 'import sys\n'), ((1641, 1657), 'pyfits.open', 'pyfits.open', (['arg'], {}), '(arg)\n', (1652, 
1657), False, 'import pyfits\n'), ((4122, 4150), 'multiprocessing.Pool', 'multiprocessing.Pool', (['n_cpus'], {}), '(n_cpus)\n', (4142, 4150), False, 'import multiprocessing\n'), ((6120, 6147), 'pyfits.open', 'pyfits.open', (['args.eventfile'], {}), '(args.eventfile)\n', (6131, 6147), False, 'import pyfits\n'), ((6552, 6572), 'numpy.argsort', 'numpy.argsort', (['time_'], {}), '(time_)\n', (6565, 6572), False, 'import numpy\n'), ((8234, 8283), 'caldb4.Caldb', 'caldb4.Caldb', ([], {'telescope': '"""CHANDRA"""', 'product': '"""REEF"""'}), "(telescope='CHANDRA', product='REEF')\n", (8246, 8283), False, 'import caldb4\n'), ((8480, 8497), 'psf.psfInit', 'psf.psfInit', (['reef'], {}), '(reef)\n', (8491, 8497), False, 'import psf\n'), ((9248, 9283), 'XtDac.DivideAndConquer.XMMWCS.XMMWCS', 'XMMWCS.XMMWCS', (['args.eventfile', 'X', 'Y'], {}), '(args.eventfile, X, Y)\n', (9261, 9283), False, 'from XtDac.DivideAndConquer import XMMWCS\n'), ((10304, 10331), 'XtDac.DivideAndConquer.GridGen.GridGenChandra', 'GridGen.GridGenChandra', (['hwu'], {}), '(hwu)\n', (10326, 10331), False, 'from XtDac.DivideAndConquer import GridGen\n'), ((10480, 10500), 'XtDac.DivideAndConquer.GridGen.GridGen', 'GridGen.GridGen', (['hwu'], {}), '(hwu)\n', (10495, 10500), False, 'from XtDac.DivideAndConquer import GridGen\n'), ((21260, 21295), 'XtDac.DivideAndConquer.XMMWCS.XMMWCS', 'XMMWCS.XMMWCS', (['args.eventfile', 'X', 'Y'], {}), '(args.eventfile, X, Y)\n', (21273, 21295), False, 'from XtDac.DivideAndConquer import XMMWCS\n'), ((22653, 22670), 'XtDac.DivideAndConquer.Results.Summary', 'Results.Summary', ([], {}), '()\n', (22668, 22670), False, 'from XtDac.DivideAndConquer import Results\n'), ((1744, 1754), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1752, 1754), False, 'import sys\n'), ((10248, 10289), 'psf.psfSize', 'psf.psfSize', (['pdata', '(1.5)', 'theta', '(0.0)', '(0.68)'], {}), '(pdata, 1.5, theta, 0.0, 0.68)\n', (10259, 10289), False, 'import psf\n'), ((11410, 11430), 
'XtDac.DivideAndConquer.Box.Box', 'Box.Box', (['*regions[i]'], {}), '(*regions[i])\n', (11417, 11430), False, 'from XtDac.DivideAndConquer import Box\n'), ((12106, 12184), 'XtDac.DivideAndConquer.InterestingRegion.InterestingRegion', 'InterestingRegion.InterestingRegion', (['box', 'intervals', 'time', 'X', 'Y', 'tstart', 'tstop'], {}), '(box, intervals, time, X, Y, tstart, tstop)\n', (12141, 12184), False, 'from XtDac.DivideAndConquer import InterestingRegion\n'), ((16367, 16399), 'XtDac.FixedBinSearch.Likelihood.Isotropic', 'Likelihood.Isotropic', (['"""bkg"""', '(1.0)'], {}), "('bkg', 1.0)\n", (16387, 16399), False, 'from XtDac.FixedBinSearch import Likelihood\n'), ((16785, 16841), 'numpy.linspace', 'numpy.linspace', (['(boxx - boxw / 2.0)', '(boxx + boxw / 2.0)', '(16)'], {}), '(boxx - boxw / 2.0, boxx + boxw / 2.0, 16)\n', (16799, 16841), False, 'import numpy\n'), ((16864, 16920), 'numpy.linspace', 'numpy.linspace', (['(boxy - boxh / 2.0)', '(boxy + boxh / 2.0)', '(16)'], {}), '(boxy - boxh / 2.0, boxy + boxh / 2.0, 16)\n', (16878, 16920), False, 'import numpy\n'), ((17095, 17144), 'numpy.zeros', 'numpy.zeros', (['[trial_y.shape[0], trial_x.shape[0]]'], {}), '([trial_y.shape[0], trial_x.shape[0]])\n', (17106, 17144), False, 'import numpy\n'), ((19405, 19427), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (19421, 19427), False, 'import sys\n'), ((9483, 9520), 'numpy.array', 'numpy.array', (['[x_pointing, y_pointing]'], {}), '([x_pointing, y_pointing])\n', (9494, 9520), False, 'import numpy\n'), ((9572, 9625), 'numpy.array', 'numpy.array', (['[corner_1, corner_2, corner_3, corner_4]'], {}), '([corner_1, corner_2, corner_3, corner_4])\n', (9583, 9625), False, 'import numpy\n'), ((14890, 14915), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (14913, 14915), False, 'import warnings\n'), ((14986, 15017), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (15007, 15017), False, 
'import warnings\n'), ((15053, 15138), 'XtDac.FixedBinSearch.fitsRegions.FitsRegion', 'fitsRegions.FitsRegion', (['X[idx]', 'Y[idx]', 'time[idx]', 'event_header', 'searchRegionStr'], {}), '(X[idx], Y[idx], time[idx], event_header, searchRegionStr\n )\n', (15075, 15138), False, 'from XtDac.FixedBinSearch import fitsRegions\n'), ((15965, 16063), 'XtDac.FixedBinSearch.Likelihood.Region', 'Likelihood.Region', (['xmin', 'xmax', 'ymin', 'ymax', 'r_binsize', 'regfilter', 'args.expomap', 'args.eventfile'], {}), '(xmin, xmax, ymin, ymax, r_binsize, regfilter, args.\n expomap, args.eventfile)\n', (15982, 16063), False, 'from XtDac.FixedBinSearch import Likelihood\n'), ((16228, 16269), 'XtDac.FixedBinSearch.Likelihood.Likelihood', 'Likelihood.Likelihood', (['x', 'y', 'searchRegion'], {}), '(x, y, searchRegion)\n', (16249, 16269), False, 'from XtDac.FixedBinSearch import Likelihood\n'), ((16417, 16452), 'XtDac.FixedBinSearch.Likelihood.GlobalModel', 'Likelihood.GlobalModel', (['"""likeModel"""'], {}), "('likeModel')\n", (16439, 16452), False, 'from XtDac.FixedBinSearch import Likelihood\n'), ((20247, 20302), 'matplotlib.pyplot.imshow', 'plt.imshow', (['TSmap'], {'interpolation': '"""none"""', 'origin': '"""lower"""'}), "(TSmap, interpolation='none', origin='lower')\n", (20257, 20302), True, 'import matplotlib.pyplot as plt\n'), ((20319, 20333), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (20331, 20333), True, 'import matplotlib.pyplot as plt\n'), ((20350, 20403), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('exc%s_tsmap.png' % i)"], {'tight_layout': '(True)'}), "('exc%s_tsmap.png' % i, tight_layout=True)\n", (20361, 20403), True, 'import matplotlib.pyplot as plt\n'), ((20420, 20431), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20429, 20431), True, 'import matplotlib.pyplot as plt\n'), ((16576, 16591), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (16589, 16591), False, 'import pdb\n'), ((17379, 17400), 'sys.stderr.write', 
'sys.stderr.write', (['"""."""'], {}), "('.')\n", (17395, 17400), False, 'import sys\n'), ((21076, 21108), 'os.path.basename', 'os.path.basename', (['args.eventfile'], {}), '(args.eventfile)\n', (21092, 21108), False, 'import os\n'), ((17912, 17978), 'XtDac.FixedBinSearch.Likelihood.PointSource', 'Likelihood.PointSource', (['"""testSrc"""', 'srcx', 'srcy', 'args.eventfile', 'hwu'], {}), "('testSrc', srcx, srcy, args.eventfile, hwu)\n", (17934, 17978), False, 'from XtDac.FixedBinSearch import Likelihood\n'), ((18154, 18229), 'XtDac.FixedBinSearch.Likelihood.PointSource', 'Likelihood.PointSource', (['"""testSrc"""', 'srcx', 'srcy', 'args.eventfile', 'hwu', 'psffile'], {}), "('testSrc', srcx, srcy, args.eventfile, hwu, psffile)\n", (18176, 18229), False, 'from XtDac.FixedBinSearch import Likelihood\n'), ((23060, 23092), 'os.path.basename', 'os.path.basename', (['args.eventfile'], {}), '(args.eventfile)\n', (23076, 23092), False, 'import os\n'), ((18757, 18782), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (18780, 18782), False, 'import warnings\n'), ((18877, 18908), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (18898, 18908), False, 'import warnings\n'), ((19068, 19087), 'matplotlib.pyplot.close', 'plt.close', (['this_fig'], {}), '(this_fig)\n', (19077, 19087), True, 'import matplotlib.pyplot as plt\n')]
|
from flask import Flask
from flask import make_response
from flask import request
# Application instance; the view functions below register routes on it.
app = Flask(__name__)
@app.route('/')
def index():
    """Display the user_id and user_name cookies carried by the request."""
    # A missing cookie comes back as None and is rendered as 'None'.
    uid = request.cookies.get('user_id')
    uname = request.cookies.get('user_name')
    return '%s --- %s' % (uid, uname)
@app.route('/login')
def login():
    """Set identifying cookies, assuming the credentials are valid (demo)."""
    # Assume the account and password are correct by default.
    resp = make_response('success')
    # Set cookies; both expire after 3600 seconds (one hour).
    resp.set_cookie('user_id', '1', max_age=3600)
    resp.set_cookie('user_name', 'laowang', max_age=3600)
    return resp
@app.route('/logout')
def logout():
    """Remove the cookies set by /login."""
    resp = make_response('success')
    # delete_cookie tells the browser to drop the cookie immediately.
    resp.delete_cookie('user_id')
    resp.delete_cookie('user_name')
    return resp
if __name__ == '__main__':
    # Run the Flask development server with the debugger/reloader enabled
    # (not suitable for production deployments).
    app.run(debug=True)
|
[
"flask.make_response",
"flask.Flask",
"flask.request.cookies.get"
] |
[((89, 104), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (94, 104), False, 'from flask import Flask\n'), ((150, 180), 'flask.request.cookies.get', 'request.cookies.get', (['"""user_id"""'], {}), "('user_id')\n", (169, 180), False, 'from flask import request\n'), ((197, 229), 'flask.request.cookies.get', 'request.cookies.get', (['"""user_name"""'], {}), "('user_name')\n", (216, 229), False, 'from flask import request\n'), ((347, 371), 'flask.make_response', 'make_response', (['"""success"""'], {}), "('success')\n", (360, 371), False, 'from flask import make_response\n'), ((576, 600), 'flask.make_response', 'make_response', (['"""success"""'], {}), "('success')\n", (589, 600), False, 'from flask import make_response\n')]
|
import uuid
from sqlalchemy import Column
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.declarative import as_declarative, declared_attr
@as_declarative()
class Base:
    # Primary key shared by every model: a client-side random UUID
    # (uuid.uuid4 is the column default, stored as a native Postgres UUID).
    id: uuid.UUID = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    __name__: str
    # Generate __tablename__ automatically
    @declared_attr
    def __tablename__(cls) -> str:
        # e.g. a model class named ``User`` maps to the table "user".
        return cls.__name__.lower()
|
[
"sqlalchemy.ext.declarative.as_declarative",
"sqlalchemy.dialects.postgresql.UUID"
] |
[((163, 179), 'sqlalchemy.ext.declarative.as_declarative', 'as_declarative', ([], {}), '()\n', (177, 179), False, 'from sqlalchemy.ext.declarative import as_declarative, declared_attr\n'), ((219, 237), 'sqlalchemy.dialects.postgresql.UUID', 'UUID', ([], {'as_uuid': '(True)'}), '(as_uuid=True)\n', (223, 237), False, 'from sqlalchemy.dialects.postgresql import UUID\n')]
|
from numpy.core.numeric import outer
import torch
from torch import log, mean, nn
import torch.nn.functional as F
import numpy as np
class VGAE_Encoder(nn.Module):
    """Graph-convolutional encoder for a variational graph auto-encoder.

    The final GCN layer emits ``2 * n_out`` channels which ``forward``
    splits into two halves — the first is returned as ``mean``, the second
    as ``std`` (whether it is a std or a log-variance depends on the
    caller's reparameterisation; not determinable from this class).
    """
    def __init__(self, n_in, n_hid, n_out, adj=None):
        super(VGAE_Encoder, self).__init__()
        self.n_out = n_out
        self.base_gcn = GraphConv2(n_in, n_hid, adj=adj)
        self.gcn1 = GraphConv2(n_hid, n_out, activation=F.elu, adj=adj)
        self.gcn2 = GraphConv2(n_out, n_out, activation=F.elu, adj=adj)
        # Identity activation: raw values are needed for the mean/std split.
        self.gcn3 = GraphConv2(n_out, n_out*2, activation=lambda x:x, adj=adj)
    def forward(self, x):
        hidden = self.base_gcn(x)
        out = self.gcn1(hidden)
        out = self.gcn2(out)
        out = self.gcn3(out)
        # First n_out columns -> mean, remaining n_out columns -> std.
        mean = out[:, :self.n_out]
        std = out[:, self.n_out:]
        return mean, std
    def set_gcn_adj(self, adj):
        # Replace the adjacency matrix used by every GCN layer at once.
        self.base_gcn.adj = adj
        self.gcn1.adj = adj
        self.gcn2.adj = adj
        self.gcn3.adj = adj
class VAE_Encoder(nn.Module):
    """MLP encoder for a VAE: a shared hidden layer feeding two heads that
    produce the mean and log-variance of the latent distribution."""

    def __init__(self, n_in, n_hidden, n_out, keep_prob=1.0) -> None:
        super(VAE_Encoder, self).__init__()
        self.n_out = n_out
        # Shared trunk plus the two output heads.
        self.layer1 = nn.Linear(n_in, n_hidden)
        self.layer2 = nn.Linear(n_hidden, n_out)
        self.layer3 = nn.Linear(n_hidden, n_out)
        self._init_weight()

    def _init_weight(self):
        # Xavier-normal weights; biases start at a small positive constant.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_normal_(module.weight.data)
                module.bias.data.fill_(0.01)

    def forward(self, inputs):
        hidden = F.relu(self.layer1(inputs))
        # Head outputs: mu from layer2, log-variance from layer3.
        mu = self.layer2(hidden)
        log_var = self.layer3(hidden)
        return mu, log_var
class VAE_Bernulli_Decoder(nn.Module):
    """Decoder mapping latent codes back to reconstruction logits through a
    single hidden ReLU layer (no sigmoid is applied here; presumably the
    loss applies it — confirm against the training code)."""

    def __init__(self, n_in, n_hidden, n_out, keep_prob=1.0) -> None:
        super(VAE_Bernulli_Decoder, self).__init__()
        self.layer1 = nn.Linear(n_in, n_hidden)
        self.layer2 = nn.Linear(n_hidden, n_out)
        self._init_weight()

    def _init_weight(self):
        # Xavier-normal weights, small positive biases.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_normal_(module.weight.data)
                module.bias.data.fill_(0.01)

    def forward(self, inputs):
        hidden = F.relu(self.layer1(inputs))
        return self.layer2(hidden)
class Encoder(nn.Module):
    """Graph-convolutional encoder producing a strictly positive output
    ``alpha`` (clamped to [1e-2, 30]); the exp/hardtanh tail suggests
    concentration parameters for a latent distribution — confirm against
    the model that consumes it.
    """
    def __init__(self, n_in, n_hid, n_out, adj=None):
        super(Encoder,self).__init__()
        self.n_out = n_out
        self.base_gcn = GraphConv2(n_in, n_hid, adj=adj)
        self.gcn1 = GraphConv2(n_hid, n_out, activation=F.elu, adj=adj)
        self.gcn2 = GraphConv2(n_out, n_out, activation=F.elu, adj=adj)
        # Identity activation on the last layer; positivity comes from exp below.
        self.gcn3 = GraphConv2(n_out, n_out, activation=lambda x:x, adj=adj)
    def forward(self, x):
        hidden = self.base_gcn(x)
        out = self.gcn1(hidden)
        out = self.gcn2(out)
        out = self.gcn3(out)
        # Temperature-scaled exponential, then clamp to [0.01, 30].
        alpha = torch.exp(out/4)
        alpha = F.hardtanh(alpha, min_val=1e-2, max_val=30)
        return alpha
    def set_gcn_adj(self, adj):
        # Replace the adjacency matrix used by every GCN layer at once.
        self.base_gcn.adj = adj
        self.gcn1.adj = adj
        self.gcn2.adj = adj
        self.gcn3.adj = adj
class Encoder2(nn.Module):
    """MLP analogue of ``Encoder``: two linear layers whose output is made
    strictly positive via a temperature-scaled exp and clamped to
    [1e-2, 30]."""

    def __init__(self, n_in, n_hidden, n_out, keep_prob=1.0) -> None:
        super(Encoder2, self).__init__()
        self.n_out = n_out
        self.layer1 = nn.Linear(n_in, n_hidden)
        self.layer2 = nn.Linear(n_hidden, n_out)
        self._init_weight()

    def _init_weight(self):
        # Xavier-normal weights, small positive biases.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_normal_(module.weight.data)
                module.bias.data.fill_(0.01)

    def forward(self, inputs):
        hidden = F.relu(self.layer1(inputs))
        raw = self.layer2(hidden)
        # Temperature-scaled exponential, then clamp to [0.01, 30].
        alpha = torch.exp(raw/4)
        alpha = F.hardtanh(alpha, min_val=1e-2, max_val=30)
        return alpha
class Encoder3(nn.Module):
    """Four-layer fully connected network that flattens its input to
    ``(batch, -1)`` before the first layer; the final layer is linear
    (no activation)."""

    def __init__(self, input_dim, hidden_dim, output_dim, keep_prob=1.0) -> None:
        super(Encoder3, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden_dim, hidden_dim)
        self.relu3 = nn.ReLU()
        self.fc4 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Flatten everything after the batch dimension.
        flat = x.view(x.size(0), -1)
        out = self.relu1(self.fc1(flat))
        out = self.relu2(self.fc2(out))
        out = self.relu3(self.fc3(out))
        # No squashing on the output layer.
        return self.fc4(out)
class VGAE_Decoder(nn.Module):
    """Decoder for a VGAE: reconstructs node features through an MLP, takes
    label predictions directly from the last ``n_label`` latent columns,
    and reconstructs the adjacency matrix via a dot-product decoder.
    """
    def __init__(self, n_in, n_hid, n_out, n_label, keep_prob=1.0):
        super(VGAE_Decoder,self).__init__()
        self.n_label = n_label
        # Two-stage MLP; dropout probability is 1 - keep_prob.
        self.layer1 = nn.Sequential(nn.Linear(n_in,n_hid),
                                    nn.Tanh(),
                                    nn.Dropout(1-keep_prob))
        self.layer2 = nn.Sequential(nn.Linear(n_hid,n_hid),
                                    nn.ELU(),
                                    nn.Dropout(1-keep_prob))
        self.fc_out = nn.Linear(n_hid,n_out)
        self._init_weight()
    def _init_weight(self):
        # Xavier-normal weights, small positive biases for all linear layers.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight.data)
                m.bias.data.fill_(0.01)
    def forward(self, z):
        h0 = self.layer1(z)
        h1 = self.layer2(h0)
        features_hat = self.fc_out(h1)
        # Labels are read straight from the trailing latent dimensions.
        labels_hat = z[:, -self.n_label:]
        # Edge probabilities from sigmoid(z z^T) (see dot_product_decode).
        adj_hat = dot_product_decode(z)
        return features_hat, labels_hat, adj_hat
class Decoder(nn.Module):
    """Decoder variant that, unlike ``VGAE_Decoder``, predicts labels with
    its own MLP branch (layer3/layer4) instead of slicing them out of the
    latent code. Features and adjacency are reconstructed the same way.
    """
    def __init__(self, n_in, n_hid, n_out, n_label, keep_prob=1.0):
        super(Decoder,self).__init__()
        self.n_label = n_label
        # Feature-reconstruction branch (dropout probability = 1 - keep_prob).
        self.layer1 = nn.Sequential(nn.Linear(n_in,n_hid),
                                    nn.Tanh(),
                                    nn.Dropout(1-keep_prob))
        self.layer2 = nn.Sequential(nn.Linear(n_hid,n_hid),
                                    nn.ELU(),
                                    nn.Dropout(1-keep_prob))
        # Label-prediction branch.
        self.layer3 = nn.Sequential(nn.Linear(n_in,n_hid),
                                    nn.Tanh(),
                                    nn.Dropout(1-keep_prob))
        self.layer4 = nn.Sequential(nn.Linear(n_hid,n_label),
                                    nn.ELU(),
                                    nn.Dropout(1-keep_prob))
        self.fc_out = nn.Linear(n_hid,n_out)
        self._init_weight()
    def _init_weight(self):
        # Xavier-normal weights, small positive biases for all linear layers.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight.data)
                m.bias.data.fill_(0.01)
    def forward(self, z):
        h0 = self.layer1(z)
        h1 = self.layer2(h0)
        features_hat = self.fc_out(h1)
        # Separate branch for the labels.
        h2 = self.layer3(z)
        h3 = self.layer4(h2)
        labels_hat = h3
        # Edge probabilities from sigmoid(z z^T) (see dot_product_decode).
        adj_hat = dot_product_decode(z)
        return features_hat, labels_hat, adj_hat
class GraphConv(nn.Module):
    """Single graph-convolution layer: activation(adj @ (x @ W)).

    ``W`` is Glorot-initialised; ``adj`` must support ``torch.mm``, i.e. a
    dense matrix (compare ``GraphConv2``, which uses ``torch.spmm``).
    """
    def __init__(self, n_in, n_out, adj, activation = F.relu, **kwargs):
        super(GraphConv, self).__init__(**kwargs)
        self.weight = glorot_init(n_in, n_out)
        self.adj = adj
        self.activation = activation
    def forward(self, inputs):
        x = inputs
        # Feature transform, then neighbourhood aggregation.
        x = torch.mm(x,self.weight)
        x = torch.mm(self.adj, x)
        outputs = self.activation(x)
        return outputs
class GraphConv2(nn.Module):
    """Variant of ``GraphConv`` that aggregates with ``torch.spmm``, so the
    adjacency matrix may be sparse; ``adj`` defaults to None and can be
    assigned after construction (see ``set_gcn_adj`` on the encoders).
    """
    def __init__(self, n_in, n_out, activation = F.relu, adj=None, **kwargs):
        super(GraphConv2, self).__init__(**kwargs)
        self.weight = glorot_init(n_in, n_out)
        self.adj = adj
        self.activation = activation
    def forward(self, inputs):
        x = inputs
        # Feature transform, then sparse-dense neighbourhood aggregation.
        x = torch.mm(x,self.weight)
        x = torch.spmm(self.adj, x)
        outputs = self.activation(x)
        return outputs
def dot_product_decode(Z):
    """Reconstruct edge probabilities as the element-wise sigmoid of the
    Gram matrix ``Z @ Z.T`` of the latent embeddings."""
    gram = torch.matmul(Z, Z.t())
    return torch.sigmoid(gram)
def glorot_init(input_dim, output_dim):
    """Return an ``nn.Parameter`` of shape (input_dim, output_dim) drawn
    uniformly from the Glorot/Xavier range [-limit, limit) with
    limit = sqrt(6 / (fan_in + fan_out))."""
    limit = np.sqrt(6.0/(input_dim + output_dim))
    # torch.rand is U[0, 1); rescale to [-limit, limit).
    weights = torch.rand(input_dim, output_dim)*2*limit - limit
    return nn.Parameter(weights)
|
[
"torch.nn.Parameter",
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.rand",
"torch.nn.Tanh",
"torch.nn.init.xavier_normal_",
"torch.mm",
"torch.spmm",
"torch.exp",
"torch.nn.ELU",
"torch.nn.functional.hardtanh",
"torch.nn.Linear",
"torch.nn.functional.relu",
"numpy.sqrt"
] |
[((8286, 8325), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (input_dim + output_dim))'], {}), '(6.0 / (input_dim + output_dim))\n', (8293, 8325), True, 'import numpy as np\n'), ((8409, 8430), 'torch.nn.Parameter', 'nn.Parameter', (['initial'], {}), '(initial)\n', (8421, 8430), False, 'from torch import log, mean, nn\n'), ((1170, 1195), 'torch.nn.Linear', 'nn.Linear', (['n_in', 'n_hidden'], {}), '(n_in, n_hidden)\n', (1179, 1195), False, 'from torch import log, mean, nn\n'), ((1218, 1244), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_out'], {}), '(n_hidden, n_out)\n', (1227, 1244), False, 'from torch import log, mean, nn\n'), ((1267, 1293), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_out'], {}), '(n_hidden, n_out)\n', (1276, 1293), False, 'from torch import log, mean, nn\n'), ((1605, 1615), 'torch.nn.functional.relu', 'F.relu', (['h0'], {}), '(h0)\n', (1611, 1615), True, 'import torch.nn.functional as F\n'), ((1954, 1979), 'torch.nn.Linear', 'nn.Linear', (['n_in', 'n_hidden'], {}), '(n_in, n_hidden)\n', (1963, 1979), False, 'from torch import log, mean, nn\n'), ((2002, 2028), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_out'], {}), '(n_hidden, n_out)\n', (2011, 2028), False, 'from torch import log, mean, nn\n'), ((2332, 2342), 'torch.nn.functional.relu', 'F.relu', (['h0'], {}), '(h0)\n', (2338, 2342), True, 'import torch.nn.functional as F\n'), ((2992, 3010), 'torch.exp', 'torch.exp', (['(out / 4)'], {}), '(out / 4)\n', (3001, 3010), False, 'import torch\n'), ((3025, 3068), 'torch.nn.functional.hardtanh', 'F.hardtanh', (['alpha'], {'min_val': '(0.01)', 'max_val': '(30)'}), '(alpha, min_val=0.01, max_val=30)\n', (3035, 3068), True, 'import torch.nn.functional as F\n'), ((3428, 3453), 'torch.nn.Linear', 'nn.Linear', (['n_in', 'n_hidden'], {}), '(n_in, n_hidden)\n', (3437, 3453), False, 'from torch import log, mean, nn\n'), ((3476, 3502), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'n_out'], {}), '(n_hidden, n_out)\n', (3485, 3502), False, 'from torch import log, 
mean, nn\n'), ((3814, 3824), 'torch.nn.functional.relu', 'F.relu', (['h0'], {}), '(h0)\n', (3820, 3824), True, 'import torch.nn.functional as F\n'), ((3871, 3889), 'torch.exp', 'torch.exp', (['(out / 4)'], {}), '(out / 4)\n', (3880, 3889), False, 'import torch\n'), ((3904, 3947), 'torch.nn.functional.hardtanh', 'F.hardtanh', (['alpha'], {'min_val': '(0.01)', 'max_val': '(30)'}), '(alpha, min_val=0.01, max_val=30)\n', (3914, 3947), True, 'import torch.nn.functional as F\n'), ((4140, 4172), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'hidden_dim'], {}), '(input_dim, hidden_dim)\n', (4149, 4172), False, 'from torch import log, mean, nn\n'), ((4194, 4203), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4201, 4203), False, 'from torch import log, mean, nn\n'), ((4223, 4256), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (4232, 4256), False, 'from torch import log, mean, nn\n'), ((4278, 4287), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4285, 4287), False, 'from torch import log, mean, nn\n'), ((4307, 4340), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (4316, 4340), False, 'from torch import log, mean, nn\n'), ((4362, 4371), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4369, 4371), False, 'from torch import log, mean, nn\n'), ((4391, 4424), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'output_dim'], {}), '(hidden_dim, output_dim)\n', (4400, 4424), False, 'from torch import log, mean, nn\n'), ((5498, 5521), 'torch.nn.Linear', 'nn.Linear', (['n_hid', 'n_out'], {}), '(n_hid, n_out)\n', (5507, 5521), False, 'from torch import log, mean, nn\n'), ((6697, 6720), 'torch.nn.Linear', 'nn.Linear', (['n_hid', 'n_out'], {}), '(n_hid, n_out)\n', (6706, 6720), False, 'from torch import log, mean, nn\n'), ((7562, 7586), 'torch.mm', 'torch.mm', (['x', 'self.weight'], {}), '(x, self.weight)\n', (7570, 7586), False, 'import torch\n'), ((7598, 7619), 'torch.mm', 'torch.mm', 
(['self.adj', 'x'], {}), '(self.adj, x)\n', (7606, 7619), False, 'import torch\n'), ((8011, 8035), 'torch.mm', 'torch.mm', (['x', 'self.weight'], {}), '(x, self.weight)\n', (8019, 8035), False, 'import torch\n'), ((8047, 8070), 'torch.spmm', 'torch.spmm', (['self.adj', 'x'], {}), '(self.adj, x)\n', (8057, 8070), False, 'import torch\n'), ((5258, 5280), 'torch.nn.Linear', 'nn.Linear', (['n_in', 'n_hid'], {}), '(n_in, n_hid)\n', (5267, 5280), False, 'from torch import log, mean, nn\n'), ((5297, 5306), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (5304, 5306), False, 'from torch import log, mean, nn\n'), ((5324, 5349), 'torch.nn.Dropout', 'nn.Dropout', (['(1 - keep_prob)'], {}), '(1 - keep_prob)\n', (5334, 5349), False, 'from torch import log, mean, nn\n'), ((5385, 5408), 'torch.nn.Linear', 'nn.Linear', (['n_hid', 'n_hid'], {}), '(n_hid, n_hid)\n', (5394, 5408), False, 'from torch import log, mean, nn\n'), ((5425, 5433), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (5431, 5433), False, 'from torch import log, mean, nn\n'), ((5451, 5476), 'torch.nn.Dropout', 'nn.Dropout', (['(1 - keep_prob)'], {}), '(1 - keep_prob)\n', (5461, 5476), False, 'from torch import log, mean, nn\n'), ((6201, 6223), 'torch.nn.Linear', 'nn.Linear', (['n_in', 'n_hid'], {}), '(n_in, n_hid)\n', (6210, 6223), False, 'from torch import log, mean, nn\n'), ((6240, 6249), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (6247, 6249), False, 'from torch import log, mean, nn\n'), ((6267, 6292), 'torch.nn.Dropout', 'nn.Dropout', (['(1 - keep_prob)'], {}), '(1 - keep_prob)\n', (6277, 6292), False, 'from torch import log, mean, nn\n'), ((6328, 6351), 'torch.nn.Linear', 'nn.Linear', (['n_hid', 'n_hid'], {}), '(n_hid, n_hid)\n', (6337, 6351), False, 'from torch import log, mean, nn\n'), ((6368, 6376), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (6374, 6376), False, 'from torch import log, mean, nn\n'), ((6394, 6419), 'torch.nn.Dropout', 'nn.Dropout', (['(1 - keep_prob)'], {}), '(1 - keep_prob)\n', (6404, 6419), 
False, 'from torch import log, mean, nn\n'), ((6455, 6477), 'torch.nn.Linear', 'nn.Linear', (['n_in', 'n_hid'], {}), '(n_in, n_hid)\n', (6464, 6477), False, 'from torch import log, mean, nn\n'), ((6494, 6503), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (6501, 6503), False, 'from torch import log, mean, nn\n'), ((6521, 6546), 'torch.nn.Dropout', 'nn.Dropout', (['(1 - keep_prob)'], {}), '(1 - keep_prob)\n', (6531, 6546), False, 'from torch import log, mean, nn\n'), ((6582, 6607), 'torch.nn.Linear', 'nn.Linear', (['n_hid', 'n_label'], {}), '(n_hid, n_label)\n', (6591, 6607), False, 'from torch import log, mean, nn\n'), ((6624, 6632), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (6630, 6632), False, 'from torch import log, mean, nn\n'), ((6650, 6675), 'torch.nn.Dropout', 'nn.Dropout', (['(1 - keep_prob)'], {}), '(1 - keep_prob)\n', (6660, 6675), False, 'from torch import log, mean, nn\n'), ((1445, 1482), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['m.weight.data'], {}), '(m.weight.data)\n', (1467, 1482), False, 'from torch import log, mean, nn\n'), ((2176, 2213), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['m.weight.data'], {}), '(m.weight.data)\n', (2198, 2213), False, 'from torch import log, mean, nn\n'), ((3654, 3691), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['m.weight.data'], {}), '(m.weight.data)\n', (3676, 3691), False, 'from torch import log, mean, nn\n'), ((5668, 5705), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['m.weight.data'], {}), '(m.weight.data)\n', (5690, 5705), False, 'from torch import log, mean, nn\n'), ((6867, 6904), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['m.weight.data'], {}), '(m.weight.data)\n', (6889, 6904), False, 'from torch import log, mean, nn\n'), ((8338, 8371), 'torch.rand', 'torch.rand', (['input_dim', 'output_dim'], {}), '(input_dim, output_dim)\n', (8348, 8371), False, 'import torch\n')]
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
from matplotlib import font_manager
from matplotlib import rc
from matplotlib import gridspec
import pandas as pd
import os
# 0. font
#font_name=font_manager.FontProperties(fname='/usr/share/fonts')
if __name__ == '__main__':
    # Plot two columns of file.csv on twin y-axes and save to picture.png.
    df = pd.read_csv(f'{os.getcwd()}/file.csv', index_col='idx')
    png_name = f'{os.getcwd()}/picture.png'
    title = 'L2 Cache Hit Ratio'
    colors = ['fuchsia', 'orange', 'gold', 'limegreen', 'blue', 'darkblue']
    # Keep the Figure handle separate from the Axes: the original code
    # assigned the Figure to ax1 and immediately overwrote it with the
    # subplot Axes, losing the Figure reference.
    fig = plt.figure(figsize=(5, 4))
    gs = gridspec.GridSpec(nrows=1, ncols=1, bottom=0.15, top=0.92)
    ax1 = plt.subplot(gs[0])
    # Secondary y-axis sharing the same x-axis.
    ax2 = ax1.twinx()
    lines = []
    labels = []
    # Left axis: score; right axis: weight.
    line1 = ax1.plot(df['name'], df['score'], color=colors[0], linewidth=2)
    lines.append(line1[0])
    labels.append('score!')
    line2 = ax2.plot(df['name'], df['weight'], color=colors[3], linewidth=2)
    lines.append(line2[0])
    labels.append('weight!')
    ax1.set_xlabel('Name')
    ax1.set_ylabel('Score')
    ax2.set_ylabel('Weight')
    ax1.grid(True)
    ax1.set_title(title, fontsize=5)
    # One legend combining the handles from both axes.
    plt.legend(lines, labels, fontsize=10, loc='upper left')
    plt.savefig(png_name)
    plt.close()
|
[
"matplotlib.pyplot.subplot",
"os.getcwd",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.savefig"
] |
[((506, 532), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (516, 532), True, 'import matplotlib.pyplot as plt\n'), ((539, 597), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'nrows': '(1)', 'ncols': '(1)', 'bottom': '(0.15)', 'top': '(0.92)'}), '(nrows=1, ncols=1, bottom=0.15, top=0.92)\n', (556, 597), False, 'from matplotlib import gridspec\n'), ((606, 624), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (617, 624), True, 'import matplotlib.pyplot as plt\n'), ((1073, 1129), 'matplotlib.pyplot.legend', 'plt.legend', (['lines', 'labels'], {'fontsize': '(10)', 'loc': '"""upper left"""'}), "(lines, labels, fontsize=10, loc='upper left')\n", (1083, 1129), True, 'import matplotlib.pyplot as plt\n'), ((1131, 1152), 'matplotlib.pyplot.savefig', 'plt.savefig', (['png_name'], {}), '(png_name)\n', (1142, 1152), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1168), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1166, 1168), True, 'import matplotlib.pyplot as plt\n'), ((364, 375), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (373, 375), False, 'import os\n'), ((306, 317), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (315, 317), False, 'import os\n')]
|
""" Find a nearby root of the coupled radial/angular Teukolsky equations.
TODO Documentation.
"""
from __future__ import division, print_function, absolute_import
import logging
import numpy as np
from scipy import optimize
from .angular import sep_const_closest, C_and_sep_const_closest
from . import radial
# TODO some documentation here, better documentation throughout
class NearbyRootFinder(object):
    """Object to find and store results from simultaneous roots of
    radial and angular QNM equations, following the
    Leaver and Cook-Zalutskiy approach.

    Parameters
    ----------
    a: float [default: 0.]
      Dimensionless spin of black hole, 0 <= a < 1.

    s: int [default: -2]
      Spin of field of interest

    m: int [default: 2]
      Azimuthal number of mode of interest

    A_closest_to: complex [default: 4.+0.j]
      Complex value close to desired separation constant. This is
      intended for tracking the l-number of a sequence starting
      from the analytically-known value at a=0

    l_max: int [default: 20]
      Maximum value of l to include in the spherical-spheroidal
      matrix for finding separation constant and mixing
      coefficients. Must be sufficiently larger than l of interest
      that angular spectral method can converge. The number of
      l's needed for convergence depends on a.

    omega_guess: complex [default: .5-.5j]
      Initial guess of omega for root-finding

    tol: float [default: sqrt(double epsilon)]
      Tolerance for root-finding omega

    cf_tol: float [default: 1e-10]
      Tolerance for continued fraction calculation

    n_inv: int [default: 0]
      Inversion number of radial infinite continued fraction,
      which selects overtone number of interest

    Nr: int [default: 300]
      Truncation number of radial infinite continued
      fraction. Must be sufficiently large for convergence.

    Nr_min: int [default: 300]
      Floor for Nr (for dynamic control of Nr)

    Nr_max: int [default: 4000]
      Ceiling for Nr (for dynamic control of Nr)

    r_N: complex [default: 1.]
      Seed value taken for truncation of infinite continued
      fraction. UNUSED, REMOVE
    """

    # Single place mapping accepted keyword-argument names to the
    # attributes they set; keeps __init__/set_params DRY (resolves the
    # old "This violates DRY" TODO in set_params).
    _PARAM_ATTRS = {
        'a': 'a',
        's': 's',
        'm': 'm',
        'A_closest_to': 'A0',
        'l_max': 'l_max',
        'omega_guess': 'omega_guess',
        'tol': 'tol',
        'cf_tol': 'cf_tol',
        'n_inv': 'n_inv',
        'Nr': 'Nr',
        'Nr_min': 'Nr_min',
        'Nr_max': 'Nr_max',
        'r_N': 'r_N',
    }

    def __init__(self, *args, **kwargs):
        # Set defaults before using values in kwargs.  Positional *args
        # are accepted but ignored, matching the historical interface.
        self.a = 0.
        self.s = -2
        self.m = 2
        self.A0 = 4.+0.j
        self.l_max = 20
        self.omega_guess = .5-.5j
        self.tol = np.sqrt(np.finfo(float).eps)
        self.cf_tol = 1e-10
        self.n_inv = 0
        self.Nr = 300
        self.Nr_min = 300
        self.Nr_max = 4000
        self.r_N = 1.

        self.set_params(**kwargs)

    def set_params(self, *args, **kwargs):
        """Set the parameters for root finding. Parameters are
        described in the class documentation. Finally calls
        :meth:`clear_results`.
        """
        # Only keys present in kwargs overwrite the current values;
        # unknown keys are silently ignored (historical behavior).
        for key, attr in self._PARAM_ATTRS.items():
            if key in kwargs:
                setattr(self, attr, kwargs[key])

        # Optional pole factors.  Note clear_results() below resets
        # them again, so previously set poles are always discarded here.
        self.poles = np.array([])

        # TODO: Check that values make sense

        self.clear_results()

    def clear_results(self):
        """Clears the stored results from last call of :meth:`do_solve`"""
        self.solved = False
        self.opt_res = None

        self.omega = None
        self.A = None
        self.C = None

        self.cf_err = None
        self.n_frac = None

        self.poles = np.array([])

    def __call__(self, x):
        """Internal function for usage with optimize.root, for an
        instance of this class to act like a function for
        root-finding. optimize.root only works with reals so we pack
        and unpack complexes into float[2]
        """

        omega = x[0] + 1.j*x[1]
        # oblateness parameter
        c = self.a * omega
        # Separation constant at this a*omega
        A = sep_const_closest(self.A0, self.s, c, self.m,
                              self.l_max)

        # We are trying to find a root of this function:
        # inv_err = radial.leaver_cf_trunc_inversion(omega, self.a,
        #                                            self.s, self.m, A,
        #                                            self.n_inv,
        #                                            self.Nr, self.r_N)

        # TODO!
        # Determine the value to use for cf_tol based on
        # the Jacobian, cf_tol = |d cf(\omega)/d\omega| tol.

        inv_err, self.cf_err, self.n_frac = radial.leaver_cf_inv_lentz(omega, self.a,
                                                                       self.s, self.m, A,
                                                                       self.n_inv, self.cf_tol,
                                                                       self.Nr_min, self.Nr_max)
        # logging.info("Lentz terminated with cf_err={}, n_frac={}".format(self.cf_err, self.n_frac))

        # Insert optional poles
        pole_factors = np.prod(omega - self.poles)
        supp_err = inv_err / pole_factors

        return [np.real(supp_err), np.imag(supp_err)]

    def do_solve(self):
        """Try to find a root of the continued fraction equation,
        using the parameters that have been set in :meth:`set_params`.

        Returns
        -------
        complex or None
            The root omega on success; None when the optimizer fails
            (the failed ``opt_res`` is preserved for inspection).
        """

        # For the default (hybr) method, tol sets 'xtol', the
        # tolerance on omega.
        self.opt_res = optimize.root(self,
                                      [np.real(self.omega_guess), np.imag(self.omega_guess)],
                                      method = 'hybr', tol = self.tol)

        if (not self.opt_res.success):
            # Keep the failed optimization result around while clearing
            # any stale solution fields.
            tmp_opt_res = self.opt_res
            self.clear_results()
            self.opt_res = tmp_opt_res
            return None

        self.solved = True

        self.omega = self.opt_res.x[0] + 1.j*self.opt_res.x[1]
        c = self.a * self.omega
        # As far as I can tell, scipy.linalg.eig already normalizes
        # the eigenvector to unit norm, and the coefficient with the
        # largest norm is real
        self.A, self.C = C_and_sep_const_closest(self.A0,
                                                 self.s, c,
                                                 self.m, self.l_max)

        return self.omega

    def get_cf_err(self):
        """Return the continued fraction error and the number of
        iterations in the last evaluation of the continued fraction.

        Returns
        -------
        cf_err: float

        n_frac: int
        """

        return self.cf_err, self.n_frac

    def set_poles(self, poles=None):
        """ Set poles to multiply error function.

        Parameters
        ----------
        poles: array_like as complex numbers [default: None]
            Treated as an empty list when None.  (A None sentinel is
            used instead of a mutable ``[]`` default to avoid the
            shared-mutable-default-argument pitfall.)
        """
        self.poles = np.array([] if poles is None else poles).astype(complex)
|
[
"numpy.finfo",
"numpy.imag",
"numpy.array",
"numpy.real",
"numpy.prod"
] |
[((3914, 3926), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3922, 3926), True, 'import numpy as np\n'), ((4322, 4334), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4330, 4334), True, 'import numpy as np\n'), ((5821, 5848), 'numpy.prod', 'np.prod', (['(omega - self.poles)'], {}), '(omega - self.poles)\n', (5828, 5848), True, 'import numpy as np\n'), ((5908, 5925), 'numpy.real', 'np.real', (['supp_err'], {}), '(supp_err)\n', (5915, 5925), True, 'import numpy as np\n'), ((5927, 5944), 'numpy.imag', 'np.imag', (['supp_err'], {}), '(supp_err)\n', (5934, 5944), True, 'import numpy as np\n'), ((2511, 2526), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2519, 2526), True, 'import numpy as np\n'), ((6285, 6310), 'numpy.real', 'np.real', (['self.omega_guess'], {}), '(self.omega_guess)\n', (6292, 6310), True, 'import numpy as np\n'), ((6312, 6337), 'numpy.imag', 'np.imag', (['self.omega_guess'], {}), '(self.omega_guess)\n', (6319, 6337), True, 'import numpy as np\n'), ((7600, 7615), 'numpy.array', 'np.array', (['poles'], {}), '(poles)\n', (7608, 7615), True, 'import numpy as np\n')]
|
import sys

import palette

# Exactly two CLI arguments are required: the debug type and the
# on/off flag; otherwise print usage and exit with a failure status.
if len(sys.argv) != 3:
    print("Usage: debug {debug-type} {onoff}")
    sys.exit(1)

debug_type = sys.argv[1]
switch = sys.argv[2]

# Forward the toggle to the palette API as a JSON-style body.
palette.palette_api(
    "global.debug",
    '"debug": "{}", "onoff": "{}"'.format(debug_type, switch),
)
|
[
"sys.exit",
"palette.palette_api"
] |
[((154, 247), 'palette.palette_api', 'palette.palette_api', (['"""global.debug"""', '(\'"debug": "\' + dtype + \'", "onoff": "\' + onoff + \'"\')'], {}), '(\'global.debug\', \'"debug": "\' + dtype + \'", "onoff": "\' +\n onoff + \'"\')\n', (173, 247), False, 'import palette\n'), ((101, 112), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (109, 112), False, 'import sys\n')]
|
import os
class Scaner(object):
    """Recursively collects file paths under a directory tree."""

    def __init__(self) -> None:
        super().__init__()
        self.current_work_dir = ""  # directory containing this script
        self.all_filepath = []      # accumulated file paths found so far

    def get_current_work_dir(self):
        """Record the directory this source file lives in."""
        self.current_work_dir = os.path.dirname(__file__)

    def get_all_filepath(self, dir_path: str, ignore_path_set: set):
        '''Walk ``dir_path`` recursively, appending every regular file
        whose full path does not contain any substring from
        ``ignore_path_set`` to ``self.all_filepath``.

        Note the match is a plain substring test, so e.g. ".py" also
        skips paths containing ".pyc".

        TODO:
            1. ignore sets;
            2. only scan sets;
        '''
        for entry in os.listdir(dir_path):
            file_path = os.path.join(dir_path, entry)
            if os.path.isdir(file_path):
                # Recurse into subdirectories with the same ignore set.
                self.get_all_filepath(file_path, ignore_path_set)
            elif not any(item in file_path for item in ignore_path_set):
                self.all_filepath.append(file_path)
class Examiner(object):
    """Rewrites a file in place, replacing an erroneous string with the
    intended target string, line by line."""

    def __init__(self, target_str: str, err_str: str) -> None:
        super().__init__()
        self.file_path = ""           # file currently selected for fixing
        self.target_str = target_str  # the correct text
        self.err_str = err_str        # the erroneous text to be replaced

    def insert_file(self, file_path):
        """Select the file that :meth:`check_and_replace` operates on."""
        self.file_path = file_path

    def check_and_replace(self):
        """Read the selected file, replace every occurrence of
        ``err_str`` with ``target_str`` per line, and write the result
        back to the same path."""
        fixed_lines = []
        with open(self.file_path, "r", encoding="utf-8") as file_object:
            for line in file_object.readlines():
                fixed_lines.append(self.str_find_replace(
                    line=line, target_str=self.target_str,
                    err_str=self.err_str))
        with open(self.file_path, "w+", encoding="utf-8") as file_object:
            # Join once instead of quadratic string concatenation.
            file_object.write("".join(fixed_lines))

    @staticmethod
    def str_find_replace(line: str, target_str: str, err_str: str) -> str:
        """Return ``line`` with ``err_str`` replaced by ``target_str``
        (no-op when ``err_str`` is absent)."""
        # The original also probed for target_str and did nothing with
        # the result; that dead branch has been removed.
        if err_str in line:
            line = line.replace(err_str, target_str)
        return line
if __name__ == "__main__":
scaner = Scaner()
scaner.get_current_work_dir()
print("current_work_dir:{current_work_dir}\n".format(
current_work_dir=scaner.current_work_dir))
scaner.get_all_filepath(scaner.current_work_dir, ignore_path_set={
".git", ".py", ".vscode", ".DS_Store"})
examiner = Examiner(target_str="command=ssh -tt -i ",
err_str="command=ssh -i ")
for file_path in scaner.all_filepath:
examiner.insert_file(file_path=file_path)
examiner.check_and_replace()
|
[
"os.path.isdir",
"os.path.dirname",
"os.path.join",
"os.listdir"
] |
[((228, 253), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (243, 253), False, 'import os\n'), ((433, 453), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (443, 453), False, 'import os\n'), ((479, 507), 'os.path.join', 'os.path.join', (['dir_path', 'file'], {}), '(dir_path, file)\n', (491, 507), False, 'import os\n'), ((523, 547), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (536, 547), False, 'import os\n')]
|
#!/usr/bin/env python
"""ML models for plant disease classification."""
from __future__ import absolute_import
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright (c) 2020 <NAME>"
__license__ = "MIT License"
__version__ = "0.1.0"
__url__ = "https://github.com/abdullahselek/plant-disease-classification-models"
__download_url__ = "https://pypi.org/project/plant-disease-classification-models"
__description__ = "ML models for plant disease classification."
from pkg_resources import resource_filename
def model_one():
    """Return the installed path of the bundled ``models/model_1.pt`` file."""
    relative_path = "models/model_1.pt"
    return resource_filename(__name__, relative_path)
|
[
"pkg_resources.resource_filename"
] |
[((554, 602), 'pkg_resources.resource_filename', 'resource_filename', (['__name__', '"""models/model_1.pt"""'], {}), "(__name__, 'models/model_1.pt')\n", (571, 602), False, 'from pkg_resources import resource_filename\n')]
|
# -*- coding: utf8 -*-
# @author: yinan
# @time: 18-8-28 下午2:53
# @filename: gevent_tornado.py.py
import gevent.pywsgi
from gevent import monkey
# Monkey-patch the standard library for cooperative (gevent) I/O.  This
# runs before the application modules below are imported, so sockets
# they create are already patched — do not reorder these statements.
monkey.patch_all()
from logging.config import dictConfig
from flask_cors import CORS
from application import app, configs
from application.controllers.client_controller import client
from application.controllers.sys_admin_controller import admin
# Enable cross-origin requests on both blueprints.
CORS(admin)
CORS(client)
# Client routes mount at the site root; admin routes under /sysadmin.
app.register_blueprint(client, url_prefix='/')
app.register_blueprint(admin, url_prefix='/sysadmin')
# Apply the project-wide logging configuration.
dictConfig(configs.LOGGING_CONFIG)
if __name__ == "__main__":
    # Serve the Flask app with gevent's WSGI server on port 5000.
    server = gevent.pywsgi.WSGIServer(('', 5000), app)
    server.serve_forever()
|
[
"flask_cors.CORS",
"application.app.register_blueprint",
"logging.config.dictConfig",
"gevent.monkey.patch_all"
] |
[((145, 163), 'gevent.monkey.patch_all', 'monkey.patch_all', ([], {}), '()\n', (161, 163), False, 'from gevent import monkey\n'), ((392, 403), 'flask_cors.CORS', 'CORS', (['admin'], {}), '(admin)\n', (396, 403), False, 'from flask_cors import CORS\n'), ((404, 416), 'flask_cors.CORS', 'CORS', (['client'], {}), '(client)\n', (408, 416), False, 'from flask_cors import CORS\n'), ((417, 463), 'application.app.register_blueprint', 'app.register_blueprint', (['client'], {'url_prefix': '"""/"""'}), "(client, url_prefix='/')\n", (439, 463), False, 'from application import app, configs\n'), ((464, 517), 'application.app.register_blueprint', 'app.register_blueprint', (['admin'], {'url_prefix': '"""/sysadmin"""'}), "(admin, url_prefix='/sysadmin')\n", (486, 517), False, 'from application import app, configs\n'), ((518, 552), 'logging.config.dictConfig', 'dictConfig', (['configs.LOGGING_CONFIG'], {}), '(configs.LOGGING_CONFIG)\n', (528, 552), False, 'from logging.config import dictConfig\n')]
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import io
import json
import os
import time
import fastavro
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from confluent_kafka import Consumer, TopicPartition
from datamanage.pro import exceptions as dm_pro_errors
from datamanage.pro.dataquality.config import (
CORRECT_SQL_DATA_TYPES_MAPPINGS,
CORRECTION_EXCLUDE_FIELDS,
TEXT_FIELD_TYPES,
)
from datamanage.pro.dataquality.mixins.base_mixins import BaseMixin
from datamanage.pro.dataquality.models.correction import (
DataQualityCorrectConditionTemplate,
DataQualityCorrectConfig,
DataQualityCorrectConfigItem,
DataQualityCorrectHandlerTemplate,
)
from datamanage.pro.dataquality.serializers.base import DataQualityDataSetSerializer
from datamanage.pro.dataquality.serializers.correction import (
CorrectConditionTemplateSerializer,
CorrectConfigCreateSerializer,
CorrectConfigUpdateSerializer,
CorrectDebugResultSerializer,
CorrectDebugSubmitSerializer,
CorrectHandlerTemplateSerializer,
CorrectSqlSerializer,
)
from datamanage.pro.pizza_settings import (
CORRECTING_DEBUG_SESSION_KEY,
DEBUG_VIRTUAL_TABLE,
PARQUET_FILE_TMP_FOLDER,
SESSION_GEOG_AREA_CODE,
)
from datamanage.utils.api import DataflowApi, MetaApi
from datamanage.utils.dbtools.hdfs_util import get_hdfs_client
from datamanage.utils.drf import DataPageNumberPagination
from django.db import transaction
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from common.decorators import list_route, params_valid
from common.local import get_request_username
from common.log import logger
from common.views import APIModelViewSet, APIViewSet
class DataQualityCorrectConfigViewSet(BaseMixin, APIViewSet):
    """Viewset managing data-quality correction rule configurations."""
    # URL keyword argument used to address a single correction config.
    lookup_field = "correct_config_id"
    # NOTE(review): the constants below are presumably consumed by the
    # debug/sampling endpoints of this viewset, which are outside this
    # excerpt — confirm against the rest of the class.
    SAMPLE_DATA_SOURCE_CLUSTER_TYPE = "kafka"
    VIRTUAL_TABLE_CLUSTER_TYPE = "hdfs"
    VIRTUAL_TABLE_TMP_FOLDER = "correct_debug_tmp_tables"
    # processing_type used by generate_correct_sql(for_debug=True).
    DEBUG_DEFAULT_PROCESSING_TYPE = "batch"
    MAX_SAMPLE_DATA_COUNT = 100
    SAMPLE_DATA_TIMEOUT = 10
    @params_valid(serializer=CorrectConfigCreateSerializer)
    def create(self, request, params):
        """
        @api {post} /datamanage/dataquality/correct_configs/ Create correction rule configuration
        @apiVersion 3.5.0
        @apiGroup DataQualityCorrection
        @apiName dataquality_correct_config_create
        @apiDescription Create a correction rule configuration

        @apiParam {String} bk_username username
        @apiParam {String} data_set_id dataset ID
        @apiParam {Number} bk_biz_id business ID
        @apiParam {Number} flow_id dataflow ID
        @apiParam {Number} node_id dataflow node ID
        @apiParam {String} source_sql original SQL
        @apiParam {String} generate_type generation type
        @apiParam {String} description description
        @apiParam {List} correct_configs list of correction configs

        @apiSuccess (200) {Number} data.correct_config_id correction config ID
        @apiSuccess (200) {Number} data.data_set_id dataset ID
        @apiSuccess (200) {Number} data.flow_id dataflow ID
        @apiSuccess (200) {Number} data.node_id dataflow node ID
        @apiSuccess (200) {String} data.source_sql original SQL
        @apiSuccess (200) {String} data.correct_sql correction SQL
        @apiSuccess (200) {String} data.generate_type generation type
        @apiSuccess (200) {String} data.created_by creator
        @apiSuccess (200) {String} data.created_at creation time
        @apiSuccess (200) {String} data.updated_by updater
        @apiSuccess (200) {String} data.updated_at update time
        @apiSuccess (200) {String} data.description description
        @apiSuccess (200) {List} data.correct_configs list of detailed correction config items
        @apiSuccess (200) {Number} data.correct_configs.correct_config_item_id correction config ID
        @apiSuccess (200) {String} data.correct_configs.field dataset field
        @apiSuccess (200) {Object} data.correct_configs.correct_config_detail correction config
        @apiSuccess (200) {String} data.correct_configs.correct_config_alias correction config alias
        @apiSuccess (200) {String} data.correct_configs.created_by creator
        @apiSuccess (200) {String} data.correct_configs.created_at creation time
        @apiSuccess (200) {String} data.correct_configs.updated_by updater
        @apiSuccess (200) {String} data.correct_configs.updated_at update time
        @apiSuccess (200) {String} data.correct_configs.description rule config description

        @apiSuccessExample Success-Response:
            HTTP/1.1 200 OK
            {
                "errors": {},
                "message": "ok",
                "code": "1500200",
                "result": true,
                "data": {
                    "correct_config_id": 1,
                    "data_set_id": "591_table",
                    "bk_biz_id": 591,
                    "flow_id": 1,
                    "node_id": 1,
                    "source_sql": "xxx",
                    "correct_sql": "xxx",
                    "generate_type": "user",
                    "correct_configs": [
                        {
                            "correct_config_item_id": 1,
                            "field": "field1",
                            "correct_config_detail": {
                                "rules": [
                                    {
                                        "condition": {
                                            "condition_name": "custom_sql_condition",
                                            "condition_type": "custom",
                                            "condition_value": "field1 IS NOT NULL"
                                        },
                                        "handler": {
                                            "handler_name": "fixed_filling",
                                            "handler_type": "filling",
                                            "handler_value_type": "int",
                                            "handler_value": 100
                                        }
                                    }
                                ],
                                "output": {
                                    "generate_new_field": false,
                                    "new_field": ""
                                }
                            },
                            "correct_config_alias": "",
                            "created_by": "admin",
                            "created_at": "2020-07-27 10:30:00",
                            "updated_by": "admin",
                            "updated_at": "2020-07-27 10:31:00"
                        }
                    ],
                    "created_by": "admin",
                    "created_at": "2020-07-27 10:30:00",
                    "updated_by": "admin",
                    "updated_at": "2020-07-27 10:31:00",
                    "description": ""
                }
            }
        """
        correct_configs = []
        # Create the parent config and all of its items inside one
        # transaction so a failure on any item rolls everything back.
        with transaction.atomic(using="bkdata_basic"):
            correct_config = DataQualityCorrectConfig.objects.create(
                data_set_id=params["data_set_id"],
                bk_biz_id=params["bk_biz_id"],
                flow_id=params["flow_id"],
                node_id=params["node_id"],
                source_sql=params["source_sql"],
                # The correction SQL is derived from the source SQL plus
                # the per-field correction rules.
                correct_sql=self.generate_correct_sql(
                    params["data_set_id"],
                    params["source_sql"],
                    params["correct_configs"],
                ),
                generate_type=params["generate_type"],
                created_by=get_request_username(),
                description=params["description"],
            )

            for item_params in params["correct_configs"]:
                correct_configs.append(self.create_correct_config_item(item_params, correct_config))
        # Echo the stored configuration back to the caller.
        return Response(
            {
                "correct_config_id": correct_config.id,
                "data_set_id": correct_config.data_set_id,
                "flow_id": correct_config.flow_id,
                "node_id": correct_config.node_id,
                "source_sql": correct_config.source_sql,
                "correct_sql": correct_config.correct_sql,
                "generate_type": correct_config.generate_type,
                "correct_configs": correct_configs,
                "created_by": correct_config.created_by,
                "created_at": correct_config.created_at.strftime("%Y-%m-%d %H:%M:%S"),
                "updated_by": correct_config.updated_by,
                "updated_at": correct_config.updated_at.strftime("%Y-%m-%d %H:%M:%S"),
                "description": correct_config.description,
            }
        )
def create_correct_config_item(self, params, correct_config):
correct_config_item = DataQualityCorrectConfigItem.objects.create(
field=params["field"],
correct_config=correct_config,
correct_config_detail=json.dumps(params["correct_config_detail"]),
correct_config_alias=params["correct_config_alias"],
created_by=get_request_username(),
description=params["description"],
)
return {
"correct_config_item_id": correct_config_item.id,
"field": correct_config_item.field,
"correct_config_detail": params["correct_config_detail"],
"correct_config_alias": params["correct_config_alias"],
"created_by": correct_config_item.created_by,
"created_at": correct_config_item.created_at.strftime("%Y-%m-%d %H:%M:%S"),
"updated_by": correct_config_item.updated_by,
"updated_at": correct_config_item.updated_at.strftime("%Y-%m-%d %H:%M:%S"),
"description": correct_config_item.description,
}
def generate_correct_sql(self, data_set_id, source_sql, correct_configs, for_debug=False):
result_table_info = MetaApi.result_tables.retrieve(
{"result_table_id": data_set_id, "related": ["fields"]},
raise_exception=True,
).data
fields = result_table_info.get("fields", [])
if for_debug:
processing_type = self.DEBUG_DEFAULT_PROCESSING_TYPE
else:
processing_type = result_table_info.get("processing_type")
source_fields = {}
for field in fields:
field_name = field.get("field_name")
if field_name not in CORRECTION_EXCLUDE_FIELDS:
source_fields[field_name] = field
new_select_parts = self.generate_select_parts(source_fields, correct_configs, processing_type)
correct_sql = "SELECT {} FROM ({})".format(", ".join(new_select_parts), source_sql)
return correct_sql
def generate_select_parts(self, fields, correct_configs, processing_type):
select_parts = []
for correct_config in correct_configs:
field = correct_config.get("field")
if field not in fields:
continue
field_type = fields[field].get("field_type")
case_parts = []
for rule in correct_config.get("correct_config_detail", {}).get("rules", []):
case_parts.append(
"WHEN {} THEN {}".format(
rule.get("condition", {}).get("condition_value"),
self.generate_handler_value(rule.get("handler", {}), field_type),
)
)
select_parts.append(
"CASE {} ELSE {} END AS {}".format(
"\n".join(case_parts),
self.generate_field_type_cast(field, field_type, processing_type),
field,
)
)
del fields[field]
for src_field in list(fields.keys()):
select_parts.append("{} as {}".format(src_field, src_field))
return select_parts
def generate_handler_value(self, handler_config, field_type):
if field_type in TEXT_FIELD_TYPES:
return "'{}'".format(handler_config.get("handler_value"))
else:
return "{}".format(handler_config.get("handler_value"))
def generate_field_type_cast(self, field, field_type, processing_type):
return "CAST({} AS {})".format(
field,
CORRECT_SQL_DATA_TYPES_MAPPINGS.get(field_type, {}).get(processing_type, field_type),
)
    @params_valid(serializer=CorrectConfigUpdateSerializer)
    def update(self, request, params, correct_config_id):
        """
        @api {put} /datamanage/dataquality/correct_configs/{correct_config_id}/ Update correction rule configuration
        @apiVersion 3.5.0
        @apiGroup DataQualityCorrection
        @apiName dataquality_correct_config_update
        @apiDescription Update a correction rule configuration

        @apiParam {String} bk_username username
        @apiParam {String} source_sql original SQL
        @apiParam {List} correct_configs list of correction configs

        @apiSuccess (200) {Number} data.data_set_id dataset ID
        @apiSuccess (200) {Number} data.flow_id dataflow ID
        @apiSuccess (200) {Number} data.node_id dataflow node ID
        @apiSuccess (200) {String} data.source_sql original SQL
        @apiSuccess (200) {String} data.correct_sql correction SQL
        @apiSuccess (200) {String} data.generate_type generation type
        @apiSuccess (200) {String} data.created_by creator
        @apiSuccess (200) {String} data.created_at creation time
        @apiSuccess (200) {String} data.updated_by updater
        @apiSuccess (200) {String} data.updated_at update time
        @apiSuccess (200) {String} data.description description
        @apiSuccess (200) {List} data.correct_configs list of detailed correction config items
        @apiSuccess (200) {Number} data.correct_configs.correct_config_item_id correction config ID
        @apiSuccess (200) {String} data.correct_configs.field dataset field
        @apiSuccess (200) {Object} data.correct_configs.correct_config_detail correction config
        @apiSuccess (200) {String} data.correct_configs.correct_config_alias correction config alias
        @apiSuccess (200) {String} data.correct_configs.created_by creator
        @apiSuccess (200) {String} data.correct_configs.created_at creation time
        @apiSuccess (200) {String} data.correct_configs.updated_by updater
        @apiSuccess (200) {String} data.correct_configs.updated_at update time
        @apiSuccess (200) {String} data.correct_configs.description rule config description

        @apiSuccessExample Success-Response:
            HTTP/1.1 200 OK
            {
                "errors": {},
                "message": "ok",
                "code": "1500200",
                "result": true,
                "data": {
                    "correct_config_id": 1,
                    "data_set_id": "591_table",
                    "bk_biz_id": 591,
                    "flow_id": 1,
                    "node_id": 1,
                    "source_sql": "xxx",
                    "correct_sql": "xxx",
                    "generate_type": "user",
                    "correct_configs": [
                        {
                            "correct_config_item_id": 1,
                            "field": "field1",
                            "correct_config_detail": {
                                "rules": [
                                    {
                                        "condition": {
                                            "condition_name": "custom_sql_condition",
                                            "condition_type": "custom",
                                            "condition_value": "field1 IS NOT NULL"
                                        },
                                        "handler": {
                                            "handler_name": "fixed_filling",
                                            "handler_type": "filling",
                                            "handler_value_type": "int",
                                            "handler_value": 100
                                        }
                                    }
                                ],
                                "output": {
                                    "generate_new_field": false,
                                    "new_field": ""
                                }
                            },
                            "correct_config_alias": "",
                            "created_by": "admin",
                            "created_at": "2020-07-27 10:30:00",
                            "updated_by": "admin",
                            "updated_at": "2020-07-27 10:31:00"
                        }
                    ],
                    "created_by": "admin",
                    "created_at": "2020-07-27 10:30:00",
                    "updated_by": "admin",
                    "updated_at": "2020-07-27 10:31:00",
                    "description": ""
                }
            }
        """
        correct_configs = []
        # Update the config and its items atomically so partial updates
        # never become visible.
        with transaction.atomic(using="bkdata_basic"):
            try:
                correct_config = DataQualityCorrectConfig.objects.get(id=correct_config_id)
            except DataQualityCorrectConfig.DoesNotExist:
                raise dm_pro_errors.CorrectConfigNotExistError()

            correct_config.source_sql = params["source_sql"]
            # Regenerate the correction SQL from the new source SQL and
            # rule set.
            correct_config.correct_sql = self.generate_correct_sql(
                correct_config.data_set_id,
                params["source_sql"],
                params["correct_configs"],
            )
            correct_config.updated_by = get_request_username()
            correct_config.save()

            for correct_config_params in params["correct_configs"]:
                correct_configs.append(self.update_correct_config(correct_config_params, correct_config))
        # Echo the stored configuration back to the caller.
        return Response(
            {
                "correct_config_id": correct_config.id,
                "data_set_id": correct_config.data_set_id,
                "flow_id": correct_config.flow_id,
                "node_id": correct_config.node_id,
                "source_sql": correct_config.source_sql,
                "correct_sql": correct_config.correct_sql,
                "generate_type": correct_config.generate_type,
                "correct_configs": correct_configs,
                "created_by": correct_config.created_by,
                "created_at": correct_config.created_at.strftime("%Y-%m-%d %H:%M:%S"),
                "updated_by": correct_config.updated_by,
                "updated_at": correct_config.updated_at.strftime("%Y-%m-%d %H:%M:%S"),
                "description": correct_config.description,
            }
        )
def update_correct_config(self, params, correct_config):
if "correct_config_item_id" not in params or not params["correct_config_item_id"]:
return self.create_correct_config_item(params, correct_config)
try:
correct_config_item = DataQualityCorrectConfigItem.objects.get(id=params["correct_config_item_id"])
except DataQualityCorrectConfigItem.DoesNotExist:
raise dm_pro_errors.CorrectConfigNotExistError()
correct_config_item.field = params["field"]
correct_config_item.correct_config_detail = json.dumps(params["correct_config_detail"])
correct_config_item.correct_config_alias = params["correct_config_alias"]
correct_config_item.description = params["description"]
correct_config_item.updated_by = get_request_username()
correct_config_item.save()
return {
"correct_config_item_id": correct_config_item.id,
"field": correct_config_item.field,
"correct_config_detail": params["correct_config_detail"],
"correct_config_alias": params["correct_config_alias"],
"created_by": correct_config_item.created_by,
"created_at": correct_config_item.created_at.strftime("%Y-%m-%d %H:%M:%S"),
"updated_by": correct_config_item.updated_by,
"updated_at": correct_config_item.updated_at.strftime("%Y-%m-%d %H:%M:%S"),
"description": correct_config_item.description,
}
    @params_valid(serializer=DataQualityDataSetSerializer)
    def list(self, request, params):
        """
        @api {get} /datamanage/dataquality/correct_configs/ Query correction rule configurations
        @apiVersion 3.5.0
        @apiGroup DataQualityCorrection
        @apiName dataquality_correct_config_list
        @apiDescription Query correction rule configurations

        @apiParam {String} data_set_id dataset

        @apiSuccess (200) {Number} data.correct_config_id correction config ID
        @apiSuccess (200) {Number} data.data_set_id dataset ID
        @apiSuccess (200) {Number} data.flow_id dataflow ID
        @apiSuccess (200) {Number} data.node_id dataflow node ID
        @apiSuccess (200) {String} data.source_sql original SQL
        @apiSuccess (200) {String} data.correct_sql correction SQL
        @apiSuccess (200) {String} data.generate_type generation type
        @apiSuccess (200) {String} data.created_by creator
        @apiSuccess (200) {String} data.created_at creation time
        @apiSuccess (200) {String} data.updated_by updater
        @apiSuccess (200) {String} data.updated_at update time
        @apiSuccess (200) {String} data.description description
        @apiSuccess (200) {List} data.correct_configs list of detailed correction config items
        @apiSuccess (200) {Number} data.correct_configs.correct_config_item_id correction config ID
        @apiSuccess (200) {String} data.correct_configs.field dataset field
        @apiSuccess (200) {Object} data.correct_configs.correct_config_detail correction config
        @apiSuccess (200) {String} data.correct_configs.correct_config_alias correction config alias
        @apiSuccess (200) {String} data.correct_configs.created_by creator
        @apiSuccess (200) {String} data.correct_configs.created_at creation time
        @apiSuccess (200) {String} data.correct_configs.updated_by updater
        @apiSuccess (200) {String} data.correct_configs.updated_at update time
        @apiSuccess (200) {String} data.correct_configs.description rule config description

        @apiSuccessExample Success-Response:
            HTTP/1.1 200 OK
            {
                "errors": {},
                "message": "ok",
                "code": "1500200",
                "result": true,
                "data": [
                    {
                        "correct_config_id": 1,
                        "data_set_id": "591_table",
                        "bk_biz_id": 591,
                        "flow_id": 1,
                        "node_id": 1,
                        "source_sql": "xxx",
                        "correct_sql": "xxx",
                        "generate_type": "user",
                        "correct_configs": [
                            {
                                "correct_config_item_id": 1,
                                "field": "field1",
                                "correct_config_detail": {
                                    "rules": [
                                        {
                                            "condition": {
                                                "condition_name": "custom_sql_condition",
                                                "condition_type": "custom",
                                                "condition_value": "field1 IS NOT NULL"
                                            },
                                            "handler": {
                                                "handler_name": "fixed_filling",
                                                "handler_type": "filling",
                                                "handler_value_type": "int",
                                                "handler_value": 100
                                            }
                                        }
                                    ],
                                    "output": {
                                        "generate_new_field": false,
                                        "new_field": ""
                                    }
                                },
                                "correct_config_alias": "",
                                "created_by": "admin",
                                "created_at": "2020-07-27 10:30:00",
                                "updated_by": "admin",
                                "updated_at": "2020-07-27 10:31:00"
                            }
                        ],
                        "created_by": "admin",
                        "created_at": "2020-07-27 10:30:00",
                        "updated_by": "admin",
                        "updated_at": "2020-07-27 10:31:00",
                        "description": ""
                    }
                ]
            }
        """
        data_set_id = params["data_set_id"]
        # Only active configs for this dataset, oldest first.
        correct_configs = DataQualityCorrectConfig.objects.filter(active=True, data_set_id=data_set_id).order_by("id")

        results = []
        for correct_config in correct_configs:
            correct_config_items = []
            for correct_config_item in DataQualityCorrectConfigItem.objects.filter(correct_config=correct_config):
                correct_config_items.append(
                    {
                        "correct_config_item_id": correct_config_item.id,
                        "field": correct_config_item.field,
                        # Stored as a JSON string; decode for the response.
                        "correct_config_detail": json.loads(correct_config_item.correct_config_detail),
                        "correct_config_alias": correct_config_item.correct_config_alias,
                        "created_by": correct_config_item.created_by,
                        "created_at": correct_config_item.created_at.strftime("%Y-%m-%d %H:%M:%S"),
                        "updated_by": correct_config_item.updated_by,
                        "updated_at": correct_config_item.updated_at.strftime("%Y-%m-%d %H:%M:%S"),
                        "description": correct_config_item.description,
                    }
                )
            results.append(
                {
                    "correct_config_id": correct_config.id,
                    "data_set_id": correct_config.data_set_id,
                    "flow_id": correct_config.flow_id,
                    "node_id": correct_config.node_id,
                    "source_sql": correct_config.source_sql,
                    "correct_sql": correct_config.correct_sql,
                    "generate_type": correct_config.generate_type,
                    "correct_configs": correct_config_items,
                    "created_by": correct_config.created_by,
                    "created_at": correct_config.created_at.strftime("%Y-%m-%d %H:%M:%S"),
                    "updated_by": correct_config.updated_by,
                    "updated_at": correct_config.updated_at.strftime("%Y-%m-%d %H:%M:%S"),
                    "description": correct_config.description,
                }
            )
        return Response(results)
def retrieve(self, request, correct_config_id):
    """
    @api {get} /datamanage/dataquality/correct_configs/{correct_config_id}/ Retrieve a single correction rule config
    @apiVersion 3.5.0
    @apiGroup DataQualityCorrection
    @apiName dataquality_correct_config_retrieve
    @apiDescription Retrieve a single correction rule config

    @apiSuccess (200) {Number} data.correct_config_id correction config ID
    @apiSuccess (200) {Number} data.data_set_id data set ID
    @apiSuccess (200) {Number} data.flow_id dataflow ID
    @apiSuccess (200) {Number} data.node_id dataflow node ID
    @apiSuccess (200) {String} data.source_sql source SQL
    @apiSuccess (200) {String} data.correct_sql correction SQL
    @apiSuccess (200) {String} data.generate_type generate type
    @apiSuccess (200) {String} data.created_by creator
    @apiSuccess (200) {String} data.created_at creation time
    @apiSuccess (200) {String} data.updated_by last updater
    @apiSuccess (200) {String} data.updated_at last update time
    @apiSuccess (200) {String} data.description description
    @apiSuccess (200) {List} data.correct_configs list of detailed correction config items
    @apiSuccess (200) {Number} data.correct_configs.correct_config_item_id correction config item ID
    @apiSuccess (200) {String} data.correct_configs.field data set field
    @apiSuccess (200) {Object} data.correct_configs.correct_config_detail correction config detail
    @apiSuccess (200) {String} data.correct_configs.correct_config_alias correction config alias
    @apiSuccess (200) {String} data.correct_configs.created_by creator
    @apiSuccess (200) {String} data.correct_configs.created_at creation time
    @apiSuccess (200) {String} data.correct_configs.updated_by last updater
    @apiSuccess (200) {String} data.correct_configs.updated_at last update time
    @apiSuccess (200) {String} data.correct_configs.description rule config description

    @apiSuccessExample Success-Response:
        HTTP/1.1 200 OK
        {
            "errors": {},
            "message": "ok",
            "code": "1500200",
            "result": true,
            "data": [
                {
                    "correct_config_id": 1,
                    "data_set_id": "591_table",
                    "bk_biz_id": 591,
                    "flow_id": 1,
                    "node_id": 1,
                    "source_sql": "xxx",
                    "correct_sql": "xxx",
                    "generate_type": "user",
                    "correct_configs": [
                        {
                            "correct_config_item_id": 1,
                            "field": "field1",
                            "correct_config_detail": {
                                "rules": [
                                    {
                                        "condition": {
                                            "condition_name": "custom_sql_condition",
                                            "condition_type": "custom",
                                            "condition_value": "field1 IS NOT NULL"
                                        },
                                        "handler": {
                                            "handler_name": "fixed_filling",
                                            "handler_type": "filling",
                                            "handler_value_type": "int",
                                            "handler_value": 100
                                        }
                                    }
                                ],
                                "output": {
                                    "generate_new_field": false,
                                    "new_field": ""
                                }
                            },
                            "correct_config_alias": "",
                            "created_by": "admin",
                            "created_at": "2020-07-27 10:30:00",
                            "updated_by": "admin",
                            "updated_at": "2020-07-27 10:31:00"
                        }
                    ],
                    "created_by": "admin",
                    "created_at": "2020-07-27 10:30:00",
                    "updated_by": "admin",
                    "updated_at": "2020-07-27 10:31:00",
                    "description": ""
                }
            ]
        }
    """
    try:
        config = DataQualityCorrectConfig.objects.get(id=correct_config_id)
    except DataQualityCorrectConfig.DoesNotExist:
        logger.error("修正配置({})不存在".format(correct_config_id))
        raise dm_pro_errors.CorrectConfigNotExistError()

    time_format = "%Y-%m-%d %H:%M:%S"
    # Serialize every detail item attached to this config.
    config_items = [
        {
            "correct_config_item_id": item.id,
            "field": item.field,
            "correct_config_detail": json.loads(item.correct_config_detail),
            "correct_config_alias": item.correct_config_alias,
            "created_by": item.created_by,
            "created_at": item.created_at.strftime(time_format),
            "updated_by": item.updated_by,
            "updated_at": item.updated_at.strftime(time_format),
            "description": item.description,
        }
        for item in DataQualityCorrectConfigItem.objects.filter(correct_config=config)
    ]
    return Response(
        {
            "correct_config_id": config.id,
            "data_set_id": config.data_set_id,
            "flow_id": config.flow_id,
            "node_id": config.node_id,
            "source_sql": config.source_sql,
            "correct_sql": config.correct_sql,
            "generate_type": config.generate_type,
            "correct_configs": config_items,
            "created_by": config.created_by,
            "created_at": config.created_at.strftime(time_format),
            "updated_by": config.updated_by,
            "updated_at": config.updated_at.strftime(time_format),
            "description": config.description,
        }
    )
def delete(self, request, correct_config_id):
    """
    @api {delete} /datamanage/dataquality/correct_configs/{correct_config_id}/ Delete a single correction rule config
    @apiVersion 3.5.0
    @apiGroup DataQualityCorrection
    @apiName dataquality_correct_config_delete
    @apiDescription Delete a single correction rule config

    @apiSuccessExample Success-Response:
        HTTP/1.1 200 OK
        {
            "errors": {},
            "message": "ok",
            "code": "1500200",
            "result": true,
            "data": "ok"
        }
    """
    try:
        config = DataQualityCorrectConfig.objects.get(id=correct_config_id)
    except DataQualityCorrectConfig.DoesNotExist:
        logger.error("修正配置({})不存在".format(correct_config_id))
        raise dm_pro_errors.CorrectConfigNotExistError()

    # Delete the config together with all of its detail items in one
    # transaction, so a partially-deleted config can never be observed.
    with transaction.atomic(using="bkdata_basic"):
        DataQualityCorrectConfigItem.objects.filter(correct_config=config).delete()
        config.delete()
    return Response("ok")
@list_route(methods=["post"], url_path="correct_sql")
@params_valid(serializer=CorrectSqlSerializer)
def correct_sql(self, request, params):
    """
    @api {post} /datamanage/dataquality/correct_configs/correct_sql/ Generate correction SQL
    @apiVersion 3.5.0
    @apiGroup DataQualityCorrection
    @apiName dataquality_correct_config_correct_sql
    @apiDescription Generate correction SQL
    @apiParam {String} data_set_id data set ID
    @apiParam {String} source_sql source SQL
    @apiParam {Object} correct_configs correction configs

    @apiSuccess (200) {Number} data.correct_sql generated correction SQL
    @apiSuccessExample Success-Response:
        HTTP/1.1 200 OK
        {
            "errors": {},
            "message": "ok",
            "code": "1500200",
            "result": true,
            "data": "SELECT * FROM table"
        }
    """
    data_set_id = params["data_set_id"]
    source_sql = params["source_sql"]
    correct_configs = params["correct_configs"]
    generated_sql = self.generate_correct_sql(data_set_id, source_sql, correct_configs)
    return Response(generated_sql)
@list_route(methods=["post"], url_path="debug/submit")
@params_valid(serializer=CorrectDebugSubmitSerializer)
def debug_submit(self, request, params):
    """
    @api {post} /datamanage/dataquality/correct_configs/debug/submit/ Submit a correction SQL debug task
    @apiVersion 3.5.0
    @apiGroup DataQualityCorrection
    @apiName dataquality_correct_debug_submit
    @apiDescription Submit a correction SQL debug task
    @apiParam {String} source_data_set_id upstream data set ID
    @apiParam {String} data_set_id current data set ID
    @apiParam {String} source_sql source SQL
    @apiParam {Object} correct_configs correction configs

    @apiSuccess (200) {Number} data.debug_request_id debug request ID, used to fetch the debug result
    @apiSuccessExample Success-Response:
        HTTP/1.1 200 OK
        {
            "errors": {},
            "message": "ok",
            "code": "1500200",
            "result": true,
            "data": {
                "debug_request_id": "xx"
            }
        }
    """
    # Fetch sample data for debugging from the upstream data set's channel.
    source_data_set_id = params["source_data_set_id"]
    sample_data = self.get_data_set_sample_data(source_data_set_id)
    sample_file_name, sample_file_path = self.generate_parquet_sample_data(sample_data, source_data_set_id)
    # Write the sample data to HDFS so the debug session server can access
    # it through the related interfaces.
    cluster_name, hosts, physical_table_name = self.get_hdfs_info_by_result_table(DEBUG_VIRTUAL_TABLE)
    hdfs_client = get_hdfs_client(cluster_name, hosts)
    dest_path = os.path.join(physical_table_name, self.VIRTUAL_TABLE_TMP_FOLDER, sample_file_name)
    hdfs_client.copy_from_local(localsrc=sample_file_path, dest=dest_path, overwrite=True)
    # Generate the debug flavour of the correction SQL.
    debug_correct_sql = self.generate_correct_sql(
        params["data_set_id"],
        params["source_sql"],
        params["correct_configs"],
        for_debug=True,
    )
    session_server_id = self.get_session_server_id(CORRECTING_DEBUG_SESSION_KEY)
    # Python statements executed inside the remote Spark interactive session:
    # load the parquet sample as a dataframe, register it as a temp view under
    # the upstream data set's name, run the debug SQL and print the collected
    # rows as JSON (the printed output is what debug_result later parses).
    codes = [
        """import json""",
        """df = spark_session.create_customize_path_dataframe('{}', ['{}'])""".format(
            DEBUG_VIRTUAL_TABLE,
            os.path.join(self.VIRTUAL_TABLE_TMP_FOLDER, sample_file_name),
        ),
        """df.createOrReplaceTempView('{}')""".format(source_data_set_id),
        """debug_df = spark_session.sql('''{}''')""".format(debug_correct_sql),
        """debug_results = debug_df.collect()""",
        """print(json.dumps([item.asDict() for item in debug_results]))""",
    ]
    res = DataflowApi.interactive_codes.create(
        {
            "server_id": session_server_id,
            "code": "\n".join(codes),
            "geog_area_code": SESSION_GEOG_AREA_CODE,
        },
        raise_exception=True,
    )
    return Response(
        {
            "debug_request_id": res.data.get("id"),
        }
    )
def get_data_set_sample_data(self, data_set_id):
    """Read up to ``MAX_SAMPLE_DATA_COUNT`` recent records from the data set's channel.

    Connects a consumer to the data set's channel storage, seeks close to the
    head of the topic and consumes the newest messages, decoding each one via
    ``parse_avro_message``.

    :param data_set_id: data set (result table) identifier
    :returns: list of decoded sample records, capped at MAX_SAMPLE_DATA_COUNT
    """
    channel_storage = self.fetch_data_set_storages(data_set_id, self.SAMPLE_DATA_SOURCE_CLUSTER_TYPE)
    (
        consumer,
        topic,
        partition,
        max_offset,
        min_offset,
    ) = self.get_consumer_by_channel(channel_storage)
    sample_data = []
    # Start at most MAX_SAMPLE_DATA_COUNT messages before the head, but never
    # before the earliest retained offset.
    target_offset = max(max_offset - self.MAX_SAMPLE_DATA_COUNT, min_offset)
    target_count = min(max_offset - min_offset + 1, self.MAX_SAMPLE_DATA_COUNT)
    topic_partition = TopicPartition(topic=topic, partition=partition, offset=target_offset)
    self.consumer_seek_offset(consumer, topic_partition)
    messages = consumer.consume(target_count, timeout=self.SAMPLE_DATA_TIMEOUT)
    for message in messages:
        data = self.parse_avro_message(message.value())
        # NOTE(review): only the first avro record of each message is used —
        # presumably its "_value_" entry is itself a list of rows; confirm.
        sample_data.extend(data[0])
        if len(sample_data) > self.MAX_SAMPLE_DATA_COUNT:
            break
    # Was a hard-coded 100; use the class-level cap so the limit stays
    # consistent with the offset/count calculations above.
    return sample_data[: self.MAX_SAMPLE_DATA_COUNT]
def get_consumer_by_channel(self, channel_storage):
    """Build a Kafka consumer positioned on the channel storage's topic.

    :param channel_storage: storage dict containing "storage_channel" info
        and the topic's "physical_table_name"
    :returns: tuple ``(consumer, topic_name, partition, max_offset,
        min_offset)``, or ``None`` when the channel has no cluster domain.
        NOTE(review): the visible caller (get_data_set_sample_data)
        tuple-unpacks this return value, so the ``None`` path would raise a
        TypeError there — confirm whether that path can occur in practice.
    """
    channel_info = channel_storage.get("storage_channel", {})
    topic_name = str(channel_storage.get("physical_table_name"))
    # Sampling always reads partition 0 of the topic.
    topic_partition = TopicPartition(topic=topic_name, partition=0)
    if not channel_info.get("cluster_domain"):
        return
    consumer = Consumer(
        {
            "bootstrap.servers": "{}:{}".format(
                channel_info.get("cluster_domain"), channel_info.get("cluster_port")
            ),
            "group.id": "correction_debug_group",
            "auto.offset.reset": "latest",
        }
    )
    consumer.assign([topic_partition])
    # Watermarks give the earliest retained and next-to-be-written offsets,
    # used by the caller to decide where to start sampling.
    min_offset, max_offset = consumer.get_watermark_offsets(topic_partition)
    return consumer, topic_name, 0, max_offset, min_offset
def consumer_seek_offset(self, consumer, topic_partition):
    """Seek *consumer* to *topic_partition*, retrying until it succeeds.

    ``seek`` can fail transiently (e.g. before partition assignment has
    completed), so failures are retried every 0.1 s.  Raises
    ``dm_pro_errors.SetConsumerOffsetTimeoutError`` once
    ``SAMPLE_DATA_TIMEOUT`` seconds have elapsed without success.
    """
    deadline = time.time() + self.SAMPLE_DATA_TIMEOUT
    while True:
        if time.time() > deadline:
            raise dm_pro_errors.SetConsumerOffsetTimeoutError()
        try:
            consumer.seek(topic_partition)
        except Exception:
            time.sleep(0.1)
        else:
            return
def parse_avro_message(self, message):
    """Decode an avro-encoded channel message into its "_value_" records.

    The payload arrives as utf8-decoded text; re-encoding it as ISO-8859-1
    recovers the raw avro container bytes before handing them to fastavro.
    """
    raw_bytes = message.decode("utf8").encode("ISO-8859-1")
    return [record["_value_"] for record in fastavro.reader(io.BytesIO(raw_bytes))]
def generate_parquet_sample_data(self, sample_data, source_data_set_id):
    """Write *sample_data* to a temporary parquet file named after the data set.

    :returns: tuple ``(file_name, file_path)`` of the written parquet file.
    """
    file_name = "{}.parquet".format(source_data_set_id)
    file_path = os.path.join(PARQUET_FILE_TMP_FOLDER, file_name)
    frame = pd.DataFrame(sample_data)
    pq.write_table(pa.Table.from_pandas(frame), file_path)
    return file_name, file_path
def get_hdfs_info_by_result_table(self, result_table_id):
    """Resolve HDFS connection details for *result_table_id*.

    :returns: tuple ``(cluster_name, hosts, physical_table_name)`` where
        ``hosts`` is a comma-separated "host:port" list built from the
        storage cluster's connection info.
    """
    storage = self.fetch_data_set_storages(result_table_id, self.VIRTUAL_TABLE_CLUSTER_TYPE)
    conn = json.loads(storage.get("storage_cluster", {}).get("connection_info", "{}"))
    port = conn.get("port")
    # All namenodes share the same port, so append it to every host.
    joined_hosts = ",".join("{}:{}".format(host, port) for host in conn.get("hosts").split(","))
    return conn.get("hdfs_cluster_name"), joined_hosts, storage.get("physical_table_name")
@list_route(methods=["get"], url_path="debug/result")
@params_valid(serializer=CorrectDebugResultSerializer)
def debug_result(self, request, params):
    """
    @api {get} /datamanage/dataquality/correct_configs/debug/result/ Fetch the correction SQL debug result
    @apiVersion 3.5.0
    @apiGroup DataQualityCorrection
    @apiName dataquality_correct_debug_result
    @apiDescription Fetch the result of a correction SQL debug task
    @apiParam {String} debug_request_id debug request ID

    @apiSuccess (200) {Number} data.started debug start time
    @apiSuccess (200) {Number} data.completed debug completion time
    @apiSuccess (200) {String} data.state debug run state
    @apiSuccess (200) {Number} data.progress debug run progress
    @apiSuccess (200) {Object} data.output debug output
    @apiSuccess (200) {String} data.output.status debug output status
    @apiSuccess (200) {Object} data.output.data debug output content
    @apiSuccessExample Success-Response:
        HTTP/1.1 200 OK
        {
            "errors": {},
            "message": "ok",
            "code": "1500200",
            "result": true,
            "data": "SELECT * FROM table"
        }
    """
    debug_request_id = params["debug_request_id"]
    session_server_id = self.get_session_server_id(CORRECTING_DEBUG_SESSION_KEY)
    res = DataflowApi.interactive_codes.retrieve(
        {
            "server_id": session_server_id,
            "code_id": debug_request_id,
            "geog_area_code": SESSION_GEOG_AREA_CODE,
        },
        raise_exception=True,
    )
    debug_results = []
    if res.data.get("state") == "available":
        if res.data.get("output", {}).get("status") == "ok":
            # The debug session prints its rows as JSON; they come back under
            # the "text/plain" mime entry of the output payload.
            # NOTE(review): the ``{}`` fallback would make json.loads raise a
            # TypeError if "text/plain" is ever missing on an ok output —
            # presumably it is always present in that case; confirm.
            debug_results = json.loads(res.data.get("output", {}).get("data", {}).get("text/plain", {}))
    # ``output`` may be None before the task produces anything.
    status = (res.data.get("output") or {}).get("status")
    response = {
        "started": res.data.get("started"),
        "completed": res.data.get("completed"),
        "state": res.data.get("state"),
        "progress": res.data.get("progress"),
        "output": {"status": status, "data": debug_results, "error": {}},
    }
    if status == "error":
        # Surface the remote interpreter's exception details to the caller.
        response["output"]["error"].update(
            {
                "message": res.data.get("output", {}).get("evalue"),
                "exception": res.data.get("output", {}).get("ename"),
                "traceback": res.data.get("output", {}).get("traceback"),
            }
        )
    return Response(response)
class DataQualityCorrectHandlerViewSet(BaseMixin, APIModelViewSet):
    """Read-only listing of the data-correction handler templates."""

    model = DataQualityCorrectHandlerTemplate
    lookup_field = model._meta.pk.name
    filter_backends = (DjangoFilterBackend,)
    pagination_class = DataPageNumberPagination
    serializer_class = CorrectHandlerTemplateSerializer
    ordering_fields = ("id", "created_at")
    ordering = ("-id",)

    def get_queryset(self):
        # Soft-deleted templates stay in the table; expose only active ones.
        return self.model.objects.filter(active=True)

    def list(self, request):
        """
        @api {get} /datamanage/dataquality/correct_handlers/ Data correction handler template list
        @apiVersion 3.5.0
        @apiGroup DataQualityCorrection
        @apiName dataquality_correct_handler_template_list
        @apiDescription List the data correction handler templates

        @apiSuccess (200) {String} data.handler_template_name handler template name
        @apiSuccess (200) {String} data.handler_template_alias handler template alias
        @apiSuccess (200) {String} data.handler_template_type handler template type
        @apiSuccess (200) {String} data.handler_template_config handler template config
        @apiSuccess (200) {String} data.description template description
        @apiSuccess (200) {String} data.created_by creator
        @apiSuccess (200) {String} data.created_at creation time
        @apiSuccess (200) {String} data.updated_by last updater
        @apiSuccess (200) {String} data.updated_at last update time
        @apiSuccessExample Success-Response:
            HTTP/1.1 200 OK
            {
                "errors": {},
                "message": "ok",
                "code": "1500200",
                "result": true,
                "data": [
                    {
                        "id": 1,
                        "handler_template_name": "fixed_filling",
                        "handler_template_alias": "固定值填充",
                        "handler_template_type": "fixed",
                        "handler_template_config": {},
                        "description": "xxxxxx",
                        "created_by": "admin",
                        "created_at": "2020-07-27 10:30:00",
                        "updated_by": "admin",
                        "updated_at": "2020-07-27 10:31:00"
                    }
                ]
            }
        """
        return super(DataQualityCorrectHandlerViewSet, self).list(request)
class DataQualityCorrectConditionViewSet(BaseMixin, APIModelViewSet):
    """Read-only listing of the data-correction condition templates."""

    model = DataQualityCorrectConditionTemplate
    lookup_field = model._meta.pk.name
    filter_backends = (DjangoFilterBackend,)
    pagination_class = DataPageNumberPagination
    serializer_class = CorrectConditionTemplateSerializer
    ordering_fields = ("id", "created_at")
    ordering = ("-id",)

    def get_queryset(self):
        # Soft-deleted templates stay in the table; expose only active ones.
        return self.model.objects.filter(active=True)

    def list(self, request):
        """
        @api {get} /datamanage/dataquality/correct_conditions/ Data correction condition template list
        @apiVersion 3.5.0
        @apiGroup DataQualityCorrection
        @apiName dataquality_correct_condition_template_list
        @apiDescription List the data correction condition templates

        @apiSuccess (200) {String} data.condition_template_name condition template name
        @apiSuccess (200) {String} data.condition_template_alias condition template alias
        @apiSuccess (200) {String} data.condition_template_type condition template type
        @apiSuccess (200) {String} data.condition_template_config condition template config
        @apiSuccess (200) {String} data.description template description
        @apiSuccess (200) {String} data.created_by creator
        @apiSuccess (200) {String} data.created_at creation time
        @apiSuccess (200) {String} data.updated_by last updater
        @apiSuccess (200) {String} data.updated_at last update time
        @apiSuccessExample Success-Response:
            HTTP/1.1 200 OK
            {
                "errors": {},
                "message": "ok",
                "code": "1500200",
                "result": true,
                "data": [
                    {
                        "id": 1,
                        "condition_template_name": "custom_sql_condition",
                        "condition_template_alias": "自定义SQL条件",
                        "condition_template_type": "custom",
                        "condition_template_config": {},
                        "description": "xxxxxx",
                        "created_by": "admin",
                        "created_at": "2020-07-27 10:30:00",
                        "updated_by": "admin",
                        "updated_at": "2020-07-27 10:31:00"
                    }
                ]
            }
        """
        return super(DataQualityCorrectConditionViewSet, self).list(request)
|
[
"datamanage.pro.exceptions.CorrectConfigNotExistError",
"common.decorators.params_valid",
"datamanage.pro.dataquality.config.CORRECT_SQL_DATA_TYPES_MAPPINGS.get",
"fastavro.reader",
"json.dumps",
"pyarrow.Table.from_pandas",
"rest_framework.response.Response",
"datamanage.utils.api.DataflowApi.interactive_codes.retrieve",
"django.db.transaction.atomic",
"os.path.join",
"pandas.DataFrame",
"datamanage.pro.exceptions.SetConsumerOffsetTimeoutError",
"json.loads",
"datamanage.pro.dataquality.models.correction.DataQualityCorrectConfigItem.objects.get",
"common.decorators.list_route",
"common.local.get_request_username",
"io.BytesIO",
"datamanage.utils.api.MetaApi.result_tables.retrieve",
"datamanage.utils.dbtools.hdfs_util.get_hdfs_client",
"time.sleep",
"pyarrow.parquet.write_table",
"confluent_kafka.TopicPartition",
"datamanage.pro.dataquality.models.correction.DataQualityCorrectConfigItem.objects.filter",
"time.time",
"datamanage.pro.dataquality.models.correction.DataQualityCorrectConfig.objects.get",
"datamanage.pro.dataquality.models.correction.DataQualityCorrectConfig.objects.filter"
] |
[((3437, 3491), 'common.decorators.params_valid', 'params_valid', ([], {'serializer': 'CorrectConfigCreateSerializer'}), '(serializer=CorrectConfigCreateSerializer)\n', (3449, 3491), False, 'from common.decorators import list_route, params_valid\n'), ((13611, 13665), 'common.decorators.params_valid', 'params_valid', ([], {'serializer': 'CorrectConfigUpdateSerializer'}), '(serializer=CorrectConfigUpdateSerializer)\n', (13623, 13665), False, 'from common.decorators import list_route, params_valid\n'), ((21210, 21263), 'common.decorators.params_valid', 'params_valid', ([], {'serializer': 'DataQualityDataSetSerializer'}), '(serializer=DataQualityDataSetSerializer)\n', (21222, 21263), False, 'from common.decorators import list_route, params_valid\n'), ((35602, 35654), 'common.decorators.list_route', 'list_route', ([], {'methods': "['post']", 'url_path': '"""correct_sql"""'}), "(methods=['post'], url_path='correct_sql')\n", (35612, 35654), False, 'from common.decorators import list_route, params_valid\n'), ((35660, 35705), 'common.decorators.params_valid', 'params_valid', ([], {'serializer': 'CorrectSqlSerializer'}), '(serializer=CorrectSqlSerializer)\n', (35672, 35705), False, 'from common.decorators import list_route, params_valid\n'), ((36697, 36750), 'common.decorators.list_route', 'list_route', ([], {'methods': "['post']", 'url_path': '"""debug/submit"""'}), "(methods=['post'], url_path='debug/submit')\n", (36707, 36750), False, 'from common.decorators import list_route, params_valid\n'), ((36756, 36809), 'common.decorators.params_valid', 'params_valid', ([], {'serializer': 'CorrectDebugSubmitSerializer'}), '(serializer=CorrectDebugSubmitSerializer)\n', (36768, 36809), False, 'from common.decorators import list_route, params_valid\n'), ((43354, 43406), 'common.decorators.list_route', 'list_route', ([], {'methods': "['get']", 'url_path': '"""debug/result"""'}), "(methods=['get'], url_path='debug/result')\n", (43364, 43406), False, 'from common.decorators import 
list_route, params_valid\n'), ((43412, 43465), 'common.decorators.params_valid', 'params_valid', ([], {'serializer': 'CorrectDebugResultSerializer'}), '(serializer=CorrectDebugResultSerializer)\n', (43424, 43465), False, 'from common.decorators import list_route, params_valid\n'), ((20287, 20330), 'json.dumps', 'json.dumps', (["params['correct_config_detail']"], {}), "(params['correct_config_detail'])\n", (20297, 20330), False, 'import json\n'), ((20518, 20540), 'common.local.get_request_username', 'get_request_username', ([], {}), '()\n', (20538, 20540), False, 'from common.local import get_request_username\n'), ((27925, 27942), 'rest_framework.response.Response', 'Response', (['results'], {}), '(results)\n', (27933, 27942), False, 'from rest_framework.response import Response\n'), ((32777, 32851), 'datamanage.pro.dataquality.models.correction.DataQualityCorrectConfigItem.objects.filter', 'DataQualityCorrectConfigItem.objects.filter', ([], {'correct_config': 'correct_config'}), '(correct_config=correct_config)\n', (32820, 32851), False, 'from datamanage.pro.dataquality.models.correction import DataQualityCorrectConditionTemplate, DataQualityCorrectConfig, DataQualityCorrectConfigItem, DataQualityCorrectHandlerTemplate\n'), ((35581, 35595), 'rest_framework.response.Response', 'Response', (['"""ok"""'], {}), "('ok')\n", (35589, 35595), False, 'from rest_framework.response import Response\n'), ((38152, 38188), 'datamanage.utils.dbtools.hdfs_util.get_hdfs_client', 'get_hdfs_client', (['cluster_name', 'hosts'], {}), '(cluster_name, hosts)\n', (38167, 38188), False, 'from datamanage.utils.dbtools.hdfs_util import get_hdfs_client\n'), ((38209, 38295), 'os.path.join', 'os.path.join', (['physical_table_name', 'self.VIRTUAL_TABLE_TMP_FOLDER', 'sample_file_name'], {}), '(physical_table_name, self.VIRTUAL_TABLE_TMP_FOLDER,\n sample_file_name)\n', (38221, 38295), False, 'import os\n'), ((40224, 40294), 'confluent_kafka.TopicPartition', 'TopicPartition', ([], {'topic': 'topic', 
'partition': 'partition', 'offset': 'target_offset'}), '(topic=topic, partition=partition, offset=target_offset)\n', (40238, 40294), False, 'from confluent_kafka import Consumer, TopicPartition\n'), ((40912, 40957), 'confluent_kafka.TopicPartition', 'TopicPartition', ([], {'topic': 'topic_name', 'partition': '(0)'}), '(topic=topic_name, partition=0)\n', (40926, 40957), False, 'from confluent_kafka import Consumer, TopicPartition\n'), ((41632, 41643), 'time.time', 'time.time', ([], {}), '()\n', (41641, 41643), False, 'import time\n'), ((42158, 42181), 'io.BytesIO', 'io.BytesIO', (['encode_data'], {}), '(encode_data)\n', (42168, 42181), False, 'import io\n'), ((42377, 42402), 'pandas.DataFrame', 'pd.DataFrame', (['sample_data'], {}), '(sample_data)\n', (42389, 42402), True, 'import pandas as pd\n'), ((42426, 42457), 'pyarrow.Table.from_pandas', 'pa.Table.from_pandas', (['sample_df'], {}), '(sample_df)\n', (42446, 42457), True, 'import pyarrow as pa\n'), ((42552, 42607), 'os.path.join', 'os.path.join', (['PARQUET_FILE_TMP_FOLDER', 'sample_file_name'], {}), '(PARQUET_FILE_TMP_FOLDER, sample_file_name)\n', (42564, 42607), False, 'import os\n'), ((42616, 42662), 'pyarrow.parquet.write_table', 'pq.write_table', (['sample_table', 'sample_file_path'], {}), '(sample_table, sample_file_path)\n', (42630, 42662), True, 'import pyarrow.parquet as pq\n'), ((44655, 44828), 'datamanage.utils.api.DataflowApi.interactive_codes.retrieve', 'DataflowApi.interactive_codes.retrieve', (["{'server_id': session_server_id, 'code_id': debug_request_id,\n 'geog_area_code': SESSION_GEOG_AREA_CODE}"], {'raise_exception': '(True)'}), "({'server_id': session_server_id,\n 'code_id': debug_request_id, 'geog_area_code': SESSION_GEOG_AREA_CODE},\n raise_exception=True)\n", (44693, 44828), False, 'from datamanage.utils.api import DataflowApi, MetaApi\n'), ((45905, 45923), 'rest_framework.response.Response', 'Response', (['response'], {}), '(response)\n', (45913, 45923), False, 'from 
rest_framework.response import Response\n'), ((8147, 8187), 'django.db.transaction.atomic', 'transaction.atomic', ([], {'using': '"""bkdata_basic"""'}), "(using='bkdata_basic')\n", (8165, 8187), False, 'from django.db import transaction\n'), ((11108, 11221), 'datamanage.utils.api.MetaApi.result_tables.retrieve', 'MetaApi.result_tables.retrieve', (["{'result_table_id': data_set_id, 'related': ['fields']}"], {'raise_exception': '(True)'}), "({'result_table_id': data_set_id, 'related':\n ['fields']}, raise_exception=True)\n", (11138, 11221), False, 'from datamanage.utils.api import DataflowApi, MetaApi\n'), ((18035, 18075), 'django.db.transaction.atomic', 'transaction.atomic', ([], {'using': '"""bkdata_basic"""'}), "(using='bkdata_basic')\n", (18053, 18075), False, 'from django.db import transaction\n'), ((18618, 18640), 'common.local.get_request_username', 'get_request_username', ([], {}), '()\n', (18638, 18640), False, 'from common.local import get_request_username\n'), ((19985, 20062), 'datamanage.pro.dataquality.models.correction.DataQualityCorrectConfigItem.objects.get', 'DataQualityCorrectConfigItem.objects.get', ([], {'id': "params['correct_config_item_id']"}), "(id=params['correct_config_item_id'])\n", (20025, 20062), False, 'from datamanage.pro.dataquality.models.correction import DataQualityCorrectConditionTemplate, DataQualityCorrectConfig, DataQualityCorrectConfigItem, DataQualityCorrectHandlerTemplate\n'), ((26055, 26129), 'datamanage.pro.dataquality.models.correction.DataQualityCorrectConfigItem.objects.filter', 'DataQualityCorrectConfigItem.objects.filter', ([], {'correct_config': 'correct_config'}), '(correct_config=correct_config)\n', (26098, 26129), False, 'from datamanage.pro.dataquality.models.correction import DataQualityCorrectConditionTemplate, DataQualityCorrectConfig, DataQualityCorrectConfigItem, DataQualityCorrectHandlerTemplate\n'), ((32467, 32525), 'datamanage.pro.dataquality.models.correction.DataQualityCorrectConfig.objects.get', 
'DataQualityCorrectConfig.objects.get', ([], {'id': 'correct_config_id'}), '(id=correct_config_id)\n', (32503, 32525), False, 'from datamanage.pro.dataquality.models.correction import DataQualityCorrectConditionTemplate, DataQualityCorrectConfig, DataQualityCorrectConfigItem, DataQualityCorrectHandlerTemplate\n'), ((35137, 35195), 'datamanage.pro.dataquality.models.correction.DataQualityCorrectConfig.objects.get', 'DataQualityCorrectConfig.objects.get', ([], {'id': 'correct_config_id'}), '(id=correct_config_id)\n', (35173, 35195), False, 'from datamanage.pro.dataquality.models.correction import DataQualityCorrectConditionTemplate, DataQualityCorrectConfig, DataQualityCorrectConfigItem, DataQualityCorrectHandlerTemplate\n'), ((35391, 35431), 'django.db.transaction.atomic', 'transaction.atomic', ([], {'using': '"""bkdata_basic"""'}), "(using='bkdata_basic')\n", (35409, 35431), False, 'from django.db import transaction\n'), ((10142, 10185), 'json.dumps', 'json.dumps', (["params['correct_config_detail']"], {}), "(params['correct_config_detail'])\n", (10152, 10185), False, 'import json\n'), ((10275, 10297), 'common.local.get_request_username', 'get_request_username', ([], {}), '()\n', (10295, 10297), False, 'from common.local import get_request_username\n'), ((18127, 18185), 'datamanage.pro.dataquality.models.correction.DataQualityCorrectConfig.objects.get', 'DataQualityCorrectConfig.objects.get', ([], {'id': 'correct_config_id'}), '(id=correct_config_id)\n', (18163, 18185), False, 'from datamanage.pro.dataquality.models.correction import DataQualityCorrectConditionTemplate, DataQualityCorrectConfig, DataQualityCorrectConfigItem, DataQualityCorrectHandlerTemplate\n'), ((20139, 20181), 'datamanage.pro.exceptions.CorrectConfigNotExistError', 'dm_pro_errors.CorrectConfigNotExistError', ([], {}), '()\n', (20179, 20181), True, 'from datamanage.pro import exceptions as dm_pro_errors\n'), ((25816, 25893), 
'datamanage.pro.dataquality.models.correction.DataQualityCorrectConfig.objects.filter', 'DataQualityCorrectConfig.objects.filter', ([], {'active': '(True)', 'data_set_id': 'data_set_id'}), '(active=True, data_set_id=data_set_id)\n', (25855, 25893), False, 'from datamanage.pro.dataquality.models.correction import DataQualityCorrectConditionTemplate, DataQualityCorrectConfig, DataQualityCorrectConfigItem, DataQualityCorrectHandlerTemplate\n'), ((32664, 32706), 'datamanage.pro.exceptions.CorrectConfigNotExistError', 'dm_pro_errors.CorrectConfigNotExistError', ([], {}), '()\n', (32704, 32706), True, 'from datamanage.pro import exceptions as dm_pro_errors\n'), ((35334, 35376), 'datamanage.pro.exceptions.CorrectConfigNotExistError', 'dm_pro_errors.CorrectConfigNotExistError', ([], {}), '()\n', (35374, 35376), True, 'from datamanage.pro import exceptions as dm_pro_errors\n'), ((38886, 38947), 'os.path.join', 'os.path.join', (['self.VIRTUAL_TABLE_TMP_FOLDER', 'sample_file_name'], {}), '(self.VIRTUAL_TABLE_TMP_FOLDER, sample_file_name)\n', (38898, 38947), False, 'import os\n'), ((41754, 41799), 'datamanage.pro.exceptions.SetConsumerOffsetTimeoutError', 'dm_pro_errors.SetConsumerOffsetTimeoutError', ([], {}), '()\n', (41797, 41799), True, 'from datamanage.pro import exceptions as dm_pro_errors\n'), ((8780, 8802), 'common.local.get_request_username', 'get_request_username', ([], {}), '()\n', (8800, 8802), False, 'from common.local import get_request_username\n'), ((13509, 13560), 'datamanage.pro.dataquality.config.CORRECT_SQL_DATA_TYPES_MAPPINGS.get', 'CORRECT_SQL_DATA_TYPES_MAPPINGS.get', (['field_type', '{}'], {}), '(field_type, {})\n', (13544, 13560), False, 'from datamanage.pro.dataquality.config import CORRECT_SQL_DATA_TYPES_MAPPINGS, CORRECTION_EXCLUDE_FIELDS, TEXT_FIELD_TYPES\n'), ((18266, 18308), 'datamanage.pro.exceptions.CorrectConfigNotExistError', 'dm_pro_errors.CorrectConfigNotExistError', ([], {}), '()\n', (18306, 18308), True, 'from datamanage.pro import 
exceptions as dm_pro_errors\n'), ((33083, 33136), 'json.loads', 'json.loads', (['correct_config_item.correct_config_detail'], {}), '(correct_config_item.correct_config_detail)\n', (33093, 33136), False, 'import json\n'), ((35445, 35519), 'datamanage.pro.dataquality.models.correction.DataQualityCorrectConfigItem.objects.filter', 'DataQualityCorrectConfigItem.objects.filter', ([], {'correct_config': 'correct_config'}), '(correct_config=correct_config)\n', (35488, 35519), False, 'from datamanage.pro.dataquality.models.correction import DataQualityCorrectConditionTemplate, DataQualityCorrectConfig, DataQualityCorrectConfigItem, DataQualityCorrectHandlerTemplate\n'), ((41679, 41690), 'time.time', 'time.time', ([], {}), '()\n', (41688, 41690), False, 'import time\n'), ((41933, 41948), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (41943, 41948), False, 'import time\n'), ((42228, 42253), 'fastavro.reader', 'fastavro.reader', (['row_file'], {}), '(row_file)\n', (42243, 42253), False, 'import fastavro\n'), ((26381, 26434), 'json.loads', 'json.loads', (['correct_config_item.correct_config_detail'], {}), '(correct_config_item.correct_config_detail)\n', (26391, 26434), False, 'import json\n')]
|
from bush import color
from bush.aws.base import AWSBase
class RDS(AWSBase):
    """``bush rds`` subcommand: list RDS DB instances as an aligned text table."""

    USAGE = """%prog rds <Command> [options]
Commands
* ls
"""
    SUB_COMMANDS = ['ls']

    def __init__(self, options):
        super().__init__(options, 'rds')

    def __get_instances_internal(self):
        """Call DescribeDBInstances, filtered by --db-instance-id when given."""
        options = self.options
        if not options.db_instance_id:
            return self.client.describe_db_instances()
        db_filter = {
            'Name': 'db-instance-id',
            'Values': options.db_instance_id.split(','),
        }
        return self.client.describe_db_instances(Filters=[db_filter])

    def __get_instances(self):
        """Normalize the API response into self.instances (list of str->str dicts).

        Key insertion order intentionally matches the column order used by
        ls(), which iterates each dict's keys when rendering rows.
        """
        instances = self.__get_instances_internal()['DBInstances']
        self.instances = []
        for instance in instances:
            endpoint = instance['Endpoint']
            self.instances.append({
                'id': instance['DBInstanceIdentifier'],
                'instance_type': instance['DBInstanceClass'],
                'engine': instance['Engine'],
                'username': instance['MasterUsername'],
                'db_name': instance['DBName'],
                'endpoint': endpoint['Address'],
                'port': str(endpoint['Port']),
                'multi_az': str(instance['MultiAZ']),
                'state': instance['DBInstanceStatus'],
            })

    def __get_state(self, state):
        """Colorize *state* for terminal output; unknown states pass through."""
        if state == 'available':
            return color.green(state)
        if state in ('deleting', 'failed', 'storage-full'):
            return color.red(state)
        # BUG FIX: was 'modirying', so the AWS 'modifying' state was never
        # matched and showed up uncolored.
        if state in ('creating', 'modifying', 'rebooting'):
            return color.yellow(state)
        return state

    def ls(self):
        """Render the instance list; returns the table as a list of lines."""
        columns = [
            'id',
            'instance_type',
            'engine',
            'username',
            'db_name',
            'endpoint',
            'port',
            'multi_az',
            'state',
        ]
        self.__get_instances()
        formats = []
        for i, column in enumerate(columns):
            # Each column is as wide as its widest cell (header included),
            # plus one space of padding.
            max_len = max(
                [len(column)] + [len(instance[column] or '') for instance in self.instances]
            )
            formats.append('{%s:<%s}' % (i, max_len + 1))
        list_format = ''.join(formats)
        header = list_format.format(*columns)
        page = [header, '-' * (len(header) - 1)]
        for instance in self.instances:
            row = [
                self.__get_state(value) if key == 'state' else value
                for key, value in instance.items()
            ]
            page.append(list_format.format(*row))
        return page
|
[
"bush.color.green",
"bush.color.yellow",
"bush.color.red"
] |
[((1544, 1562), 'bush.color.green', 'color.green', (['state'], {}), '(state)\n', (1555, 1562), False, 'from bush import color\n'), ((1662, 1678), 'bush.color.red', 'color.red', (['state'], {}), '(state)\n', (1671, 1678), False, 'from bush import color\n'), ((1778, 1797), 'bush.color.yellow', 'color.yellow', (['state'], {}), '(state)\n', (1790, 1797), False, 'from bush import color\n')]
|
import datetime
import flask
import markdown
from personal_site import constants, db
from personal_site.forum import utils
class PostFollow(db.Model):
__tablename__ = "post_follow"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("user.id"))
post_id = db.Column(db.Integer, db.ForeignKey("post.id"))
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
author_id = db.Column(db.Integer, db.ForeignKey("user.id"))
comments = db.relationship("Comment", backref="parent_post", lazy="dynamic")
num_comments = db.Column(db.Integer)
title = db.Column(db.String(constants.POST_TITLE_MAX_LEN))
body = db.Column(db.Text)
posted_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
edited_at = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow)
last_activity = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow)
show_anon = db.Column(db.Boolean)
followers = db.relationship("User", secondary="post_follow", lazy="dynamic")
def __init__(self, author, title, body, show_anon):
self.author_id = author.id
self.title = title
self.body = body
self.num_comments = 0
self.show_anon = show_anon
def edit(self, new_body, new_anon_policy):
if self.body == new_body and self.show_anon == new_anon_policy:
return
self.body = new_body
self.show_anon = new_anon_policy
self.edited_at = datetime.datetime.now()
self.last_activity = datetime.datetime.now()
def notify_followers(self, poster):
url = flask.url_for("forum.view_post", post_id=self.id)
for follower in self.followers:
# Don't notify the comment author
if follower.id != poster.id:
follower.notify(constants.NEW_COMMENT_NOTIF_STR, url)
@property
def html_body(self):
result = utils.safe_html(self.body)
return markdown.markdown(result, extensions=["extra", "codehilite"])
@property
def was_edited(self):
diff = abs(self.posted_at - self.edited_at)
return diff.seconds > 0
class Comment(db.Model):
id = db.Column(db.Integer, primary_key=True)
post_id = db.Column(db.Integer, db.ForeignKey("post.id"))
author_id = db.Column(db.Integer, db.ForeignKey("user.id"))
comment_idx = db.Column(db.Integer)
body = db.Column(db.Text)
posted_at = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow)
edited_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
show_anon = db.Column(db.Boolean)
def __init__(self, post, author, body, show_anon):
self.post_id = post.id
# comment_idx helps with redirects after an edit
post.num_comments += 1
self.comment_idx = post.num_comments
self.author_id = author.id
self.body = body
self.show_anon = show_anon
def edit(self, new_body, new_anon_policy):
if self.body == new_body and self.show_anon == new_anon_policy:
return
self.body = new_body
self.show_anon = new_anon_policy
self.edited_at = datetime.datetime.utcnow()
@property
def html_body(self):
result = utils.safe_html(self.body)
return markdown.markdown(result, extensions=["extra", "codehilite"])
@property
def was_edited(self):
diff = abs(self.posted_at - self.edited_at)
return diff.seconds > 0
|
[
"personal_site.forum.utils.safe_html",
"personal_site.db.relationship",
"markdown.markdown",
"datetime.datetime.utcnow",
"flask.url_for",
"personal_site.db.ForeignKey",
"personal_site.db.Column",
"personal_site.db.String",
"datetime.datetime.now"
] |
[((198, 237), 'personal_site.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (207, 237), False, 'from personal_site import constants, db\n'), ((395, 434), 'personal_site.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (404, 434), False, 'from personal_site import constants, db\n'), ((514, 579), 'personal_site.db.relationship', 'db.relationship', (['"""Comment"""'], {'backref': '"""parent_post"""', 'lazy': '"""dynamic"""'}), "('Comment', backref='parent_post', lazy='dynamic')\n", (529, 579), False, 'from personal_site import constants, db\n'), ((599, 620), 'personal_site.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (608, 620), False, 'from personal_site import constants, db\n'), ((695, 713), 'personal_site.db.Column', 'db.Column', (['db.Text'], {}), '(db.Text)\n', (704, 713), False, 'from personal_site import constants, db\n'), ((731, 787), 'personal_site.db.Column', 'db.Column', (['db.DateTime'], {'default': 'datetime.datetime.utcnow'}), '(db.DateTime, default=datetime.datetime.utcnow)\n', (740, 787), False, 'from personal_site import constants, db\n'), ((804, 872), 'personal_site.db.Column', 'db.Column', (['db.DateTime'], {'index': '(True)', 'default': 'datetime.datetime.utcnow'}), '(db.DateTime, index=True, default=datetime.datetime.utcnow)\n', (813, 872), False, 'from personal_site import constants, db\n'), ((893, 961), 'personal_site.db.Column', 'db.Column', (['db.DateTime'], {'index': '(True)', 'default': 'datetime.datetime.utcnow'}), '(db.DateTime, index=True, default=datetime.datetime.utcnow)\n', (902, 961), False, 'from personal_site import constants, db\n'), ((979, 1000), 'personal_site.db.Column', 'db.Column', (['db.Boolean'], {}), '(db.Boolean)\n', (988, 1000), False, 'from personal_site import constants, db\n'), ((1018, 1082), 'personal_site.db.relationship', 'db.relationship', (['"""User"""'], {'secondary': 
'"""post_follow"""', 'lazy': '"""dynamic"""'}), "('User', secondary='post_follow', lazy='dynamic')\n", (1033, 1082), False, 'from personal_site import constants, db\n'), ((2227, 2266), 'personal_site.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (2236, 2266), False, 'from personal_site import constants, db\n'), ((2411, 2432), 'personal_site.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (2420, 2432), False, 'from personal_site import constants, db\n'), ((2444, 2462), 'personal_site.db.Column', 'db.Column', (['db.Text'], {}), '(db.Text)\n', (2453, 2462), False, 'from personal_site import constants, db\n'), ((2480, 2548), 'personal_site.db.Column', 'db.Column', (['db.DateTime'], {'index': '(True)', 'default': 'datetime.datetime.utcnow'}), '(db.DateTime, index=True, default=datetime.datetime.utcnow)\n', (2489, 2548), False, 'from personal_site import constants, db\n'), ((2565, 2621), 'personal_site.db.Column', 'db.Column', (['db.DateTime'], {'default': 'datetime.datetime.utcnow'}), '(db.DateTime, default=datetime.datetime.utcnow)\n', (2574, 2621), False, 'from personal_site import constants, db\n'), ((2639, 2660), 'personal_site.db.Column', 'db.Column', (['db.Boolean'], {}), '(db.Boolean)\n', (2648, 2660), False, 'from personal_site import constants, db\n'), ((274, 298), 'personal_site.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (287, 298), False, 'from personal_site import constants, db\n'), ((336, 360), 'personal_site.db.ForeignKey', 'db.ForeignKey', (['"""post.id"""'], {}), "('post.id')\n", (349, 360), False, 'from personal_site import constants, db\n'), ((473, 497), 'personal_site.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (486, 497), False, 'from personal_site import constants, db\n'), ((643, 682), 'personal_site.db.String', 'db.String', (['constants.POST_TITLE_MAX_LEN'], {}), '(constants.POST_TITLE_MAX_LEN)\n', (652, 682), 
False, 'from personal_site import constants, db\n'), ((1526, 1549), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1547, 1549), False, 'import datetime\n'), ((1579, 1602), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1600, 1602), False, 'import datetime\n'), ((1658, 1707), 'flask.url_for', 'flask.url_for', (['"""forum.view_post"""'], {'post_id': 'self.id'}), "('forum.view_post', post_id=self.id)\n", (1671, 1707), False, 'import flask\n'), ((1962, 1988), 'personal_site.forum.utils.safe_html', 'utils.safe_html', (['self.body'], {}), '(self.body)\n', (1977, 1988), False, 'from personal_site.forum import utils\n'), ((2004, 2065), 'markdown.markdown', 'markdown.markdown', (['result'], {'extensions': "['extra', 'codehilite']"}), "(result, extensions=['extra', 'codehilite'])\n", (2021, 2065), False, 'import markdown\n'), ((2303, 2327), 'personal_site.db.ForeignKey', 'db.ForeignKey', (['"""post.id"""'], {}), "('post.id')\n", (2316, 2327), False, 'from personal_site import constants, db\n'), ((2367, 2391), 'personal_site.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (2380, 2391), False, 'from personal_site import constants, db\n'), ((3210, 3236), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3234, 3236), False, 'import datetime\n'), ((3294, 3320), 'personal_site.forum.utils.safe_html', 'utils.safe_html', (['self.body'], {}), '(self.body)\n', (3309, 3320), False, 'from personal_site.forum import utils\n'), ((3336, 3397), 'markdown.markdown', 'markdown.markdown', (['result'], {'extensions': "['extra', 'codehilite']"}), "(result, extensions=['extra', 'codehilite'])\n", (3353, 3397), False, 'import markdown\n')]
|
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class OtherEntityData(BaseSchema):
# Configuration swagger.json
article_identifier = fields.Str(required=False)
|
[
"marshmallow.fields.Str"
] |
[((270, 296), 'marshmallow.fields.Str', 'fields.Str', ([], {'required': '(False)'}), '(required=False)\n', (280, 296), False, 'from marshmallow import fields, Schema\n')]
|
#!/usr/bin/env python
"""
Subscribes to SourceDestination topic.
Uses MoveIt to compute a trajectory from the target to the destination.
Trajectory is then published to PickAndPlaceTrajectory topic.
"""
import rospy
import math
from builderbot_mycobot.msg import MyCobotMoveitJoints, EulerJoints
from moveit_msgs.msg import RobotTrajectory
def callback(data):
rospy.loginfo(rospy.get_caller_id() + "I heard:\n%s", data)
def talker():
rospy.init_node('Trajectory', anonymous=True)
rospy.Publisher("/mycobot_joints", MyCobotMoveitJoints, callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
if __name__ == '__main__':
talker()
|
[
"rospy.spin",
"rospy.Publisher",
"rospy.init_node",
"rospy.get_caller_id"
] |
[((459, 504), 'rospy.init_node', 'rospy.init_node', (['"""Trajectory"""'], {'anonymous': '(True)'}), "('Trajectory', anonymous=True)\n", (474, 504), False, 'import rospy\n'), ((509, 574), 'rospy.Publisher', 'rospy.Publisher', (['"""/mycobot_joints"""', 'MyCobotMoveitJoints', 'callback'], {}), "('/mycobot_joints', MyCobotMoveitJoints, callback)\n", (524, 574), False, 'import rospy\n'), ((653, 665), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (663, 665), False, 'import rospy\n'), ((393, 414), 'rospy.get_caller_id', 'rospy.get_caller_id', ([], {}), '()\n', (412, 414), False, 'import rospy\n')]
|
import ast, astunparse, copy
from pprint import pprint
def test_change_callee(tree):
analyzer = Analyzer()
transformer = ChangeCallee("f1", "func1")
analyzer.visit(transformer.visit(tree))
analyzer.report()
def test_replace_var_linear(tree):
analyzer = Analyzer()
transformer = ReplaceVarLinear("i", 1)
analyzer.visit(transformer.visit(tree))
analyzer.report()
def test_replace_var_recursive(tree):
analyzer = Analyzer()
transformer = ReplaceVarRecursive("n", "l")
analyzer.visit(transformer.visit(tree))
analyzer.report()
def test_negate_cond(tree):
analyzer = Analyzer()
transformer = NegateCond()
analyzer.visit(transformer.visit(tree))
analyzer.report()
def test_add_guard(tree):
analyzer = Analyzer()
transformer = AddGuard()
analyzer.visit(transformer.visit(tree))
analyzer.report()
class Analyzer(ast.NodeVisitor):
def __init__(self):
self.functions = {}
def visit_FunctionDef(self, node: ast.FunctionDef):
self.functions[node.name] = node
#self.generic_visit(node)
def report(self):
for f in self.functions:
print(astunparse.unparse(self.functions[f]))
class ChangeCallee(ast.NodeTransformer):
def __init__(self, oname:str, nname: str):
self.oname = oname
self.nname = nname
def visit_Call(self, node: ast.Call):
if node.func.id == self.oname:
result = copy.deepcopy(node)
result.func.id = self.nname
return result
return node
class ReplaceVarLinear(ast.NodeTransformer):
def __init__(self, vname: str, const: int):
self.vname = vname
self.const = const
def visit_Name(self, node: ast.Name):
if node.id == self.vname:
result = ast.BinOp(left=ast.Name(self.vname, ctx=node.ctx), op=ast.Add(), right=ast.Num(self.const))
return result
return node
class ReplaceVarRecursive(ast.NodeTransformer):
def __init__(self, vname: str, attr: str):
self.vname = vname
self.attr = attr
def visit_Name(self, node: ast.Name):
if node.id == self.vname:
result = ast.Attribute(value=ast.Name(self.vname, ctx=node.ctx), attr=self.attr, ctx=node.ctx)
return result
return node
class NegateCond(ast.NodeTransformer):
def visit_If(self, node: ast.If):
result = copy.deepcopy(node)
cond = copy.deepcopy(node.test)
if isinstance(cond, ast.Compare):
if len(cond.ops) == 1:
if isinstance(cond.ops[0], ast.Eq):
cond.ops[0] = ast.NotEq()
elif isinstance(cond.ops[0], ast.NotEq):
cond.ops[0] = ast.Eq()
result.test = cond
return result
class AddGuard(ast.NodeTransformer):
def visit_Call(self, node: ast.Call):
body = copy.deepcopy(node)
result = ast.If(test=ast.NameConstant(True), body=[ast.Expr(body)], orelse=[])
print(astunparse.unparse(result))
return result
class Transformer(ast.NodeTransformer):
def visit_Name(self, node: ast.Name):
if node.id == "i":
result = ast.Name()
result.id = "j"
result.lineno = node.lineno
result.col_offset = node.col_offset
return result
return node
def visit_arg(self, node: ast.arg):
if node.arg == "i":
result = ast.arg("j", node.annotation)
return result
return node
if __name__ == "__main__":
with open("examples/sources/loop-rec.py", "r") as source:
tree = ast.parse(source.read())
#print(ast.dump(tree))
test_add_guard(tree)
|
[
"copy.deepcopy",
"ast.Num",
"ast.Add",
"ast.arg",
"ast.Name",
"ast.NameConstant",
"ast.NotEq",
"ast.Expr",
"ast.Eq",
"astunparse.unparse"
] |
[((2419, 2438), 'copy.deepcopy', 'copy.deepcopy', (['node'], {}), '(node)\n', (2432, 2438), False, 'import ast, astunparse, copy\n'), ((2456, 2480), 'copy.deepcopy', 'copy.deepcopy', (['node.test'], {}), '(node.test)\n', (2469, 2480), False, 'import ast, astunparse, copy\n'), ((2901, 2920), 'copy.deepcopy', 'copy.deepcopy', (['node'], {}), '(node)\n', (2914, 2920), False, 'import ast, astunparse, copy\n'), ((1452, 1471), 'copy.deepcopy', 'copy.deepcopy', (['node'], {}), '(node)\n', (1465, 1471), False, 'import ast, astunparse, copy\n'), ((3022, 3048), 'astunparse.unparse', 'astunparse.unparse', (['result'], {}), '(result)\n', (3040, 3048), False, 'import ast, astunparse, copy\n'), ((3204, 3214), 'ast.Name', 'ast.Name', ([], {}), '()\n', (3212, 3214), False, 'import ast, astunparse, copy\n'), ((3467, 3496), 'ast.arg', 'ast.arg', (['"""j"""', 'node.annotation'], {}), "('j', node.annotation)\n", (3474, 3496), False, 'import ast, astunparse, copy\n'), ((1166, 1203), 'astunparse.unparse', 'astunparse.unparse', (['self.functions[f]'], {}), '(self.functions[f])\n', (1184, 1203), False, 'import ast, astunparse, copy\n'), ((2950, 2972), 'ast.NameConstant', 'ast.NameConstant', (['(True)'], {}), '(True)\n', (2966, 2972), False, 'import ast, astunparse, copy\n'), ((1820, 1854), 'ast.Name', 'ast.Name', (['self.vname'], {'ctx': 'node.ctx'}), '(self.vname, ctx=node.ctx)\n', (1828, 1854), False, 'import ast, astunparse, copy\n'), ((1859, 1868), 'ast.Add', 'ast.Add', ([], {}), '()\n', (1866, 1868), False, 'import ast, astunparse, copy\n'), ((1876, 1895), 'ast.Num', 'ast.Num', (['self.const'], {}), '(self.const)\n', (1883, 1895), False, 'import ast, astunparse, copy\n'), ((2211, 2245), 'ast.Name', 'ast.Name', (['self.vname'], {'ctx': 'node.ctx'}), '(self.vname, ctx=node.ctx)\n', (2219, 2245), False, 'import ast, astunparse, copy\n'), ((2644, 2655), 'ast.NotEq', 'ast.NotEq', ([], {}), '()\n', (2653, 2655), False, 'import ast, astunparse, copy\n'), ((2980, 2994), 'ast.Expr', 
'ast.Expr', (['body'], {}), '(body)\n', (2988, 2994), False, 'import ast, astunparse, copy\n'), ((2747, 2755), 'ast.Eq', 'ast.Eq', ([], {}), '()\n', (2753, 2755), False, 'import ast, astunparse, copy\n')]
|
# Generated by Django 2.1.11 on 2019-10-07 09:35
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('analysis', '0003_auto_20190909_1400'),
]
operations = [
migrations.AlterUniqueTogether(
name='version',
unique_together={('dependency', 'major', 'minor', 'micro')},
),
]
|
[
"django.db.migrations.AlterUniqueTogether"
] |
[((229, 341), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""version"""', 'unique_together': "{('dependency', 'major', 'minor', 'micro')}"}), "(name='version', unique_together={(\n 'dependency', 'major', 'minor', 'micro')})\n", (259, 341), False, 'from django.db import migrations\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 26 15:28:50 2021
Some of this code derives from this jupyter notebook
Source: https://gist.github.com/maduranga95/56c8a7c39a00746cec494b07d2886ad7
"""
from Bio import SeqIO
import csv
import hashlib
def debruijnize(k_mers):
nodes = set()
edges = []
for k, w in k_mers.items():
r1 = k[:-1]
r2 = k[1:]
#r1_hash = kmer_to_sha(r1)
#r2_hash = kmer_to_sha(r2)
nodes.add(r1)
nodes.add(r2)
edges.append((r1,r2))
return (nodes,edges)
def kmer_to_sha(kmer):
return hashlib.sha224(kmer.encode('utf-8')).hexdigest()
def build_k_mer(str,k):
return [str[i:k+i] for i in range(0,len(str)-k+1)]
def read_fasta_reads_to_kmers(filename, k_mer_size):
k_mers = []
weighted = {}
for seq_record in SeqIO.parse(filename, "fasta"):
k_mers.extend(build_k_mer(str(seq_record.seq), k_mer_size))
for i in k_mers:
if i not in weighted:
weighted[i] = 0
weighted[i] += 1
return weighted
def de_bruijn_graphizer(filename, k):
k_mers = {}
k_mers = read_fasta_reads_to_kmers(filename, k)
G = debruijnize(k_mers)
write_edges_file(G[1])
write_nodes_file(G[0])
def write_edges_file(edges):
h = ['source', 'target']
f = open('../Visualisierung/edges.csv', 'w')
w = csv.writer(f)
w.writerow(h)
st = set(edges)
for i in list(st):
w.writerow(i)
f.close()
def write_nodes_file(nodes):
h = ['id']
f = open('../Visualisierung/nodes.csv', 'w')
w = csv.writer(f)
w.writerow(h)
lis = list(nodes)
for n in lis:
w.writerow([n])
f.close()
def print_list(items):
for i in items:
print(i)
de_bruijn_graphizer("test.fasta", 2)
# test.fasta
# virus_perfectreads.fasta
# virus_errorreads.fasta
# virus_errorreads2.fasta
# virus_perfectreads.fasta
# virus2_errorreads.fasta
# virus2_errorreads2.fasta
|
[
"Bio.SeqIO.parse",
"csv.writer"
] |
[((855, 885), 'Bio.SeqIO.parse', 'SeqIO.parse', (['filename', '"""fasta"""'], {}), "(filename, 'fasta')\n", (866, 885), False, 'from Bio import SeqIO\n'), ((1414, 1427), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1424, 1427), False, 'import csv\n'), ((1648, 1661), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1658, 1661), False, 'import csv\n')]
|
import glob, os, pickle, datetime, time, re, pprint
import matplotlib.pyplot as plt
import numpy as np
from src import plotter, graphs
from src.mltoolbox.metrics import METRICS
from src.utils import *
from shutil import copyfile, rmtree
def main():
# SETUP BEGIN
"""
x01 : reg only uniform edges avg on 8 tests
x02 : reg cycles avg on 8 tests
x03 : reg all with avg on 16 tests
x04 : svm
"""
test_suite_root = './test_log/paper/'
test_suite_code = 'conv_real_reg'
test_suite_pattern = 'test_*n_exp*'
log_pattern = re.compile(r'.*') # re.compile(r'.*mse_log\.gz$')
excluded_graphs = []
# SETUP END
# list paths of all folders of tests that satisfy the setting pattern
test_folder_paths = [s.replace('\\', '/') for s in list(glob.iglob(
os.path.normpath("{}{}/{}".format(
test_suite_root,
test_suite_code,
test_suite_pattern
).replace('\\', '/'))))
]
# if paths list if empty abort with error
if len(test_folder_paths) == 0:
raise Exception("Empty test folder paths list")
# setup file and metrics (are initially taken from the first simulation in the path list)
setup = None
setup_metrics = set([])
setup_real_metrics = set([])
"""
logs = {
$TEST_FOLDER_PATH : {
$TEST_LOG_FILENAME : $LOG_ARRAY
}
}
"""
logs = {}
for test_folder_path in test_folder_paths[:]:
if re.match(r'.*.AVG$', test_folder_path):
# if the folder is result of a previous merge then skip it
test_folder_paths.remove(test_folder_path)
continue
try:
# open current test setup file
with open("{}/.setup.pkl".format(test_folder_path), 'rb') as setup_file:
_setup = pickle.load(setup_file)
except:
print("No setup file to open")
raise
if setup is None:
# is setup variable is not set then set setup and setup metrics equal to those
# of the current opened test folder
setup = _setup
setup_metrics = set(setup['metrics'])
setup_real_metrics = set(setup['real_metrics'])
else:
# is setup is already set then intersect metrics and real metrics to keep only
# those shared among all test folders
setup_metrics &= set(setup['metrics'])
setup_real_metrics &= set(setup['real_metrics'])
logs[test_folder_path] = {}
# list all logs inside current test folder escaping problematic characters
test_logs_paths = [s.replace('\\', '/') for s in list(
glob.iglob(
os.path.normpath(os.path.join(glob.escape(test_folder_path), '*.gz')).replace('\\', '/'))
)
]
print("Extract logs from {}".format(test_folder_path))
# loop through all logs inside current test folder
for test_log_path in test_logs_paths:
# take only the name of the log file
test_log_filename = test_log_path.split('/')[-1]
# split graph's name and log name
test_log_graph, test_log_name = test_log_filename.split('_', 1)
if "avg_iter_time_log" in test_log_name or "max_iter_time_log" in test_log_name:
# avg_iter_time and max_iter_time need to be treated in a particular way since made
# by tuples and not by single float values
logs[test_folder_path][test_log_filename] = [
tuple([float(s.split(",")[0]), float(s.split(",")[1])]) for s in np.loadtxt(test_log_path, str)
]
elif log_pattern.match(test_log_filename) or test_log_name in ['iter_time.gz', 'iter_time.txt.gz']:
# load log into dict normally without preprocessing values
logs[test_folder_path][test_log_filename] = np.loadtxt(test_log_path)
# get the list of all logs' names of the first test folder without duplicates
avg_log_names = set(logs[test_folder_paths[0]].keys())
for i in range(1, len(test_folder_paths)):
# intersect with all other folders' logs' names
avg_log_names &= set(logs[test_folder_paths[i]].keys())
"""
avg_logs = {
$LOG_X_NAME : [$LOG_X_TEST#1, $LOG_X_TEST#2, ...],
...
$LOG_Y_NAME : [$LOG_Y_TEST#1, $LOG_Y_TEST#2, ...],
...
}
"""
avg_logs = {}
min_log_lengths = {} # $LOG_NAME : $MIN_LOG_LENGTH
new_setup_graphs_names = set([])
for test_folder_path in logs:
for log_name in list(avg_log_names):
new_setup_graphs_names.add(log_name.split('_', maxsplit=1)[0])
if log_name not in avg_logs:
avg_logs[log_name] = []
min_log_lengths[log_name] = math.inf
avg_logs[log_name].append(logs[test_folder_path][log_name])
min_log_lengths[log_name] = min(min_log_lengths[log_name], len(logs[test_folder_path][log_name]))
for log_name in list(avg_log_names):
print("Create merged {}".format(log_name))
avg_logs[log_name] = [l[0:min_log_lengths[log_name]] for l in avg_logs[log_name]]
avg_logs[log_name] = np.array(np.sum(avg_logs[log_name], axis=0)) / len(avg_logs[log_name])
new_ordered_setup_graphs_names = []
for graph in setup['graphs']:
if graph in list(new_setup_graphs_names):
new_ordered_setup_graphs_names.append(graph)
setup['graphs'] = graphs.generate_n_nodes_graphs_list(setup['n'], new_ordered_setup_graphs_names)
setup['metrics'] = list(setup_metrics)
setup['real_metrics'] = list(setup_real_metrics)
avg_output_dir = os.path.normpath(os.path.join(
test_folder_paths[0].rsplit('/', maxsplit=1)[0],
test_folder_paths[0].rsplit('/', maxsplit=1)[1].split('conflict')[0] + '.AVG'
))
if os.path.exists(avg_output_dir):
if input(
"Folder {} already exists, continuing will cause the loss of all data already inside it, continue "
"anyway? (type 'y' or 'yes' to continue or any other key to abort)".format(avg_output_dir)) not in [
'y', 'yes']:
raise Exception("Script aborted")
rmtree(avg_output_dir)
os.makedirs(avg_output_dir)
for log_name in avg_logs:
dest = os.path.normpath(os.path.join(
avg_output_dir,
log_name
))
np.savetxt(dest, avg_logs[log_name], delimiter=',')
print('Saved {}'.format(log_name, dest))
with open(os.path.join(avg_output_dir, '.setup.pkl'), "wb") as f:
pickle.dump(setup, f, pickle.HIGHEST_PROTOCOL)
print('Setup dumped into {}'.format(os.path.join(avg_output_dir, '.setup.pkl')))
# Fill descriptor with setup dictionary
descriptor = """>>> Test Descriptor File
AVERAGE TEST OUTPUT FILE
Date: {}
Tests merged: {}\n
""".format(str(datetime.datetime.fromtimestamp(time.time())),
pprint.PrettyPrinter(indent=4).pformat(test_folder_paths))
for k, v in setup.items():
descriptor += "{} = {}\n".format(k, v)
descriptor += "\n"
with open(os.path.join(avg_output_dir, '.descriptor.txt'), "w") as f:
f.write(descriptor)
print('Descriptor file created at {}'.format(os.path.join(avg_output_dir, '.descriptor.txt')))
if __name__ == '__main__':
main()
|
[
"pickle.dump",
"numpy.sum",
"os.makedirs",
"numpy.savetxt",
"os.path.exists",
"re.match",
"time.time",
"src.graphs.generate_n_nodes_graphs_list",
"pprint.PrettyPrinter",
"pickle.load",
"glob.escape",
"numpy.loadtxt",
"shutil.rmtree",
"os.path.join",
"re.compile"
] |
[((562, 578), 're.compile', 're.compile', (['""".*"""'], {}), "('.*')\n", (572, 578), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((5525, 5604), 'src.graphs.generate_n_nodes_graphs_list', 'graphs.generate_n_nodes_graphs_list', (["setup['n']", 'new_ordered_setup_graphs_names'], {}), "(setup['n'], new_ordered_setup_graphs_names)\n", (5560, 5604), False, 'from src import plotter, graphs\n'), ((5912, 5942), 'os.path.exists', 'os.path.exists', (['avg_output_dir'], {}), '(avg_output_dir)\n', (5926, 5942), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((6301, 6328), 'os.makedirs', 'os.makedirs', (['avg_output_dir'], {}), '(avg_output_dir)\n', (6312, 6328), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((1479, 1516), 're.match', 're.match', (['""".*.AVG$"""', 'test_folder_path'], {}), "('.*.AVG$', test_folder_path)\n", (1487, 1516), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((6274, 6296), 'shutil.rmtree', 'rmtree', (['avg_output_dir'], {}), '(avg_output_dir)\n', (6280, 6296), False, 'from shutil import copyfile, rmtree\n'), ((6474, 6525), 'numpy.savetxt', 'np.savetxt', (['dest', 'avg_logs[log_name]'], {'delimiter': '""","""'}), "(dest, avg_logs[log_name], delimiter=',')\n", (6484, 6525), True, 'import numpy as np\n'), ((6654, 6700), 'pickle.dump', 'pickle.dump', (['setup', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(setup, f, pickle.HIGHEST_PROTOCOL)\n', (6665, 6700), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((6392, 6430), 'os.path.join', 'os.path.join', (['avg_output_dir', 'log_name'], {}), '(avg_output_dir, log_name)\n', (6404, 6430), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((6590, 6632), 'os.path.join', 'os.path.join', (['avg_output_dir', '""".setup.pkl"""'], {}), "(avg_output_dir, '.setup.pkl')\n", (6602, 6632), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((7195, 7242), 'os.path.join', 'os.path.join', (['avg_output_dir', 
'""".descriptor.txt"""'], {}), "(avg_output_dir, '.descriptor.txt')\n", (7207, 7242), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((1832, 1855), 'pickle.load', 'pickle.load', (['setup_file'], {}), '(setup_file)\n', (1843, 1855), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((5258, 5292), 'numpy.sum', 'np.sum', (['avg_logs[log_name]'], {'axis': '(0)'}), '(avg_logs[log_name], axis=0)\n', (5264, 5292), True, 'import numpy as np\n'), ((6745, 6787), 'os.path.join', 'os.path.join', (['avg_output_dir', '""".setup.pkl"""'], {}), "(avg_output_dir, '.setup.pkl')\n", (6757, 6787), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((6996, 7007), 'time.time', 'time.time', ([], {}), '()\n', (7005, 7007), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((7019, 7049), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (7039, 7049), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((7336, 7383), 'os.path.join', 'os.path.join', (['avg_output_dir', '""".descriptor.txt"""'], {}), "(avg_output_dir, '.descriptor.txt')\n", (7348, 7383), False, 'import glob, os, pickle, datetime, time, re, pprint\n'), ((3940, 3965), 'numpy.loadtxt', 'np.loadtxt', (['test_log_path'], {}), '(test_log_path)\n', (3950, 3965), True, 'import numpy as np\n'), ((3644, 3674), 'numpy.loadtxt', 'np.loadtxt', (['test_log_path', 'str'], {}), '(test_log_path, str)\n', (3654, 3674), True, 'import numpy as np\n'), ((2758, 2787), 'glob.escape', 'glob.escape', (['test_folder_path'], {}), '(test_folder_path)\n', (2769, 2787), False, 'import glob, os, pickle, datetime, time, re, pprint\n')]
|
from lib.game_agent import GameAgent
import lib.ocr
import lib.trigonometry
import lib.raycasting
import offshoot
import time
import json
import subprocess
from pprint import pprint
from .helpers.frame_processing import *
class SuperHexagonGameAgent(GameAgent):
    """Serpent.AI game agent for Super Hexagon.

    Uses a pre-trained OCR classifier to read on-screen text, a context
    classifier to detect which screen is currently shown, and raycasting
    from the player avatar to choose a rotation that avoids incoming walls.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        plugin_path = offshoot.config["file_paths"]["plugins"]
        # Pre-trained models shipped with the plugin: OCR + screen-context classifiers.
        ocr_classifier_path = f"{plugin_path}/SuperHexagonGameAgentPlugin/files/ml_models/super_hexagon_ocr.model"
        self.machine_learning_models["ocr_classifier"] = self.load_machine_learning_model(ocr_classifier_path)
        context_classifier_path = f"{plugin_path}/SuperHexagonGameAgentPlugin/files/ml_models/super_hexagon_context.model"
        self.machine_learning_models["context_classifier"] = self.load_machine_learning_model(context_classifier_path)
        self.frame_handlers["PLAY"] = self.handle_play
        # (height, width) of the captured game window.
        self.frame_shape = (self.game.window_geometry["height"], self.game.window_geometry["width"])
        # Per-pixel lookup tables: angle / distance of every pixel relative to the frame center.
        self.frame_angles_to_center = lib.trigonometry.angles_to_center(self.frame_shape)
        self.frame_distances_to_center = lib.trigonometry.distances_to_center(self.frame_shape)
        # Ray names encode rotation direction as "+" / "-"; map them to arrow keys.
        self.key_direction_mapping = {
            "+": self.input_controller.keyboard.left_key,
            "-": self.input_controller.keyboard.right_key
        }
        # Mutable run state. "ray_configurations" maps a ray angle step to the
        # list of ray quantities to sweep through across batches of runs.
        self.game_state = {
            "scores": {},
            "keypress_duration_index": 0,
            "keypress_durations": [0.0300],
            "collision_threshold": 240,
            "max_run": 10,
            "total_runs": 1,
            "frame_count": 0,
            "previous_context": None,
            "ray_configurations": {
                15: [17, 19, 21, 23, 24],
            },
            "current_ray_angle": 15,
            "current_ray_angle_quantity_index": 0
        }

    @property
    def game_contexts(self):
        # Maps the context classifier's single-letter predictions to readable labels.
        return dict(
            s="Splash Screen",
            l="Level Select Screen",
            g="Game Screen",
            d="Death Screen"
        )

    def handle_play(self, frame):
        """Frame handler: classify the current screen and act accordingly."""
        gray_frame = grayscale_frame(frame.frame)
        # While in-game, only re-run the context classifier every 15th frame;
        # otherwise reuse the cached context to save time.
        if self.game_state["previous_context"] == "Game Screen" and self.game_state["frame_count"] % 15 != 0:
            context = self.game_state["previous_context"]
        else:
            processed_context_frame = process_frame_for_context(gray_frame)
            context_prediction = self.machine_learning_models["context_classifier"].predict([processed_context_frame])[0]
            context = self.game_contexts.get(context_prediction, "Unknown")
            self.game_state["previous_context"] = context
        if context == "Splash Screen":
            # OCR the highlighted menu action; press Enter on "start game",
            # otherwise cycle the menu to the right.
            splash_action = " ".join(lib.ocr.words_in_image_region(
                frame.frame,
                self.game.screen_regions["SPLASH_ACTIONS"],
                self.machine_learning_models["ocr_classifier"],
                word_window_size=(1, 8)
            ))
            if "start game" in splash_action:
                self.input_controller.tap_key(self.input_controller.keyboard.enter_key)
            else:
                self.input_controller.tap_key(self.input_controller.keyboard.right_key)
            time.sleep(5 / 60)
        elif context == "Level Select Screen":
            #self.input_controller.tap_key(self.input_controller.keyboard.right_key, duration=random.uniform(0.0, 1.0))
            self.input_controller.tap_key(self.input_controller.keyboard.enter_key)
            time.sleep(10 / 60)
        elif context == "Game Screen":
            # Preprocess
            processed_frame_for_game_play = process_frame_for_game_play(gray_frame)
            if processed_frame_for_game_play is None:
                self.game_state["frame_count"] += 1
                return None
            # Detect Player Character
            player_bounding_box = get_player_character_bounding_box(processed_frame_for_game_play, self.game.screen_regions["GAME_PLAYER_AREA"])
            if player_bounding_box:
                player_bounding_box_center = (
                    (player_bounding_box[0] + player_bounding_box[2]) // 2,
                    (player_bounding_box[1] + player_bounding_box[3]) // 2,
                )
                player_to_center_angle = self.frame_angles_to_center[player_bounding_box_center]
                player_to_center_distance = self.frame_distances_to_center[player_bounding_box_center]
                # Mask out center & player
                processed_frame_for_game_play[self.frame_distances_to_center < (player_to_center_distance + (player_bounding_box[3] - player_bounding_box[1]))] = 0
                rays = lib.raycasting.generate_rays(player_to_center_angle, mode="UNIFORM", quantity=self.game_state["ray_configurations"][self.game_state["current_ray_angle"]][self.game_state["current_ray_angle_quantity_index"]], starting_angle=self.game_state["current_ray_angle"])
                ray_collision_distances = lib.raycasting.calculate_minimum_collision_distances(
                    rays,
                    processed_frame_for_game_play,
                    self.frame_angles_to_center,
                    self.frame_distances_to_center
                )
                # Only steer when a wall is close along the player's own ray.
                if ray_collision_distances["Ray Player"] <= self.game_state["collision_threshold"]:
                    # Rotate toward the ray with the most open space ahead.
                    best_ray = max(ray_collision_distances.items(), key=lambda i: i[1])[0]
                    if best_ray == "Ray Player":
                        self.game_state["frame_count"] += 1
                        return None
                    # Ray names appear to be 4 space-separated tokens ending in
                    # <direction> <magnitude> — TODO confirm against lib.raycasting.
                    direction, magnitude = best_ray.split(" ")[2:]
                    self.input_controller.tap_key(
                        self.key_direction_mapping[direction],
                        duration=(int(magnitude) / 15) * self.game_state["keypress_durations"][self.game_state["keypress_duration_index"]]
                    )
        elif context == "Death Screen":
            # OCR the last run's survival time; the classifier confuses some
            # glyphs, so common misreads are substituted before float-parsing.
            death_time_last = " ".join(lib.ocr.words_in_image_region(
                frame.frame,
                self.game.screen_regions["DEATH_TIME_LAST"],
                self.machine_learning_models["ocr_classifier"],
                word_window_size=(1, 8)
            ))
            try:
                score = float(death_time_last.replace(":", ".").replace("o", "0").replace("b", "8").replace("s", "5").replace("g", "6"))
            except ValueError:
                score = None
            print(death_time_last)
            print(score)
            if "%.4f" % self.game_state["keypress_durations"][self.game_state["keypress_duration_index"]] not in self.game_state["scores"]:
                self.game_state["scores"]["%.4f" % self.game_state["keypress_durations"][self.game_state["keypress_duration_index"]]] = list()
            if score is not None:
                self.game_state["scores"]["%.4f" % self.game_state["keypress_durations"][self.game_state["keypress_duration_index"]]].append(score)
            # Per-duration (max, mean) score summary, printed and dumped to JSON.
            score_averages = {duration: (np.max(scores or [0.0]), np.mean(scores or [0.0])) for duration, scores in self.game_state["scores"].items()}
            subprocess.call(["clear"])
            pprint(score_averages)
            print("")
            print("Total Runs: " + str(self.game_state["total_runs"]))
            print("Current Keypress Duration: " + str(self.game_state["keypress_durations"][self.game_state["keypress_duration_index"]]))
            print("Current Collision Threshold: " + str(self.game_state["collision_threshold"]))
            print("Current Ray Angle: " + str(self.game_state["current_ray_angle"]))
            print("Current Ray Quantity: " + str(self.game_state["ray_configurations"][self.game_state["current_ray_angle"]][self.game_state["current_ray_angle_quantity_index"]]))
            with open(f"scores_{self.game_state['current_ray_angle']}_{self.game_state['ray_configurations'][self.game_state['current_ray_angle']][self.game_state['current_ray_angle_quantity_index']]}_hexagon.json", "w") as f:
                f.write(json.dumps(score_averages))
            # After max_run scored runs at this setting, advance to the next
            # keypress-duration / ray-quantity / ray-angle combination.
            if len(self.game_state["scores"]["%.4f" % self.game_state["keypress_durations"][self.game_state["keypress_duration_index"]]]) >= self.game_state["max_run"]:
                self.game_state["keypress_duration_index"] += 1
                if self.game_state["keypress_duration_index"] >= len(self.game_state["keypress_durations"]):
                    self.game_state["keypress_duration_index"] = 0
                    self.game_state["current_ray_angle_quantity_index"] += 1
                    if self.game_state["current_ray_angle_quantity_index"] == len(self.game_state["ray_configurations"][self.game_state["current_ray_angle"]]):
                        self.game_state["current_ray_angle_quantity_index"] = 0
                        self.game_state["current_ray_angle"] += 15
                self.game_state["scores"] = {}
            self.input_controller.tap_key(self.input_controller.keyboard.enter_key)
            self.game_state["total_runs"] += 1
            time.sleep(10 / 60)
        self.game_state["frame_count"] += 1
|
[
"subprocess.call",
"pprint.pprint",
"json.dumps",
"time.sleep"
] |
[((3251, 3269), 'time.sleep', 'time.sleep', (['(5 / 60)'], {}), '(5 / 60)\n', (3261, 3269), False, 'import time\n'), ((3533, 3552), 'time.sleep', 'time.sleep', (['(10 / 60)'], {}), '(10 / 60)\n', (3543, 3552), False, 'import time\n'), ((7168, 7194), 'subprocess.call', 'subprocess.call', (["['clear']"], {}), "(['clear'])\n", (7183, 7194), False, 'import subprocess\n'), ((7208, 7230), 'pprint.pprint', 'pprint', (['score_averages'], {}), '(score_averages)\n', (7214, 7230), False, 'from pprint import pprint\n'), ((9098, 9117), 'time.sleep', 'time.sleep', (['(10 / 60)'], {}), '(10 / 60)\n', (9108, 9117), False, 'import time\n'), ((8077, 8103), 'json.dumps', 'json.dumps', (['score_averages'], {}), '(score_averages)\n', (8087, 8103), False, 'import json\n')]
|
import numpy as np
from skimage.metrics import structural_similarity, peak_signal_noise_ratio
import functools
# Data format: H W C
# Public API of this metrics module.
__all__ = [
    'psnr',
    'ssim',
    'sam',
    'ergas',
    'mpsnr',
    'mssim',
    'mpsnr_max'
]
def psnr(output, target, data_range=1):
    """Peak signal-to-noise ratio of *output* against the reference *target*."""
    score = peak_signal_noise_ratio(target, output, data_range=data_range)
    return score
def ssim(img1, img2, **kwargs):
    """Structural similarity between two H x W x C images (channel axis = 2)."""
    score = structural_similarity(img1, img2, channel_axis=2, **kwargs)
    return score
def sam(img1, img2, eps=1e-8):
    """
    Spectral Angle Mapper: mean spectral angle (radians) between the two
    images, computed per pixel along the channel axis (axis 2).
    """
    numerator = np.sum(img1 * img2, axis=2) + eps
    norm1 = np.sqrt(np.sum(img1 ** 2, axis=2)) + eps
    norm2 = np.sqrt(np.sum(img2 ** 2, axis=2)) + eps
    cosine = numerator / norm1 / norm2
    # Clip guards arccos against tiny numerical overshoot beyond [-1, 1].
    spectral_angles = np.arccos(np.clip(cosine, -1, 1))
    return np.mean(np.real(spectral_angles))
def ergas(output, target, r=1):
    """Relative dimensionless global error (ERGAS) between output and target.

    ``r`` is the ratio of spatial resolutions (1 when both are equal).
    """
    num_bands = target.shape[-1]
    acc = 0
    for band in range(num_bands):
        band_mse = np.mean((target[:, :, band] - output[:, :, band]) ** 2)
        acc += band_mse / (np.mean(target[:, :, band]) ** 2)
    return 100 * r * np.sqrt(acc / num_bands)
# ---------------------------------------------------------------------------- #
# BandWise Metrics #
# ---------------------------------------------------------------------------- #
def bandwise(func):
    """Decorator: apply *func* to each channel (last axis) pair and average the results."""
    @functools.wraps(func)
    def wrapper(output, target, *args, **kwargs):
        num_channels = output.shape[-1]
        acc = 0
        for c in range(num_channels):
            acc += func(output[:, :, c], target[:, :, c], *args, **kwargs)
        return acc / num_channels
    return wrapper
@bandwise
def mpsnr(output, target, data_range=1):
    """Mean PSNR: average per-channel PSNR over the last axis.

    Fix: pass arguments in psnr's declared (output, target) order. The
    previous call passed them swapped; the numeric result happened to be the
    same only because PSNR with an explicit data_range is symmetric in its
    two images, but the swap was misleading and fragile.
    """
    return psnr(output, target, data_range=data_range)
def mssim(img1, img2, **kwargs):
    """Mean SSIM — delegates to :func:`ssim`, which operates channel-wise."""
    return ssim(img1, img2, **kwargs)
def mpsnr_max(output, target):
    """ Different from mpsnr, this function use max value of
    each channel (instead of 1 or 255) as the peak signal.
    """
    channel_scores = []
    for band in range(target.shape[-1]):
        peak = np.amax(target[:, :, band]) ** 2
        mse = np.mean((output[:, :, band] - target[:, :, band]) ** 2)
        channel_scores.append(10 * np.log10(peak / mse))
    return sum(channel_scores) / target.shape[-1]
|
[
"numpy.sum",
"numpy.amax",
"skimage.metrics.structural_similarity",
"numpy.mean",
"numpy.real",
"functools.wraps",
"numpy.log10",
"skimage.metrics.peak_signal_noise_ratio",
"numpy.sqrt"
] |
[((291, 353), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['target', 'output'], {'data_range': 'data_range'}), '(target, output, data_range=data_range)\n', (314, 353), False, 'from skimage.metrics import structural_similarity, peak_signal_noise_ratio\n'), ((399, 458), 'skimage.metrics.structural_similarity', 'structural_similarity', (['img1', 'img2'], {'channel_axis': '(2)'}), '(img1, img2, channel_axis=2, **kwargs)\n', (420, 458), False, 'from skimage.metrics import structural_similarity, peak_signal_noise_ratio\n'), ((1354, 1375), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1369, 1375), False, 'import functools\n'), ((603, 630), 'numpy.sum', 'np.sum', (['(img1 * img2)'], {'axis': '(2)'}), '(img1 * img2, axis=2)\n', (609, 630), True, 'import numpy as np\n'), ((824, 838), 'numpy.real', 'np.real', (['angle'], {}), '(angle)\n', (831, 838), True, 'import numpy as np\n'), ((1049, 1067), 'numpy.sqrt', 'np.sqrt', (['(ergas / b)'], {}), '(ergas / b)\n', (1056, 1067), True, 'import numpy as np\n'), ((2117, 2166), 'numpy.mean', 'np.mean', (['((output[:, :, k] - target[:, :, k]) ** 2)'], {}), '((output[:, :, k] - target[:, :, k]) ** 2)\n', (2124, 2166), True, 'import numpy as np\n'), ((654, 679), 'numpy.sum', 'np.sum', (['(img1 ** 2)'], {'axis': '(2)'}), '(img1 ** 2, axis=2)\n', (660, 679), True, 'import numpy as np\n'), ((704, 729), 'numpy.sum', 'np.sum', (['(img2 ** 2)'], {'axis': '(2)'}), '(img2 ** 2, axis=2)\n', (710, 729), True, 'import numpy as np\n'), ((953, 1002), 'numpy.mean', 'np.mean', (['((target[:, :, i] - output[:, :, i]) ** 2)'], {}), '((target[:, :, i] - output[:, :, i]) ** 2)\n', (960, 1002), True, 'import numpy as np\n'), ((2075, 2099), 'numpy.amax', 'np.amax', (['target[:, :, k]'], {}), '(target[:, :, k])\n', (2082, 2099), True, 'import numpy as np\n'), ((2183, 2203), 'numpy.log10', 'np.log10', (['(peak / mse)'], {}), '(peak / mse)\n', (2191, 2203), True, 'import numpy as np\n'), ((1002, 1026), 
'numpy.mean', 'np.mean', (['target[:, :, i]'], {}), '(target[:, :, i])\n', (1009, 1026), True, 'import numpy as np\n')]
|
# Generated by Django 1.9 on 2016-02-21 18:21
from django.db import migrations
def populate_course_types(apps, _schema_editor):
    """Point each Course at the CourseType whose German name matches its legacy type_old string."""
    course_model = apps.get_model('evaluation', 'Course')
    course_type_model = apps.get_model('evaluation', 'CourseType')
    for course in course_model.objects.all():
        course.type = course_type_model.objects.get(name_de=course.type_old)
        course.save()
def revert_course_types(apps, _schema_editor):
    """Reverse migration: copy each Course's CourseType German name back into type_old."""
    course_model = apps.get_model('evaluation', 'Course')
    for course in course_model.objects.all():
        course.type_old = course.type.name_de
        course.save()
class Migration(migrations.Migration):
    """Data migration: resolve legacy ``type_old`` strings to CourseType rows."""

    dependencies = [
        ('evaluation', '0044_add_course_type_model'),
    ]

    operations = [
        migrations.RunPython(populate_course_types, reverse_code=revert_course_types),
    ]
|
[
"django.db.migrations.RunPython"
] |
[((737, 814), 'django.db.migrations.RunPython', 'migrations.RunPython', (['populate_course_types'], {'reverse_code': 'revert_course_types'}), '(populate_course_types, reverse_code=revert_course_types)\n', (757, 814), False, 'from django.db import migrations\n')]
|
# _*_ coding: utf-8 _*_
"""
-------------------------------------------------
File Name: fm.py
Description :
Author : ericdoug
date:2021/3/19
-------------------------------------------------
Change Activity:
2021/3/19: created
-------------------------------------------------
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# sys packages
import os
import datetime
# third packages
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.keras.layers import *
import tensorflow.keras.backend as K
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import TensorBoard
from collections import namedtuple, OrderedDict
# my packages
from recommender.recommender.framework.tf2.layers.embedding_lookup import EmbeddingLookup
from recommender.recommender.framework.tf2.layers.vocab_layer import VocabLayer
from recommender.recommender.framework.tf2.layers.add_layer import AddLayer
from recommender.recommender.framework.tf2.layers.embedding_lookup_sparse import EmbeddingLookupSparse
from recommender.recommender.framework.tf2.layers.hash_layer import HashLayer
from recommender.recommender.framework.tf2.layers.dense_to_sparsetensor import DenseToSparseTensor
from recommender.recommender.framework.tf2.layers.fm_layer import FMLayer
# Feature-column descriptors (deepctr-style): a sparse categorical feature,
# a dense numeric feature, and a variable-length sparse (sequence) feature.
SparseFeat = namedtuple('SparseFeat', ['name', 'voc_size', 'hash_size', 'share_embed', 'embed_dim', 'dtype'])
DenseFeat = namedtuple('DenseFeat', ['name', 'pre_embed', 'reduce_type', 'dim', 'dtype'])
VarLenSparseFeat = namedtuple('VarLenSparseFeat',
                              ['name', 'voc_size', 'hash_size', 'share_embed', 'weight_name', 'combiner', 'embed_dim',
                               'maxlen', 'dtype'])
# Categorical entity-label vocabularies, used to define the id mapping
DICT_CATEGORICAL = {"topic_id": [str(i) for i in range(0, 700)],
                    "keyword_id": [str(i) for i in range(0, 100)],
                    }
# All feature columns of the model; the two varlen features share the
# "topic_id" embedding table via share_embed.
feature_columns = [
    SparseFeat(name="topic_id", voc_size=700, hash_size=None, share_embed=None, embed_dim=8, dtype='int32'),
    SparseFeat(name="keyword_id", voc_size=10, hash_size=None, share_embed=None, embed_dim=8, dtype='int32'),
    SparseFeat(name='client_type', voc_size=2, hash_size=None, share_embed=None, embed_dim=8, dtype='int32'),
    SparseFeat(name='post_type', voc_size=2, hash_size=None, share_embed=None, embed_dim=8, dtype='int32'),
    VarLenSparseFeat(name="follow_topic_id", voc_size=700, hash_size=None, share_embed='topic_id', weight_name=None,
                     combiner='sum', embed_dim=8, maxlen=20, dtype='int32'),
    VarLenSparseFeat(name="all_topic_fav_7", voc_size=700, hash_size=None, share_embed='topic_id',
                     weight_name='all_topic_fav_7_weight', combiner='sum', embed_dim=8, maxlen=5, dtype='int32'),
]
# Defaults and column order — presumably for decoding the training CSV
# records (verify against the dataset pipeline).
DEFAULT_VALUES = [[0], [''], [0.0], [0.0], [0.0],
                  [''], [''], [0.0]]
COL_NAME = ['act', 'client_id', 'client_type', 'post_type', 'topic_id', 'follow_topic_id', 'all_topic_fav_7',
            'keyword_id']
def build_input_features(features_columns, prefix=''):
    """Build an OrderedDict of Keras ``Input`` tensors, one per feature column.

    VarLen features with a ``weight_name`` get an additional float32 Input for
    their per-value weights. (``prefix`` is accepted for API compatibility but
    currently unused.)

    Raises:
        TypeError: for feature columns of an unrecognized type.
    """
    inputs = OrderedDict()
    for col in features_columns:
        if isinstance(col, DenseFeat):
            inputs[col.name] = Input([col.dim], name=col.name)
        elif isinstance(col, SparseFeat):
            inputs[col.name] = Input([1], name=col.name, dtype=col.dtype)
        elif isinstance(col, VarLenSparseFeat):
            inputs[col.name] = Input([None], name=col.name, dtype=col.dtype)
            if col.weight_name is not None:
                inputs[col.weight_name] = Input([None], name=col.weight_name, dtype='float32')
        else:
            raise TypeError("Invalid feature column in build_input_features: {}".format(col.name))
    return inputs
def build_embedding_matrix(features_columns, linear_dim=None):
    """Create one trainable embedding ``tf.Variable`` per vocabulary.

    Columns sharing ``share_embed`` map to the same variable. When
    ``linear_dim`` is given, 1-dim embeddings tagged '_linear' are built
    instead (used for the linear part of the model).
    """
    matrices = {}
    is_linear = linear_dim is not None
    for col in features_columns:
        if not isinstance(col, (SparseFeat, VarLenSparseFeat)):
            continue
        vocab = col.share_embed if col.share_embed else col.name
        if vocab in matrices:
            continue
        rows = col.voc_size + 2  # 2 extra rows — presumably padding/OOV ids; matches original sizing
        dim = 1 if is_linear else col.embed_dim
        suffix = '_linear' if is_linear else ''
        init = tf.random.truncated_normal(shape=(rows, dim), mean=0.0,
                                          stddev=0.001, dtype=tf.float32)
        matrices[vocab] = tf.Variable(initial_value=init, trainable=True,
                                    name=vocab + '_embed' + suffix)
    return matrices
def build_embedding_dict(features_columns):
    """Map each sparse / varlen feature name to its embedding lookup layer.

    VarLen features with a combiner get an ``EmbeddingLookupSparse`` (weighted
    when ``weight_name`` is set); everything else gets a plain
    ``EmbeddingLookup`` on the shared embedding matrix.
    """
    matrices = build_embedding_matrix(features_columns)
    lookup_layers = {}
    for col in features_columns:
        if isinstance(col, SparseFeat):
            vocab = col.share_embed if col.share_embed else col.name
            lookup_layers[col.name] = EmbeddingLookup(embedding=matrices[vocab],
                                                      name='emb_lookup_' + col.name)
        elif isinstance(col, VarLenSparseFeat):
            vocab = col.share_embed if col.share_embed else col.name
            if col.combiner is None:
                lookup_layers[col.name] = EmbeddingLookup(embedding=matrices[vocab],
                                                          name='emb_lookup_' + col.name)
            elif col.weight_name is not None:
                lookup_layers[col.name] = EmbeddingLookupSparse(embedding=matrices[vocab],
                                                                combiner=col.combiner, has_weight=True,
                                                                name='emb_lookup_sparse_' + col.name)
            else:
                lookup_layers[col.name] = EmbeddingLookupSparse(embedding=matrices[vocab],
                                                                combiner=col.combiner,
                                                                name='emb_lookup_sparse_' + col.name)
    return lookup_layers
def build_linear_embedding_dict(features_columns):
    """Like :func:`build_embedding_dict` but over 1-dim '_linear' embeddings.

    Used for the order-1 (linear) term of the model; layer names carry a
    '_linear' suffix so they do not clash with the cross-term lookups.
    """
    tag = '_linear'
    matrices = build_embedding_matrix(features_columns, linear_dim=1)
    lookup_layers = {}
    for col in features_columns:
        if isinstance(col, SparseFeat):
            vocab = col.share_embed if col.share_embed else col.name
            lookup_layers[col.name] = EmbeddingLookup(embedding=matrices[vocab],
                                                      name='emb_lookup_' + col.name + tag)
        elif isinstance(col, VarLenSparseFeat):
            vocab = col.share_embed if col.share_embed else col.name
            if col.combiner is None:
                lookup_layers[col.name] = EmbeddingLookup(embedding=matrices[vocab],
                                                          name='emb_lookup_' + col.name + tag)
            elif col.weight_name is not None:
                lookup_layers[col.name] = EmbeddingLookupSparse(embedding=matrices[vocab],
                                                                combiner=col.combiner, has_weight=True,
                                                                name='emb_lookup_sparse_' + col.name + tag)
            else:
                lookup_layers[col.name] = EmbeddingLookupSparse(embedding=matrices[vocab],
                                                                combiner=col.combiner,
                                                                name='emb_lookup_sparse_' + col.name + tag)
    return lookup_layers
def input_from_feature_columns(features, features_columns, embedding_dict):
    """Turn raw Input tensors into embedded tensors / dense values.

    Args:
        features: dict of Keras Input tensors keyed by feature name
            (as built by ``build_input_features``).
        features_columns: iterable of SparseFeat / VarLenSparseFeat / DenseFeat.
        embedding_dict: feature name -> embedding lookup layer
            (as built by ``build_embedding_dict`` or its linear variant).

    Returns:
        (sparse_embedding_list, dense_value_list) tuple.

    Raises:
        TypeError: for feature columns of an unrecognized type.
    """
    sparse_embedding_list = []
    dense_value_list = []
    for feat_col in features_columns:
        if isinstance(feat_col, SparseFeat):
            _input = features[feat_col.name]
            # String ids are first mapped to integer ids, either via an
            # explicit vocabulary or by hashing.
            if feat_col.dtype == 'string':
                if feat_col.hash_size is None:
                    vocab_name = feat_col.share_embed if feat_col.share_embed else feat_col.name
                    keys = DICT_CATEGORICAL[vocab_name]
                    _input = VocabLayer(keys)(_input)
                else:
                    _input = HashLayer(num_buckets=feat_col.hash_size, mask_zero=False)(_input)
            embed = embedding_dict[feat_col.name](_input)
            sparse_embedding_list.append(embed)
        elif isinstance(feat_col, VarLenSparseFeat):
            _input = features[feat_col.name]
            if feat_col.dtype == 'string':
                if feat_col.hash_size is None:
                    vocab_name = feat_col.share_embed if feat_col.share_embed else feat_col.name
                    keys = DICT_CATEGORICAL[vocab_name]
                    _input = VocabLayer(keys, mask_value='0')(_input)
                else:
                    _input = HashLayer(num_buckets=feat_col.hash_size, mask_zero=True)(_input)
            # With a combiner the padded dense sequence is converted to a
            # SparseTensor (0 = padding) for the sparse, optionally weighted,
            # combined lookup.
            if feat_col.combiner is not None:
                input_sparse = DenseToSparseTensor(mask_value=0)(_input)
                if feat_col.weight_name is not None:
                    weight_sparse = DenseToSparseTensor()(features[feat_col.weight_name])
                    embed = embedding_dict[feat_col.name]([input_sparse, weight_sparse])
                else:
                    embed = embedding_dict[feat_col.name](input_sparse)
            else:
                embed = embedding_dict[feat_col.name](_input)
            sparse_embedding_list.append(embed)
        elif isinstance(feat_col, DenseFeat):
            # Dense features are passed through untouched.
            dense_value_list.append(features[feat_col.name])
        else:
            raise TypeError("Invalid feature column in input_from_feature_columns: {}".format(feat_col.name))
    return sparse_embedding_list, dense_value_list
def concat_func(inputs, axis=-1):
    """Concatenate *inputs* along *axis*; a single-element list passes through unchanged."""
    if len(inputs) == 1:
        return inputs[0]
    return Concatenate(axis=axis)(inputs)
def combined_dnn_input(sparse_embedding_list, dense_value_list):
    """Flatten and concatenate sparse embeddings and dense values into one DNN input.

    Raises:
        ValueError: if both input lists are empty.
    """
    if len(sparse_embedding_list) > 0 and len(dense_value_list) > 0:
        sparse_dnn_input = Flatten()(concat_func(sparse_embedding_list))
        dense_dnn_input = Flatten()(concat_func(dense_value_list))
        return concat_func([sparse_dnn_input, dense_dnn_input])
    elif len(sparse_embedding_list) > 0:
        return Flatten()(concat_func(sparse_embedding_list))
    elif len(dense_value_list) > 0:
        return Flatten()(concat_func(dense_value_list))
    else:
        # Fix: the original executed `raise ("...")`, i.e. tried to raise a
        # plain str, which itself fails with TypeError. Raise a proper
        # exception instead.
        raise ValueError("dnn_feature_columns can not be empty list")
def get_linear_logit(sparse_embedding_list, dense_value_list):
    """Build the linear (order-1) logit from 1-dim sparse embeddings and dense values.

    Sparse embeddings are summed and flattened; dense values are projected to a
    single unit; when both are present their logits are added.

    Raises:
        ValueError: if both input lists are empty.
    """
    if len(sparse_embedding_list) > 0 and len(dense_value_list) > 0:
        sparse_linear_layer = Add()(sparse_embedding_list)
        sparse_linear_layer = Flatten()(sparse_linear_layer)
        dense_linear = concat_func(dense_value_list)
        dense_linear_layer = Dense(1)(dense_linear)
        linear_logit = Add()([dense_linear_layer, sparse_linear_layer])
        return linear_logit
    elif len(sparse_embedding_list) > 0:
        sparse_linear_layer = Add()(sparse_embedding_list)
        sparse_linear_layer = Flatten()(sparse_linear_layer)
        return sparse_linear_layer
    elif len(dense_value_list) > 0:
        dense_linear = concat_func(dense_value_list)
        dense_linear_layer = Dense(1)(dense_linear)
        return dense_linear_layer
    else:
        # Fix: the original executed `raise ("...")`, i.e. tried to raise a
        # plain str, which itself fails with TypeError. Raise a proper
        # exception instead.
        raise ValueError("linear_feature_columns can not be empty list")
def FM(feature_columns):
    """Instantiates the FM Network architecture.

    Builds a Keras model with a linear (order-1) term over 1-dim embeddings
    plus an FM cross (order-2) term over k-dim embeddings, combined through a
    sigmoid output.

    Args:
        feature_columns: An iterable containing all the features used by fm model.
    return: A Keras model instance.
    """
    features = build_input_features(feature_columns)
    sparse_feature_columns = list(
        filter(lambda x: isinstance(x, SparseFeat), feature_columns)) if feature_columns else []
    dense_feature_columns = list(
        filter(lambda x: isinstance(x, DenseFeat), feature_columns)) if feature_columns else []
    sparse_varlen_feature_columns = list(
        filter(lambda x: isinstance(x, VarLenSparseFeat), feature_columns)) if feature_columns else []
    inputs_list = list(features.values())
    # Build the 1-dim embedding dict for the linear term
    linear_embedding_dict = build_linear_embedding_dict(feature_columns)
    linear_sparse_embedding_list, linear_dense_value_list = input_from_feature_columns(features, feature_columns,
                                                                                       linear_embedding_dict)
    # linear part
    linear_logit = get_linear_logit(linear_sparse_embedding_list, linear_dense_value_list)
    # Build the k-dim embedding dict for the FM cross term
    cross_columns = sparse_feature_columns + sparse_varlen_feature_columns
    embedding_dict = build_embedding_dict(cross_columns)
    sparse_embedding_list, _ = input_from_feature_columns(features, cross_columns, embedding_dict)
    # Concatenate all k-dim sparse embeddings into an (n, k) matrix, where n is the number of features
    concat_sparse_kd_embed = Concatenate(axis=1)(sparse_embedding_list)  # ?, n, k
    # cross part
    fm_cross_logit = FMLayer()(concat_sparse_kd_embed)
    final_logit = Add()([fm_cross_logit, linear_logit])
    output = tf.keras.layers.Activation("sigmoid", name="fm_out")(final_logit)
    model = Model(inputs=inputs_list, outputs=output)
    return model
|
[
"recommender.recommender.framework.tf2.layers.hash_layer.HashLayer",
"recommender.recommender.framework.tf2.layers.dense_to_sparsetensor.DenseToSparseTensor",
"tensorflow.random.truncated_normal",
"recommender.recommender.framework.tf2.layers.vocab_layer.VocabLayer",
"recommender.recommender.framework.tf2.layers.embedding_lookup.EmbeddingLookup",
"tensorflow.keras.models.Model",
"recommender.recommender.framework.tf2.layers.embedding_lookup_sparse.EmbeddingLookupSparse",
"recommender.recommender.framework.tf2.layers.fm_layer.FMLayer",
"tensorflow.keras.layers.Activation",
"collections.namedtuple",
"collections.OrderedDict"
] |
[((1501, 1601), 'collections.namedtuple', 'namedtuple', (['"""SparseFeat"""', "['name', 'voc_size', 'hash_size', 'share_embed', 'embed_dim', 'dtype']"], {}), "('SparseFeat', ['name', 'voc_size', 'hash_size', 'share_embed',\n 'embed_dim', 'dtype'])\n", (1511, 1601), False, 'from collections import namedtuple, OrderedDict\n'), ((1610, 1687), 'collections.namedtuple', 'namedtuple', (['"""DenseFeat"""', "['name', 'pre_embed', 'reduce_type', 'dim', 'dtype']"], {}), "('DenseFeat', ['name', 'pre_embed', 'reduce_type', 'dim', 'dtype'])\n", (1620, 1687), False, 'from collections import namedtuple, OrderedDict\n'), ((1707, 1850), 'collections.namedtuple', 'namedtuple', (['"""VarLenSparseFeat"""', "['name', 'voc_size', 'hash_size', 'share_embed', 'weight_name', 'combiner',\n 'embed_dim', 'maxlen', 'dtype']"], {}), "('VarLenSparseFeat', ['name', 'voc_size', 'hash_size',\n 'share_embed', 'weight_name', 'combiner', 'embed_dim', 'maxlen', 'dtype'])\n", (1717, 1850), False, 'from collections import namedtuple, OrderedDict\n'), ((3264, 3277), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3275, 3277), False, 'from collections import namedtuple, OrderedDict\n'), ((14146, 14187), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs_list', 'outputs': 'output'}), '(inputs=inputs_list, outputs=output)\n', (14151, 14187), False, 'from tensorflow.keras.models import Model\n'), ((13963, 13972), 'recommender.recommender.framework.tf2.layers.fm_layer.FMLayer', 'FMLayer', ([], {}), '()\n', (13970, 13972), False, 'from recommender.recommender.framework.tf2.layers.fm_layer import FMLayer\n'), ((14068, 14120), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""sigmoid"""'], {'name': '"""fm_out"""'}), "('sigmoid', name='fm_out')\n", (14094, 14120), True, 'import tensorflow as tf\n'), ((5404, 5499), 'recommender.recommender.framework.tf2.layers.embedding_lookup.EmbeddingLookup', 'EmbeddingLookup', ([], {'embedding': 'embedding_matrix[vocab_name]', 
'name': "('emb_lookup_' + feat_col.name)"}), "(embedding=embedding_matrix[vocab_name], name='emb_lookup_' +\n feat_col.name)\n", (5419, 5499), False, 'from recommender.recommender.framework.tf2.layers.embedding_lookup import EmbeddingLookup\n'), ((7148, 7254), 'recommender.recommender.framework.tf2.layers.embedding_lookup.EmbeddingLookup', 'EmbeddingLookup', ([], {'embedding': 'embedding_matrix[vocab_name]', 'name': "('emb_lookup_' + feat_col.name + name_tag)"}), "(embedding=embedding_matrix[vocab_name], name='emb_lookup_' +\n feat_col.name + name_tag)\n", (7163, 7254), False, 'from recommender.recommender.framework.tf2.layers.embedding_lookup import EmbeddingLookup\n'), ((6568, 6663), 'recommender.recommender.framework.tf2.layers.embedding_lookup.EmbeddingLookup', 'EmbeddingLookup', ([], {'embedding': 'embedding_matrix[vocab_name]', 'name': "('emb_lookup_' + feat_col.name)"}), "(embedding=embedding_matrix[vocab_name], name='emb_lookup_' +\n feat_col.name)\n", (6583, 6663), False, 'from recommender.recommender.framework.tf2.layers.embedding_lookup import EmbeddingLookup\n'), ((8345, 8451), 'recommender.recommender.framework.tf2.layers.embedding_lookup.EmbeddingLookup', 'EmbeddingLookup', ([], {'embedding': 'embedding_matrix[vocab_name]', 'name': "('emb_lookup_' + feat_col.name + name_tag)"}), "(embedding=embedding_matrix[vocab_name], name='emb_lookup_' +\n feat_col.name + name_tag)\n", (8360, 8451), False, 'from recommender.recommender.framework.tf2.layers.embedding_lookup import EmbeddingLookup\n'), ((4788, 4892), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', ([], {'shape': '(vocab_size, embed_dim)', 'mean': '(0.0)', 'stddev': '(0.001)', 'dtype': 'tf.float32'}), '(shape=(vocab_size, embed_dim), mean=0.0, stddev=\n 0.001, dtype=tf.float32)\n', (4814, 4892), True, 'import tensorflow as tf\n'), ((5849, 6007), 'recommender.recommender.framework.tf2.layers.embedding_lookup_sparse.EmbeddingLookupSparse', 'EmbeddingLookupSparse', ([], {'embedding': 
'embedding_matrix[vocab_name]', 'combiner': 'feat_col.combiner', 'has_weight': '(True)', 'name': "('emb_lookup_sparse_' + feat_col.name)"}), "(embedding=embedding_matrix[vocab_name], combiner=\n feat_col.combiner, has_weight=True, name='emb_lookup_sparse_' +\n feat_col.name)\n", (5870, 6007), False, 'from recommender.recommender.framework.tf2.layers.embedding_lookup_sparse import EmbeddingLookupSparse\n'), ((6221, 6358), 'recommender.recommender.framework.tf2.layers.embedding_lookup_sparse.EmbeddingLookupSparse', 'EmbeddingLookupSparse', ([], {'embedding': 'embedding_matrix[vocab_name]', 'combiner': 'feat_col.combiner', 'name': "('emb_lookup_sparse_' + feat_col.name)"}), "(embedding=embedding_matrix[vocab_name], combiner=\n feat_col.combiner, name='emb_lookup_sparse_' + feat_col.name)\n", (6242, 6358), False, 'from recommender.recommender.framework.tf2.layers.embedding_lookup_sparse import EmbeddingLookupSparse\n'), ((7604, 7773), 'recommender.recommender.framework.tf2.layers.embedding_lookup_sparse.EmbeddingLookupSparse', 'EmbeddingLookupSparse', ([], {'embedding': 'embedding_matrix[vocab_name]', 'combiner': 'feat_col.combiner', 'has_weight': '(True)', 'name': "('emb_lookup_sparse_' + feat_col.name + name_tag)"}), "(embedding=embedding_matrix[vocab_name], combiner=\n feat_col.combiner, has_weight=True, name='emb_lookup_sparse_' +\n feat_col.name + name_tag)\n", (7625, 7773), False, 'from recommender.recommender.framework.tf2.layers.embedding_lookup_sparse import EmbeddingLookupSparse\n'), ((7987, 8135), 'recommender.recommender.framework.tf2.layers.embedding_lookup_sparse.EmbeddingLookupSparse', 'EmbeddingLookupSparse', ([], {'embedding': 'embedding_matrix[vocab_name]', 'combiner': 'feat_col.combiner', 'name': "('emb_lookup_sparse_' + feat_col.name + name_tag)"}), "(embedding=embedding_matrix[vocab_name], combiner=\n feat_col.combiner, name='emb_lookup_sparse_' + feat_col.name + name_tag)\n", (8008, 8135), False, 'from 
recommender.recommender.framework.tf2.layers.embedding_lookup_sparse import EmbeddingLookupSparse\n'), ((9075, 9091), 'recommender.recommender.framework.tf2.layers.vocab_layer.VocabLayer', 'VocabLayer', (['keys'], {}), '(keys)\n', (9085, 9091), False, 'from recommender.recommender.framework.tf2.layers.vocab_layer import VocabLayer\n'), ((9151, 9209), 'recommender.recommender.framework.tf2.layers.hash_layer.HashLayer', 'HashLayer', ([], {'num_buckets': 'feat_col.hash_size', 'mask_zero': '(False)'}), '(num_buckets=feat_col.hash_size, mask_zero=False)\n', (9160, 9209), False, 'from recommender.recommender.framework.tf2.layers.hash_layer import HashLayer\n'), ((9930, 9963), 'recommender.recommender.framework.tf2.layers.dense_to_sparsetensor.DenseToSparseTensor', 'DenseToSparseTensor', ([], {'mask_value': '(0)'}), '(mask_value=0)\n', (9949, 9963), False, 'from recommender.recommender.framework.tf2.layers.dense_to_sparsetensor import DenseToSparseTensor\n'), ((9695, 9727), 'recommender.recommender.framework.tf2.layers.vocab_layer.VocabLayer', 'VocabLayer', (['keys'], {'mask_value': '"""0"""'}), "(keys, mask_value='0')\n", (9705, 9727), False, 'from recommender.recommender.framework.tf2.layers.vocab_layer import VocabLayer\n'), ((9787, 9844), 'recommender.recommender.framework.tf2.layers.hash_layer.HashLayer', 'HashLayer', ([], {'num_buckets': 'feat_col.hash_size', 'mask_zero': '(True)'}), '(num_buckets=feat_col.hash_size, mask_zero=True)\n', (9796, 9844), False, 'from recommender.recommender.framework.tf2.layers.hash_layer import HashLayer\n'), ((10061, 10082), 'recommender.recommender.framework.tf2.layers.dense_to_sparsetensor.DenseToSparseTensor', 'DenseToSparseTensor', ([], {}), '()\n', (10080, 10082), False, 'from recommender.recommender.framework.tf2.layers.dense_to_sparsetensor import DenseToSparseTensor\n')]
|
import argparse
import pprint
# Command-line configuration for an image-captioning training/eval script
# (models: NIC / SSA / SCA-CNN on MS-COCO 2014 data). Importing this module
# builds `parser` at module level; the driver calls parser.parse_args().
parser = argparse.ArgumentParser(description='argument parser')
# Misc: checkpoint/result locations, GPU selection, logging, decoding width.
parser.add_argument('--ckpt_dir', type=str, default='./checkpoints/',
                    help='path for saving trained models')
parser.add_argument('--ckpt_path', type=str, default='./checkpoints/scacnn-model-10.pkl',
                    help='path for checkpont of trained models')
parser.add_argument('--result_dir', type=str, default='./results',
                    help='path for test output json file')
parser.add_argument('--gpu', type=str, default='0,1',
                    help='gpu ids to use')
parser.add_argument('--crop_size', type=int, default=224,
                    help='size for randomly cropping images')
parser.add_argument('--save_step', type=int, default=1,
                    help='num epochs for saving trained models')
parser.add_argument('--log_file', type=str, default='log.txt',
                    help='path for test output json file')
parser.add_argument('--beam_width', type=int, default=1,
                    help='beam width of beam search, default is 1, ie, greedy search')
# Boolean flags (absent by default; present on the command line -> True).
parser.add_argument('--restore_train', action='store_true',
                    help='set this flag to restore training from previous checkponts')
parser.add_argument('--fine_tune', action='store_true',
                    help='set this flag to fine-tune ImageNet model')
# Optimizer / training-loop hyperparameters.
parser.add_argument('--num_epochs', type=int, default=100,
                    help='number of total epochs')
parser.add_argument('--batch_size', type=int, default=20,
                    help='number of batch size')
parser.add_argument('--learning_rate', type=float, default=0.001,
                    help='learning rate for optimizer')
# Data: loader workers, vocabulary, and COCO image/annotation locations.
parser.add_argument('--num_workers', type=int, default=4,
                    help='number of workers for data loader')
parser.add_argument('--vocab_path', type=str, default='./data/vocab.pkl',
                    help='path for vocabulary wrapper')
parser.add_argument('--train_image_dir', type=str, default='./data/train2014',
                    help='directory for train resized images')
parser.add_argument('--train_caption_path', type=str,
                    default='./data/annotations/captions_train2014.json',
                    help='path for train annotation json file')
parser.add_argument('--val_image_dir', type=str, default='./data/val2014',
                    help='directory for val resized images')
parser.add_argument('--val_caption_path', type=str,
                    default='./data/annotations/captions_val2014.json',
                    help='path for val annotation json file')
# Model architecture: which captioner, attention variant, and LSTM sizes.
parser.add_argument('--model', type=str, default='nic', choices=['nic', 'ssa', 'scacnn'],
                    help='name for model')
parser.add_argument('--att_mode', type=str, default='cs', choices=['cs', 'sc', 'c', 's'],
                    help='attention mode for scacnn')
parser.add_argument('--embed_size', type=int, default=100,
                    help='dimension of word embedding vectors')
parser.add_argument('--hidden_size', type=int, default=300,
                    help='dimension of lstm hidden states')
parser.add_argument('--num_layers', type=int, default=1,
                    help='number of layers in lstm')
parser.add_argument('--dropout', type=float, default=0.1,
                    help='dropout rate for lstm')
def print_args(args):
    """Pretty-print the parsed argument namespace, one attribute per line."""
    namespace_dict = vars(args)
    pprint.pprint(namespace_dict)
|
[
"argparse.ArgumentParser"
] |
[((40, 94), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""argument parser"""'}), "(description='argument parser')\n", (63, 94), False, 'import argparse\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on %(date)s
@author: <NAME>
"""
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import mean_squared_error
# Power outage class
def f(row):
    """Map a day's outage count to a severity class.

    Returns 2 ("extreme") when the row has more than 8 outages,
    1 ("bad") for 3-8 outages, and 0 ("normal") for 0-2.
    """
    outages = row['Total_outages']
    if outages > 8:
        return 2
    if outages > 2:
        return 1
    return 0
# Load data function: load data for neural network training
# Input: None
# Output: x_train, y_train, x_test, y_test
def load_data():
    """Load the weather/outage CSV and build normalized train/test sets.

    Reads ../../Data/WeatherOutagesAllJerry.csv, drops rows that are
    entirely NaN, derives the 3-way outage-severity label via ``f``,
    performs a reproducible 90/10 train/test split, and L2-normalizes
    the feature matrices.

    Returns:
        tuple: (x_train, y_train, x_test, y_test) where the x arrays are
        row-normalized feature matrices and the y series hold the
        0/1/2 category labels.
    """
    data = pd.read_csv('../../Data/WeatherOutagesAllJerry.csv')
    # Drop only rows where every column is NaN; partially-filled rows stay.
    data = data.dropna(how='all')
    data['category'] = data.apply(f, axis=1)
    # (Removed a leftover `data.head()` call whose return value was unused.)
    # Fixed random_state keeps the 90/10 split reproducible across runs.
    train, test = train_test_split(data, test_size=0.1, random_state=567)
    feature_cols = ['Day_length_hr', 'Avg_Temp_F', 'Avg_humidity_percent',
                    'Avg_windspeed_mph', 'Max_windspeed_mph',
                    'Precipitation_in', 'Event_thunderstorm']
    x_train = train[feature_cols]
    y_train = train['category']
    x_test = test[feature_cols]
    y_test = test['category']
    # NOTE(review): preprocessing.normalize scales each sample (row) to unit
    # L2 norm; it is stateless, so applying it to train and test
    # independently leaks nothing between the sets.
    x_train = preprocessing.normalize(x_train)
    x_test = preprocessing.normalize(x_test)
    return x_train, y_train, x_test, y_test
# Oversample algoritm
# This function oversample from under-reprented class
# Input: X-feature, y-response, R1-oversample ratio for bad case, R2-oversample ratio for extreme case
# Output: X_resam, y_resam
def balance_sample(X, y, R1, R2):
    """Oversample the under-represented outage classes.

    Args:
        X: feature matrix.
        y: response labels (0 = normal, 1 = bad, 2 = extreme).
        R1: oversample ratio for the bad case (label 1).
        R2: oversample ratio for the extreme case (label 2).

    Returns:
        tuple: (X_resam, y_resam) -- the rebalanced features and labels.
    """
    from imblearn.over_sampling import RandomOverSampler
    # Pass 1: resample among labels {0, 1} only, controlled by R1.
    sampler_bad = RandomOverSampler(ratio=R1, random_state=6)
    x_bad, y_bad = sampler_bad.fit_sample(X[y != 2], y[y != 2])
    # Pass 2: resample among labels {0, 2} only, controlled by R2; keep
    # just the extreme-class rows from this pass and append them.
    sampler_extreme = RandomOverSampler(ratio=R2, random_state=6)
    x_ext, y_ext = sampler_extreme.fit_sample(X[y != 1], y[y != 1])
    X_resam = np.concatenate((x_bad, x_ext[y_ext == 2]), axis=0)
    y_resam = np.concatenate((y_bad, y_ext[y_ext == 2]), axis=0)
    return X_resam, y_resam
def neural_network_clf(x_train, y_train, x_test, y_test):
    """Train a small MLP classifier and print train/test MSE.

    Fits an identity-activation (5, 3) network with the lbfgs solver on
    the training data, then prints the mean squared error of the
    predicted labels on both the training and the test sets.
    """
    # random_state=1 makes weight initialization (and thus the fit)
    # reproducible.
    model = MLPClassifier(max_iter=1000, activation='identity', solver='lbfgs',
                          alpha=1e-5, hidden_layer_sizes=(5, 3),
                          random_state=1)
    model.fit(x_train, y_train)
    train_pred = model.predict(x_train)
    test_pred = model.predict(x_test)
    print("Train error for normalized data", mean_squared_error(y_train, train_pred))
    print("Test error for normalized data", mean_squared_error(y_test, test_pred))
|
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"imblearn.over_sampling.RandomOverSampler",
"sklearn.preprocessing.normalize",
"sklearn.neural_network.MLPClassifier",
"sklearn.metrics.mean_squared_error",
"numpy.concatenate"
] |
[((787, 839), 'pandas.read_csv', 'pd.read_csv', (['"""../../Data/WeatherOutagesAllJerry.csv"""'], {}), "('../../Data/WeatherOutagesAllJerry.csv')\n", (798, 839), True, 'import pandas as pd\n'), ((997, 1052), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': '(0.1)', 'random_state': '(567)'}), '(data, test_size=0.1, random_state=567)\n', (1013, 1052), False, 'from sklearn.model_selection import train_test_split\n'), ((1498, 1530), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['x_train'], {}), '(x_train)\n', (1521, 1530), False, 'from sklearn import preprocessing\n'), ((1563, 1594), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['x_test'], {}), '(x_test)\n', (1586, 1594), False, 'from sklearn import preprocessing\n'), ((2002, 2045), 'imblearn.over_sampling.RandomOverSampler', 'RandomOverSampler', ([], {'ratio': 'R1', 'random_state': '(6)'}), '(ratio=R1, random_state=6)\n', (2019, 2045), False, 'from imblearn.over_sampling import RandomOverSampler\n'), ((2108, 2151), 'imblearn.over_sampling.RandomOverSampler', 'RandomOverSampler', ([], {'ratio': 'R2', 'random_state': '(6)'}), '(ratio=R2, random_state=6)\n', (2125, 2151), False, 'from imblearn.over_sampling import RandomOverSampler\n'), ((2221, 2273), 'numpy.concatenate', 'np.concatenate', (['(x_res, x_res2[y_res2 == 2])'], {'axis': '(0)'}), '((x_res, x_res2[y_res2 == 2]), axis=0)\n', (2235, 2273), True, 'import numpy as np\n'), ((2285, 2337), 'numpy.concatenate', 'np.concatenate', (['(y_res, y_res2[y_res2 == 2])'], {'axis': '(0)'}), '((y_res, y_res2[y_res2 == 2]), axis=0)\n', (2299, 2337), True, 'import numpy as np\n'), ((2435, 2563), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'max_iter': '(1000)', 'activation': '"""identity"""', 'solver': '"""lbfgs"""', 'alpha': '(1e-05)', 'hidden_layer_sizes': '(5, 3)', 'random_state': '(1)'}), "(max_iter=1000, activation='identity', solver='lbfgs', alpha=\n 1e-05, hidden_layer_sizes=(5, 
3), random_state=1)\n", (2448, 2563), False, 'from sklearn.neural_network import MLPClassifier\n'), ((2735, 2776), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train', 'y_train_pred'], {}), '(y_train, y_train_pred)\n', (2753, 2776), False, 'from sklearn.metrics import mean_squared_error\n'), ((2820, 2859), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_test_pred'], {}), '(y_test, y_test_pred)\n', (2838, 2859), False, 'from sklearn.metrics import mean_squared_error\n')]
|
# Generated by Django 2.1.2 on 2018-12-25 03:59
import backend.models.user
from django.db import migrations, models
import django.db.models.deletion
# Initial schema for the backend app: a tenant-scoped custom auth user,
# AWS environments, polymorphic notification destinations/events (via
# django-polymorphic's polymorphic_ctype FK), notification groups, roles,
# and schedules, plus the FK/M2M wiring added after the tables exist.
class Migration(migrations.Migration):

    initial = True

    # Needs auth (Group/Permission M2Ms) and contenttypes (polymorphic FKs).
    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        # Custom user model (table "user") managed by the project's
        # UserManager; carries the standard Django auth flags plus
        # soft-delete ("deleted") and audit timestamps.
        migrations.CreateModel(
            name='UserModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=200)),
                ('name', models.CharField(max_length=50)),
                ('password', models.CharField(max_length=200)),
                ('deleted', models.IntegerField(default=0)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
            ],
            options={
                'db_table': 'user',
            },
            managers=[
                ('objects', backend.models.user.UserManager()),
            ],
        ),
        # An AWS account/role reachable by a tenant (table "aws_environment").
        migrations.CreateModel(
            name='AwsEnvironmentModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted', models.IntegerField(default=0)),
                ('name', models.CharField(max_length=200)),
                ('aws_account_id', models.CharField(max_length=200)),
                ('aws_role', models.CharField(max_length=200)),
                ('aws_external_id', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'aws_environment',
            },
        ),
        # Polymorphic base for events (table "event"); concrete subtypes
        # (e.g. ScheduleModel below) link back via a parent OneToOne.
        migrations.CreateModel(
            name='EventModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted', models.IntegerField(default=0)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'event',
            },
        ),
        # Polymorphic base for notification targets
        # (table "notification_destination").
        migrations.CreateModel(
            name='NotificationDestinationModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted', models.IntegerField(default=0)),
                ('name', models.CharField(max_length=50)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'notification_destination',
            },
        ),
        # Groups destinations per set of AWS environments
        # (table "notification_group").
        migrations.CreateModel(
            name='NotificationGroupModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted', models.IntegerField(default=0)),
                ('name', models.CharField(max_length=50)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('aws_environments', models.ManyToManyField(related_name='notification_groups', to='backend.AwsEnvironmentModel')),
            ],
            options={
                'db_table': 'notification_group',
            },
        ),
        # Role lookup table with hand-assigned (non-auto) integer PKs.
        migrations.CreateModel(
            name='RoleModel',
            fields=[
                ('deleted', models.IntegerField(default=0)),
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('role_name', models.CharField(max_length=50)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'role',
            },
        ),
        # Top-level tenant record (table "tenant").
        migrations.CreateModel(
            name='TenantModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted', models.IntegerField(default=0)),
                ('tenant_name', models.CharField(max_length=200)),
                ('email', models.EmailField(max_length=200)),
                ('tel', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'tenant',
            },
        ),
        # Concrete destination subtype: an email address (multi-table
        # inheritance off NotificationDestinationModel).
        migrations.CreateModel(
            name='EmailDestination',
            fields=[
                ('notificationdestinationmodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='backend.NotificationDestinationModel')),
                ('address', models.EmailField(max_length=200)),
            ],
            options={
                'db_table': 'email_destination',
            },
            bases=('backend.notificationdestinationmodel', models.Model),
        ),
        # Concrete event subtype: a scheduled action against one AWS
        # resource (multi-table inheritance off EventModel).
        migrations.CreateModel(
            name='ScheduleModel',
            fields=[
                ('eventmodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='backend.EventModel')),
                ('name', models.CharField(max_length=200)),
                ('action', models.CharField(max_length=200)),
                ('params', models.CharField(blank=True, max_length=200, null=True)),
                ('notification', models.BooleanField()),
                ('resource_id', models.CharField(max_length=200)),
                ('service', models.CharField(max_length=50)),
                ('region', models.CharField(max_length=50)),
            ],
            options={
                'db_table': 'schedule',
            },
            bases=('backend.eventmodel', models.Model),
        ),
        # Relations added after all tables exist, to avoid forward
        # references between the CreateModel operations above.
        migrations.AddField(
            model_name='notificationgroupmodel',
            name='destinations',
            field=models.ManyToManyField(related_name='notification_groups', to='backend.NotificationDestinationModel'),
        ),
        migrations.AddField(
            model_name='notificationgroupmodel',
            name='tenant',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notification_groups', to='backend.TenantModel'),
        ),
        # polymorphic_ctype is django-polymorphic's pointer to the concrete
        # subclass's ContentType for each row.
        migrations.AddField(
            model_name='notificationdestinationmodel',
            name='polymorphic_ctype',
            field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_backend.notificationdestinationmodel_set+', to='contenttypes.ContentType'),
        ),
        migrations.AddField(
            model_name='notificationdestinationmodel',
            name='tenant',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notification_destinations', to='backend.TenantModel'),
        ),
        migrations.AddField(
            model_name='eventmodel',
            name='polymorphic_ctype',
            field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_backend.eventmodel_set+', to='contenttypes.ContentType'),
        ),
        migrations.AddField(
            model_name='awsenvironmentmodel',
            name='tenant',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aws_environments', to='backend.TenantModel'),
        ),
        migrations.AddField(
            model_name='usermodel',
            name='aws_environments',
            field=models.ManyToManyField(to='backend.AwsEnvironmentModel'),
        ),
        migrations.AddField(
            model_name='usermodel',
            name='groups',
            field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
        ),
        # PROTECT: a role cannot be deleted while users still reference it.
        migrations.AddField(
            model_name='usermodel',
            name='role',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='users', to='backend.RoleModel'),
        ),
        migrations.AddField(
            model_name='usermodel',
            name='tenant',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='users', to='backend.TenantModel'),
        ),
        migrations.AddField(
            model_name='usermodel',
            name='user_permissions',
            field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
        ),
        migrations.AddField(
            model_name='schedulemodel',
            name='aws_environment',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='schedules', to='backend.AwsEnvironmentModel'),
        ),
        # Uniqueness includes "deleted" so soft-deleted rows don't collide
        # with live ones on the same account id / email.
        migrations.AlterUniqueTogether(
            name='awsenvironmentmodel',
            unique_together={('aws_account_id', 'deleted')},
        ),
        migrations.AlterUniqueTogether(
            name='usermodel',
            unique_together={('email', 'deleted')},
        ),
    ]
|
[
"django.db.models.OneToOneField",
"django.db.models.ManyToManyField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.EmailField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField",
"django.db.migrations.AlterUniqueTogether"
] |
[((10624, 10736), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""awsenvironmentmodel"""', 'unique_together': "{('aws_account_id', 'deleted')}"}), "(name='awsenvironmentmodel', unique_together=\n {('aws_account_id', 'deleted')})\n", (10654, 10736), False, 'from django.db import migrations, models\n'), ((10780, 10872), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""usermodel"""', 'unique_together': "{('email', 'deleted')}"}), "(name='usermodel', unique_together={('email',\n 'deleted')})\n", (10810, 10872), False, 'from django.db import migrations, models\n'), ((7417, 7523), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""notification_groups"""', 'to': '"""backend.NotificationDestinationModel"""'}), "(related_name='notification_groups', to=\n 'backend.NotificationDestinationModel')\n", (7439, 7523), False, 'from django.db import migrations, models\n'), ((7659, 7788), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""notification_groups"""', 'to': '"""backend.TenantModel"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='notification_groups', to='backend.TenantModel')\n", (7676, 7788), False, 'from django.db import migrations, models\n'), ((7941, 8146), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'editable': '(False)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""polymorphic_backend.notificationdestinationmodel_set+"""', 'to': '"""contenttypes.ContentType"""'}), "(editable=False, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name=\n 'polymorphic_backend.notificationdestinationmodel_set+', to=\n 'contenttypes.ContentType')\n", (7958, 8146), False, 'from django.db import migrations, models\n'), ((8278, 8413), 'django.db.models.ForeignKey', 'models.ForeignKey', 
([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""notification_destinations"""', 'to': '"""backend.TenantModel"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='notification_destinations', to='backend.TenantModel')\n", (8295, 8413), False, 'from django.db import migrations, models\n'), ((8548, 8729), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'editable': '(False)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""polymorphic_backend.eventmodel_set+"""', 'to': '"""contenttypes.ContentType"""'}), "(editable=False, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='polymorphic_backend.eventmodel_set+',\n to='contenttypes.ContentType')\n", (8565, 8729), False, 'from django.db import migrations, models\n'), ((8858, 8984), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""aws_environments"""', 'to': '"""backend.TenantModel"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='aws_environments', to='backend.TenantModel')\n", (8875, 8984), False, 'from django.db import migrations, models\n'), ((9117, 9173), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""backend.AwsEnvironmentModel"""'}), "(to='backend.AwsEnvironmentModel')\n", (9139, 9173), False, 'from django.db import migrations, models\n'), ((9301, 9552), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'help_text': '"""The groups this user belongs to. A user will get all permissions granted to each of their groups."""', 'related_name': '"""user_set"""', 'related_query_name': '"""user"""', 'to': '"""auth.Group"""', 'verbose_name': '"""groups"""'}), "(blank=True, help_text=\n 'The groups this user belongs to. 
A user will get all permissions granted to each of their groups.'\n , related_name='user_set', related_query_name='user', to='auth.Group',\n verbose_name='groups')\n", (9323, 9552), False, 'from django.db import migrations, models\n'), ((9664, 9777), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""users"""', 'to': '"""backend.RoleModel"""'}), "(on_delete=django.db.models.deletion.PROTECT, related_name\n ='users', to='backend.RoleModel')\n", (9681, 9777), False, 'from django.db import migrations, models\n'), ((9900, 10015), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""users"""', 'to': '"""backend.TenantModel"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='users', to='backend.TenantModel')\n", (9917, 10015), False, 'from django.db import migrations, models\n'), ((10148, 10352), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'help_text': '"""Specific permissions for this user."""', 'related_name': '"""user_set"""', 'related_query_name': '"""user"""', 'to': '"""auth.Permission"""', 'verbose_name': '"""user permissions"""'}), "(blank=True, help_text=\n 'Specific permissions for this user.', related_name='user_set',\n related_query_name='user', to='auth.Permission', verbose_name=\n 'user permissions')\n", (10170, 10352), False, 'from django.db import migrations, models\n'), ((10479, 10606), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""schedules"""', 'to': '"""backend.AwsEnvironmentModel"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='schedules', to='backend.AwsEnvironmentModel')\n", (10496, 10606), False, 'from django.db import migrations, models\n'), ((502, 595), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 
'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (518, 595), False, 'from django.db import migrations, models\n'), ((626, 696), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""last login"""'}), "(blank=True, null=True, verbose_name='last login')\n", (646, 696), False, 'from django.db import migrations, models\n'), ((733, 904), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Designates that this user has all permissions without explicitly assigning them."""', 'verbose_name': '"""superuser status"""'}), "(default=False, help_text=\n 'Designates that this user has all permissions without explicitly assigning them.'\n , verbose_name='superuser status')\n", (752, 904), False, 'from django.db import migrations, models\n'), ((924, 957), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (941, 957), False, 'from django.db import migrations, models\n'), ((986, 1017), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1002, 1017), False, 'from django.db import migrations, models\n'), ((1050, 1082), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1066, 1082), False, 'from django.db import migrations, models\n'), ((1114, 1144), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1133, 1144), False, 'from django.db import migrations, models\n'), ((1179, 1218), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1199, 1218), False, 'from django.db import migrations, models\n'), ((1253, 1288), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': 
'(True)'}), '(auto_now=True)\n', (1273, 1288), False, 'from django.db import migrations, models\n'), ((1321, 1464), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Designates whether the user can log into this admin site."""', 'verbose_name': '"""staff status"""'}), "(default=False, help_text=\n 'Designates whether the user can log into this admin site.',\n verbose_name='staff status')\n", (1340, 1464), False, 'from django.db import migrations, models\n'), ((1489, 1670), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'help_text': '"""Designates whether this user should be treated as active. Unselect this instead of deleting accounts."""', 'verbose_name': '"""active"""'}), "(default=True, help_text=\n 'Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'\n , verbose_name='active')\n", (1508, 1670), False, 'from django.db import migrations, models\n'), ((1992, 2085), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2008, 2085), False, 'from django.db import migrations, models\n'), ((2113, 2143), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2132, 2143), False, 'from django.db import migrations, models\n'), ((2172, 2204), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2188, 2204), False, 'from django.db import migrations, models\n'), ((2243, 2275), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2259, 2275), False, 'from django.db import migrations, models\n'), ((2308, 2340), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', 
(2324, 2340), False, 'from django.db import migrations, models\n'), ((2380, 2412), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2396, 2412), False, 'from django.db import migrations, models\n'), ((2447, 2486), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2467, 2486), False, 'from django.db import migrations, models\n'), ((2521, 2556), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2541, 2556), False, 'from django.db import migrations, models\n'), ((2785, 2878), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2801, 2878), False, 'from django.db import migrations, models\n'), ((2906, 2936), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2925, 2936), False, 'from django.db import migrations, models\n'), ((2971, 3010), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2991, 3010), False, 'from django.db import migrations, models\n'), ((3045, 3080), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (3065, 3080), False, 'from django.db import migrations, models\n'), ((3317, 3410), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3333, 3410), False, 'from django.db import migrations, models\n'), ((3438, 3468), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (3457, 3468), 
False, 'from django.db import migrations, models\n'), ((3497, 3528), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (3513, 3528), False, 'from django.db import migrations, models\n'), ((3563, 3602), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (3583, 3602), False, 'from django.db import migrations, models\n'), ((3637, 3672), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (3657, 3672), False, 'from django.db import migrations, models\n'), ((3922, 4015), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3938, 4015), False, 'from django.db import migrations, models\n'), ((4043, 4073), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4062, 4073), False, 'from django.db import migrations, models\n'), ((4102, 4133), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (4118, 4133), False, 'from django.db import migrations, models\n'), ((4168, 4207), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4188, 4207), False, 'from django.db import migrations, models\n'), ((4242, 4277), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (4262, 4277), False, 'from django.db import migrations, models\n'), ((4318, 4415), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""notification_groups"""', 'to': '"""backend.AwsEnvironmentModel"""'}), "(related_name='notification_groups', to=\n 'backend.AwsEnvironmentModel')\n", (4340, 4415), False, 'from 
django.db import migrations, models\n'), ((4646, 4676), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4665, 4676), False, 'from django.db import migrations, models\n'), ((4703, 4757), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (4722, 4757), False, 'from django.db import migrations, models\n'), ((4791, 4822), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (4807, 4822), False, 'from django.db import migrations, models\n'), ((4857, 4896), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4877, 4896), False, 'from django.db import migrations, models\n'), ((4931, 4966), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (4951, 4966), False, 'from django.db import migrations, models\n'), ((5185, 5278), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (5201, 5278), False, 'from django.db import migrations, models\n'), ((5306, 5336), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5325, 5336), False, 'from django.db import migrations, models\n'), ((5372, 5404), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (5388, 5404), False, 'from django.db import migrations, models\n'), ((5434, 5467), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (5451, 5467), False, 'from django.db import migrations, models\n'), ((5495, 5527), 'django.db.models.CharField', 'models.CharField', ([], 
{'max_length': '(200)'}), '(max_length=200)\n', (5511, 5527), False, 'from django.db import migrations, models\n'), ((5562, 5601), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (5582, 5601), False, 'from django.db import migrations, models\n'), ((5636, 5671), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (5656, 5671), False, 'from django.db import migrations, models\n'), ((5927, 6117), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""backend.NotificationDestinationModel"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'backend.NotificationDestinationModel')\n", (5947, 6117), False, 'from django.db import migrations, models\n'), ((6139, 6172), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (6156, 6172), False, 'from django.db import migrations, models\n'), ((6493, 6665), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""backend.EventModel"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'backend.EventModel')\n", (6513, 6665), False, 'from django.db import migrations, models\n'), ((6684, 6716), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (6700, 6716), False, 'from django.db import migrations, models\n'), ((6747, 6779), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), 
'(max_length=200)\n', (6763, 6779), False, 'from django.db import migrations, models\n'), ((6810, 6865), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'null': '(True)'}), '(blank=True, max_length=200, null=True)\n', (6826, 6865), False, 'from django.db import migrations, models\n'), ((6902, 6923), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (6921, 6923), False, 'from django.db import migrations, models\n'), ((6959, 6991), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (6975, 6991), False, 'from django.db import migrations, models\n'), ((7023, 7054), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (7039, 7054), False, 'from django.db import migrations, models\n'), ((7085, 7116), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (7101, 7116), False, 'from django.db import migrations, models\n')]
|
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["StructureMapModelMode"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class StructureMapModelMode:
"""
StructureMapModelMode
How the referenced structure is used in this mapping.
Status: draft - Version: 4.0.1
Copyright None
http://hl7.org/fhir/map-model-mode
"""
source = CodeSystemConcept(
{
"code": "source",
"definition": "This structure describes an instance passed to the mapping engine that is used a source of data.",
"display": "Source Structure Definition",
}
)
"""
Source Structure Definition
This structure describes an instance passed to the mapping engine that is used a source of data.
"""
queried = CodeSystemConcept(
{
"code": "queried",
"definition": "This structure describes an instance that the mapping engine may ask for that is used a source of data.",
"display": "Queried Structure Definition",
}
)
"""
Queried Structure Definition
This structure describes an instance that the mapping engine may ask for that is used a source of data.
"""
target = CodeSystemConcept(
{
"code": "target",
"definition": "This structure describes an instance passed to the mapping engine that is used a target of data.",
"display": "Target Structure Definition",
}
)
"""
Target Structure Definition
This structure describes an instance passed to the mapping engine that is used a target of data.
"""
produced = CodeSystemConcept(
{
"code": "produced",
"definition": "This structure describes an instance that the mapping engine may ask to create that is used a target of data.",
"display": "Produced Structure Definition",
}
)
"""
Produced Structure Definition
This structure describes an instance that the mapping engine may ask to create that is used a target of data.
"""
class Meta:
resource = _resource
|
[
"pathlib.Path",
"oops_fhir.utils.CodeSystemConcept"
] |
[((474, 676), 'oops_fhir.utils.CodeSystemConcept', 'CodeSystemConcept', (["{'code': 'source', 'definition':\n 'This structure describes an instance passed to the mapping engine that is used a source of data.'\n , 'display': 'Source Structure Definition'}"], {}), "({'code': 'source', 'definition':\n 'This structure describes an instance passed to the mapping engine that is used a source of data.'\n , 'display': 'Source Structure Definition'})\n", (491, 676), False, 'from oops_fhir.utils import CodeSystemConcept\n'), ((894, 1105), 'oops_fhir.utils.CodeSystemConcept', 'CodeSystemConcept', (["{'code': 'queried', 'definition':\n 'This structure describes an instance that the mapping engine may ask for that is used a source of data.'\n , 'display': 'Queried Structure Definition'}"], {}), "({'code': 'queried', 'definition':\n 'This structure describes an instance that the mapping engine may ask for that is used a source of data.'\n , 'display': 'Queried Structure Definition'})\n", (911, 1105), False, 'from oops_fhir.utils import CodeSystemConcept\n'), ((1330, 1532), 'oops_fhir.utils.CodeSystemConcept', 'CodeSystemConcept', (["{'code': 'target', 'definition':\n 'This structure describes an instance passed to the mapping engine that is used a target of data.'\n , 'display': 'Target Structure Definition'}"], {}), "({'code': 'target', 'definition':\n 'This structure describes an instance passed to the mapping engine that is used a target of data.'\n , 'display': 'Target Structure Definition'})\n", (1347, 1532), False, 'from oops_fhir.utils import CodeSystemConcept\n'), ((1751, 1970), 'oops_fhir.utils.CodeSystemConcept', 'CodeSystemConcept', (["{'code': 'produced', 'definition':\n 'This structure describes an instance that the mapping engine may ask to create that is used a target of data.'\n , 'display': 'Produced Structure Definition'}"], {}), "({'code': 'produced', 'definition':\n 'This structure describes an instance that the mapping engine may ask to create that is used a 
target of data.'\n , 'display': 'Produced Structure Definition'})\n", (1768, 1970), False, 'from oops_fhir.utils import CodeSystemConcept\n'), ((195, 209), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (199, 209), False, 'from pathlib import Path\n')]
|
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib import admin
from internets import urls as internets_urls
from django.conf import settings
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'django.views.generic.simple.direct_to_template',
{'template': 'index.html'}, name="index"),
url(r'^api/', include(internets_urls)),
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
|
[
"django.contrib.admin.autodiscover"
] |
[((210, 230), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (228, 230), False, 'from django.contrib import admin\n')]
|
"""
Filename: count_words.py
Date: 2019-07-21
Author: <NAME>
E-mail: <EMAIL>
License:
The code is licensed under MIT License. Please read the LICENSE file in
this distribution for details regarding the licensing of this code.
Description:
Various visualizations by textual analysis of words.
"""
from collections import Counter
from typing import List
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from barchart_labeling import show_values_on_bars
ARTICLES = ["a", "an", "the"]
CONJUNCTIONS = ["for", "and", "nor", "but", "or", "yet", "so"]
LINKING_VERBS = [
"am",
"is",
"are",
"was",
"were",
"has",
"became",
"become",
"seem",
"seemed",
"appear",
"appeared",
"smell",
"sound",
"taste",
"feel",
]
PREPOSITIONS = [
"about",
"above",
"according to",
"across",
"after",
"against",
"ahead of",
"along",
"amidst",
"among",
"amongst",
"apart from",
"around",
"as",
"as far as",
"as well as",
"aside from",
"at",
"barring",
"because of",
"before",
"behind",
"below",
"beneath",
"beside",
"besides",
"between",
"beyond",
"by",
"by means of",
"circa",
"concerning",
"despite",
"down",
"due to",
"during",
"in",
"in accordance with",
"in addition to",
"in case of",
"in front of",
"in lieu of",
"in place of",
"in spite of",
"in to",
"inside",
"instead of",
"into",
"except",
"except for",
"excluding",
"for",
"following",
"from",
"like",
"minus",
"near",
"next",
"next to",
"past",
"per",
"prior to",
"round",
"since",
"off",
"on",
"on account of",
"on behalf of",
"on to",
"on top of",
"onto",
"opposite",
"out",
"out from",
"out of",
"outside",
"over",
"owing to",
"plus",
"than",
"through",
"throughout",
"till",
"times",
"to",
"toward",
"towards",
"under",
"underneath",
"unlike",
"until",
"unto",
"up",
"upon",
"via",
"with",
"with a view to",
"within",
"without",
]
PRONOUNS = [
"i",
"you",
"he",
"she",
"it",
"we",
"they",
"what",
"who",
"me",
"him",
"her",
"it",
"us",
"you",
"them",
"whom",
"mine",
"yours",
"his",
"hers",
"ours",
"theirs",
"this",
"that",
"these",
"those",
"who",
"whom",
"which",
"what",
"whose",
"whoever",
"whatever",
"whichever",
"whomever",
"who",
"whom",
"whose",
"which",
"that",
"what",
"whatever",
"whoever",
"whomever",
"whichever",
"myself",
"yourself",
"himself",
"herself",
"itself",
"ourselves",
"themselves",
"myself",
"yourself",
"himself",
"herself",
"itself",
"ourselves",
"themselves",
"each other",
"one another",
]
REST = [
"of",
"our",
"be",
"not",
"have",
"has",
"your",
"if",
"while",
"therefore",
"hence",
"thus",
"so",
"will",
"would",
"no",
"yes",
"it's",
"one",
"two",
"it",
"yours",
"their",
"they",
"its",
"when",
"just",
"because",
"my",
]
def most_common_words(filepath: str, number: int) -> List[str]:
"""Count the number of occurences of the certain word in a file.
Parameters:
filepath: a full path to the file
number: number of most common words
"""
data = pd.read_csv(filepath, na_filter=False, thousands=",")
ad_text = [text.split() for text in data["Ad Text"]]
ad_text_words = [
item.lower() for text_list in ad_text for item in text_list
]
ad_text_words = [word for word in ad_text_words if word not in ARTICLES]
ad_text_words = [
word for word in ad_text_words if word not in CONJUNCTIONS
]
ad_text_words = [
word for word in ad_text_words if word not in LINKING_VERBS
]
ad_text_words = [word for word in ad_text_words if word not in PRONOUNS]
ad_text_words = [
word for word in ad_text_words if word not in PREPOSITIONS
]
ad_text_words = [word for word in ad_text_words if word not in REST]
return Counter(ad_text_words).most_common(number)
def word_counter(filepath: str, word: str) -> int:
"""Count the number of occurences of the certain word in a file.
Parameters:
filepath: a full path to the file
word: a word to be counted
"""
data = pd.read_csv(filepath, na_filter=False, thousands=",")
count = 0
for ad_text in data["Ad Text"]:
if word in ad_text:
count += 1
return count
def main() -> None:
"""The main function."""
top_25 = most_common_words("../data/csv/all/all.csv", 25)
word_counts = [i[1] for i in top_25]
word_names = [i[0] for i in top_25]
sns.set(
color_codes=True, rc={"figure.figsize": (15.0, 9.0)}, style="darkgrid"
)
# Plot and save the barchart
_, _ = plt.subplots(1, 1)
sns_plot = sns.barplot(word_counts, word_names)
sns_plot.set_title("Most Common Words in 2015, 2016, and 2017 Combined")
show_values_on_bars(sns_plot, space=max(word_counts) * 0.5 / 100)
figure = sns_plot.get_figure()
figure_name = "barchart_word_counts.png"
figure.savefig(figure_name)
if __name__ == "__main__":
main()
|
[
"pandas.read_csv",
"seaborn.barplot",
"matplotlib.pyplot.subplots",
"collections.Counter",
"seaborn.set"
] |
[((3723, 3776), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'na_filter': '(False)', 'thousands': '""","""'}), "(filepath, na_filter=False, thousands=',')\n", (3734, 3776), True, 'import pandas as pd\n'), ((4740, 4793), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'na_filter': '(False)', 'thousands': '""","""'}), "(filepath, na_filter=False, thousands=',')\n", (4751, 4793), True, 'import pandas as pd\n'), ((5113, 5192), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)', 'rc': "{'figure.figsize': (15.0, 9.0)}", 'style': '"""darkgrid"""'}), "(color_codes=True, rc={'figure.figsize': (15.0, 9.0)}, style='darkgrid')\n", (5120, 5192), True, 'import seaborn as sns\n'), ((5252, 5270), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (5264, 5270), True, 'import matplotlib.pyplot as plt\n'), ((5286, 5322), 'seaborn.barplot', 'sns.barplot', (['word_counts', 'word_names'], {}), '(word_counts, word_names)\n', (5297, 5322), True, 'import seaborn as sns\n'), ((4462, 4484), 'collections.Counter', 'Counter', (['ad_text_words'], {}), '(ad_text_words)\n', (4469, 4484), False, 'from collections import Counter\n')]
|
"""资源授权
"""
# -*- coding:utf-8 -*-
#import init_env
import time
from splinter import Browser
from customCompany.resource import Resource
from resourcePlatform.order import Order
from login_web import Loginzxy
class Grant:
"""资源授权
"""
def __init__(self):
self.browser = Browser("chrome")
def destroy(self) -> None:
"""销毁
"""
self.browser.quit()
def create(self):
"""资源授权
"""
try:
# 1.登录rastest9
browser = self.login_rastest9()
time.sleep(3)
# 2.创建订单
Order(browser)
# 3.登录 kedong.zhixueyun.com
browser = self.login_kedong()
# 4.获取资源
Resource(browser)
finally:
self.browser.quit()
def login_rastest9(self) -> Browser:
"""登录到rastest9
"""
rastest9 = Loginzxy(
self.browser, "https://rastest9.zhixueyun.com", "admin", "<PASSWORD>")
return rastest9.login()
def login_kedong(self) -> Browser:
"""登录到kedong.zhixueyun.com
"""
kedong = Loginzxy(
self.browser, "https://kedong.zhixueyun.com", "admin", "<PASSWORD>")
return kedong.login()
if __name__ == "__main__":
Grant().create()
|
[
"customCompany.resource.Resource",
"resourcePlatform.order.Order",
"time.sleep",
"login_web.Loginzxy",
"splinter.Browser"
] |
[((292, 309), 'splinter.Browser', 'Browser', (['"""chrome"""'], {}), "('chrome')\n", (299, 309), False, 'from splinter import Browser\n'), ((883, 962), 'login_web.Loginzxy', 'Loginzxy', (['self.browser', '"""https://rastest9.zhixueyun.com"""', '"""admin"""', '"""<PASSWORD>"""'], {}), "(self.browser, 'https://rastest9.zhixueyun.com', 'admin', '<PASSWORD>')\n", (891, 962), False, 'from login_web import Loginzxy\n'), ((1112, 1189), 'login_web.Loginzxy', 'Loginzxy', (['self.browser', '"""https://kedong.zhixueyun.com"""', '"""admin"""', '"""<PASSWORD>"""'], {}), "(self.browser, 'https://kedong.zhixueyun.com', 'admin', '<PASSWORD>')\n", (1120, 1189), False, 'from login_web import Loginzxy\n'), ((543, 556), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (553, 556), False, 'import time\n'), ((590, 604), 'resourcePlatform.order.Order', 'Order', (['browser'], {}), '(browser)\n', (595, 604), False, 'from resourcePlatform.order import Order\n'), ((720, 737), 'customCompany.resource.Resource', 'Resource', (['browser'], {}), '(browser)\n', (728, 737), False, 'from customCompany.resource import Resource\n')]
|
import simplejson as json
from datasets.packaged_modules.elasticsearch.elasticsearch import ElasticsearchBuilder
ca_file = "/Users/gdupont/src/github.com/bigscience-workshop/data-tooling/index_search/ca.cert"
with open(
"/Users/gdupont/src/github.com/bigscience-workshop/data-tooling/index_search/credentials.json"
) as f:
credentials = json.load(f)
the_host = credentials["connection"]["https"]["hosts"][0]["hostname"]
the_port = credentials["connection"]["https"]["hosts"][0]["port"]
username = credentials["connection"]["https"]["authentication"]["username"]
psw = credentials["connection"]["https"]["authentication"]["password"]
index_name = "oscar_unshuffled_deduplicated"
oscar_lang_code = "nn"
elasticsearch_builder = ElasticsearchBuilder(
host=the_host,
port=the_port,
es_username=username,
es_psw=psw,
ca_file=ca_file,
es_index_name=index_name,
es_index_config=None,
query="mykje arbeid og slit",
)
# elasticsearch_builder = ElasticsearchBuilder(
# host="localhost",
# port="9200",
# es_index_name="oscar_unshuffled_deduplicated",
# es_index_config=es_index_config,
# query='"mykje arbeid og slit"'
# )
elasticsearch_builder.download_and_prepare()
oscar_dataset_filtered = elasticsearch_builder.as_dataset()
print(oscar_dataset_filtered.keys())
first_split = next(iter(oscar_dataset_filtered))
for i in range(0, 5):
print(
f"- [#{oscar_dataset_filtered[first_split]['id'][i]}] {oscar_dataset_filtered[first_split]['text'][i]}"
)
|
[
"simplejson.load",
"datasets.packaged_modules.elasticsearch.elasticsearch.ElasticsearchBuilder"
] |
[((754, 942), 'datasets.packaged_modules.elasticsearch.elasticsearch.ElasticsearchBuilder', 'ElasticsearchBuilder', ([], {'host': 'the_host', 'port': 'the_port', 'es_username': 'username', 'es_psw': 'psw', 'ca_file': 'ca_file', 'es_index_name': 'index_name', 'es_index_config': 'None', 'query': '"""mykje arbeid og slit"""'}), "(host=the_host, port=the_port, es_username=username,\n es_psw=psw, ca_file=ca_file, es_index_name=index_name, es_index_config=\n None, query='mykje arbeid og slit')\n", (774, 942), False, 'from datasets.packaged_modules.elasticsearch.elasticsearch import ElasticsearchBuilder\n'), ((346, 358), 'simplejson.load', 'json.load', (['f'], {}), '(f)\n', (355, 358), True, 'import simplejson as json\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 26 19:03:06 2021
@author: leonl42
Unit test for testing if punctuation removal works correctly
"""
from scripts.preprocessing.punctuation_remover import PunctuationRemover
from scripts.util import COLUMN_TWEET
import unittest
import pandas as pd
class PunctuationRemoverTest(unittest.TestCase):
""""Test punctuation removal"""
def setUp(self):
self._df = pd.DataFrame()
self._df[COLUMN_TWEET] = ["This,, tweet##()() has a lot of !!punctuation%%"]
self._expected_output = "This tweet has a lot of punctuation"
self._punctuation_remover = PunctuationRemover()
def test_punctuation_removal(self):
""""Test punctuation removal on a predefined string"""
without_puncuation = self._punctuation_remover.fit_transform(self._df)
self.assertEqual(without_puncuation[COLUMN_TWEET][0], self._expected_output)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"scripts.preprocessing.punctuation_remover.PunctuationRemover",
"pandas.DataFrame"
] |
[((1006, 1021), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1019, 1021), False, 'import unittest\n'), ((460, 474), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (472, 474), True, 'import pandas as pd\n'), ((666, 686), 'scripts.preprocessing.punctuation_remover.PunctuationRemover', 'PunctuationRemover', ([], {}), '()\n', (684, 686), False, 'from scripts.preprocessing.punctuation_remover import PunctuationRemover\n')]
|
#!/usr/bin/python
from os import system, listdir
from webbrowser import open
def main():
try:
files = listdir(".")
for i in files:
if i == "manage.py":
system("python manage.py migrate")
system("python manage.py makemigrations salt")
system("python manage.py migrate")
open("http://localhost:2000")
system("python manage.py runserver 2000")
quit()
break
system("python ../manage.py migrate")
system("python ../manage.py makemigrations salt")
system("python ../manage.py migrate")
open("http://localhost:2000")
system("python ../manage.py runserver 2000")
except Exception as exp:
print("Something went wrong!")
print("Details : \n")
print(exp)
quit()
if __name__ == '__main__':
main()
|
[
"webbrowser.open",
"os.system",
"os.listdir"
] |
[((106, 118), 'os.listdir', 'listdir', (['"""."""'], {}), "('.')\n", (113, 118), False, 'from os import system, listdir\n'), ((393, 430), 'os.system', 'system', (['"""python ../manage.py migrate"""'], {}), "('python ../manage.py migrate')\n", (399, 430), False, 'from os import system, listdir\n'), ((433, 482), 'os.system', 'system', (['"""python ../manage.py makemigrations salt"""'], {}), "('python ../manage.py makemigrations salt')\n", (439, 482), False, 'from os import system, listdir\n'), ((485, 522), 'os.system', 'system', (['"""python ../manage.py migrate"""'], {}), "('python ../manage.py migrate')\n", (491, 522), False, 'from os import system, listdir\n'), ((525, 554), 'webbrowser.open', 'open', (['"""http://localhost:2000"""'], {}), "('http://localhost:2000')\n", (529, 554), False, 'from webbrowser import open\n'), ((557, 601), 'os.system', 'system', (['"""python ../manage.py runserver 2000"""'], {}), "('python ../manage.py runserver 2000')\n", (563, 601), False, 'from os import system, listdir\n'), ((165, 199), 'os.system', 'system', (['"""python manage.py migrate"""'], {}), "('python manage.py migrate')\n", (171, 199), False, 'from os import system, listdir\n'), ((204, 250), 'os.system', 'system', (['"""python manage.py makemigrations salt"""'], {}), "('python manage.py makemigrations salt')\n", (210, 250), False, 'from os import system, listdir\n'), ((255, 289), 'os.system', 'system', (['"""python manage.py migrate"""'], {}), "('python manage.py migrate')\n", (261, 289), False, 'from os import system, listdir\n'), ((294, 323), 'webbrowser.open', 'open', (['"""http://localhost:2000"""'], {}), "('http://localhost:2000')\n", (298, 323), False, 'from webbrowser import open\n'), ((328, 369), 'os.system', 'system', (['"""python manage.py runserver 2000"""'], {}), "('python manage.py runserver 2000')\n", (334, 369), False, 'from os import system, listdir\n')]
|
#!/usr/bin/python
##########################################################################
#
# MTraceCheck
# Copyright 2017 The Regents of the University of Michigan
# <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
import os
import sys
import argparse
import parse_weight
""" Log format for signatures
0: 0x0000000000b4d3fa 0x000000000b839f08
1: 0x0000000000b4d437 0x000000000b95ddff
2: 0x0000000000b51f37 0x00000000045f81b7
3: 0x0000000000b4d437 0x000000000b95ddd7
4: 0x0000000000b4d437 0x000000000b964ef7
5: 0x0000000000c91437 0x000000000b8e9d97
6: 0x0000000000b4d437 0x000000000b95ddd7
7: 0x0000000000b51c37 0x00000000045f81b7
8: 0x0000000000b4d437 0x000000000b95ddd7
9: 0x0000000000b4d437 0x0000000004609af7
10: 0x0000000000c91737 0x000000000b8e9dbf
...
131070: 0x0000000000c91437 0x000000000b95ddd7
131071: 0x0000000000b4d437 0x000000000b964ef7
0x0000000000b4d3fa 0x000000000b839f08: 1
0x0000000000b4d437 0x000000000317037f: 1
0x0000000000b4d437 0x00000000031703f7: 31
...
0x0000000000ca3437 0x000000000b8e9e0f: 1
0x0000000000ca3437 0x000000000b8e9e37: 1
0x0000000000ca3437 0x000000000b948c7f: 1
0x0000000000ca3437 0x000000000b94c497: 1
0x0000000000ca3437 0x000000000b953517: 3
0x0000000000ca3437 0x000000000b95ddd7: 2606
0x0000000000ca3437 0x000000000b964e57: 81
0x0000000000ca35b7 0x000000000b8df4d7: 2
Number of unique results 351 out of 131072
"""
""" Weights for parsing signatures (no indentation in real code)
Thread 0 Word 0
Profile 0
Profile 1
...
Thread 0 Word 1
...
Thread 1 Word 0
...
Thread 2 Word 0
...
For each profile element: "weight stride, # weights" (e.g., 4,5 => weights are 0, 4, 8, 12, 16)
"""
parser = argparse.ArgumentParser(description="Arguments for %s " % __file__)
parser.add_argument("--verbose", "-v", action="count", default=0)
parser.add_argument("--debug", "-d", action="store_true", default=False)
parser.add_argument("--signature-file", default=None)
parser.add_argument("--profile-file", default=None)
args = parser.parse_args()
assert(args.signature_file != None and args.profile_file != None)
verbosity = args.verbose
########################################################################
# Read ordered signature words
########################################################################
signatureFP = open(args.signature_file, "r")
signatureList = []
for line in signatureFP:
if not line.startswith(" 0x"):
continue
tokens = line.split(":")
signatureStrings = tokens[0].lstrip().split()
signatures = []
for string in signatureStrings:
signatures.append(int(string, 16))
signatureList.append(signatures)
signatureFP.close()
# Verify if the signatureList is fully ordered in ascending order
fullyOrdered = True
prevSignature = signatureList[0]
signatureLength = len(signatureList[0])
for signatureIdx in range(1,len(signatureList)):
signature = signatureList[signatureIdx]
assert(len(signature) == signatureLength)
for wordIdx in range(signatureLength):
if signature[wordIdx] > prevSignature[wordIdx]:
break
elif signature[wordIdx] < prevSignature[wordIdx]:
print("Warning: prevSignature %s currSignature %s" % (prevSignature, signature))
fullyOrdered = False
prevSignature = signature
if (fullyOrdered):
print("Info: signatures are fully ordered")
else:
print("Info: signatrues are NOT ordered fully")
if (verbosity > 0):
for signature in signatureList:
for perThreadSignature in signature:
sys.stdout.write(" 0x%X" % perThreadSignature)
sys.stdout.write("\n")
########################################################################
# Read signature weights
########################################################################
returnDict = parse_weight.parseWeights(args.profile_file)
weightList = returnDict['weightList']
numThreads = returnDict['numThreads']
numWordsPerThread = returnDict['numWordsPerThread']
if (verbosity > 0):
for weightWord in weightList:
print(weightWord)
# Reordering weights
# NOTE: THIS SHOULD BE CAREFULLY UNCOMMENTED IN CONJUNCTION WITH OTHER FILES (codegen_common.py, ANALYSIS TOOLS)
"""
assert(len(weightList) == numThreads * numWordsPerThread)
newWeightList = [[] for i in range(numThreads * numWordsPerThread)]
for t in range(numThreads):
for w in range(numWordsPerThread):
idx = numThreads * (numWordsPerThread - 1 - w) + t
newWeightList[idx] = weightList[t * numWordsPerThread + w]
weightList = newWeightList
if (verbosity > 0):
for weightWord in weightList:
print(weightWord)
"""
########################################################################
# Compute differences between two adjacent signatures
########################################################################
differenceList = []
numDiffLoadsList = []
if len(signatureList) > 1:
signatureLength = len(signatureList[0])
assert(len(weightList) == signatureLength)
for signatureIdx in range(1, len(signatureList)):
difference = []
for wordIdx in range(signatureLength):
difference.append(signatureList[signatureIdx][wordIdx] - signatureList[signatureIdx-1][wordIdx])
differenceList.append(difference)
for difference in differenceList:
numDiffLoads = 0
for wordIdx in range(signatureLength):
num = 0
currDiff = abs(difference[wordIdx])
for weightTuple in reversed(weightList[wordIdx]):
if (currDiff == 0):
break
weight = weightTuple[0]
possibilities = weightTuple[1]
#print("currDiff %d weight %d possibilities %d" % (currDiff, weight, possibilities))
if currDiff >= weight:
pathIdx = currDiff / weight
assert(pathIdx < possibilities)
currDiff -= pathIdx * weight
num += 1
assert(currDiff == 0)
numDiffLoads += num
numDiffLoadsList.append(numDiffLoads)
else:
print("Info: only 1 signature found")
if (verbosity > 0):
assert(len(differenceList) == len(numDiffLoadsList))
for idx in range(len(differenceList)):
print("%s - %s" % (differenceList[idx], numDiffLoadsList[idx]))
sumDiffLoads = 0
for idx in range(len(numDiffLoadsList)):
sumDiffLoads += numDiffLoadsList[idx]
print("Info: %d different loads from last graph" % sumDiffLoads)
|
[
"sys.stdout.write",
"argparse.ArgumentParser",
"parse_weight.parseWeights"
] |
[((2386, 2453), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': "('Arguments for %s ' % __file__)"}), "(description='Arguments for %s ' % __file__)\n", (2409, 2453), False, 'import argparse\n'), ((4504, 4548), 'parse_weight.parseWeights', 'parse_weight.parseWeights', (['args.profile_file'], {}), '(args.profile_file)\n', (4529, 4548), False, 'import parse_weight\n'), ((4296, 4318), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (4312, 4318), False, 'import sys\n'), ((4241, 4287), 'sys.stdout.write', 'sys.stdout.write', (["(' 0x%X' % perThreadSignature)"], {}), "(' 0x%X' % perThreadSignature)\n", (4257, 4287), False, 'import sys\n')]
|
import os
from pre_push import run_checks
filepath = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.join(filepath, "..")
if __name__ == "__main__":
run_checks(project_root, verbose=True)
|
[
"os.path.abspath",
"os.path.join",
"pre_push.run_checks"
] |
[((112, 140), 'os.path.join', 'os.path.join', (['filepath', '""".."""'], {}), "(filepath, '..')\n", (124, 140), False, 'import os\n'), ((70, 95), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (85, 95), False, 'import os\n'), ((173, 211), 'pre_push.run_checks', 'run_checks', (['project_root'], {'verbose': '(True)'}), '(project_root, verbose=True)\n', (183, 211), False, 'from pre_push import run_checks\n')]
|
from typing import List, Tuple
import pytest
from reparsec import Parser
from reparsec.sequence import sym
a = sym("a")
b = sym("b")
c = sym("c")
d = sym("d")
e = sym("e")
f = sym("f")
g = sym("g")
h = sym("h")
comma = sym(",")
DATA_POSITIVE: List[
Tuple[Parser[str, Tuple[str, ...]], str, Tuple[str, ...]]
] = [
((a << comma).then(b), "a,b", ("a", "b")),
((a << comma).then(b << comma), "a,b,", ("a", "b")),
((a << comma).then(b << comma).then(c), "a,b,c", ("a", "b", "c")),
(
(a << comma).then(b << comma).then(c << comma),
"a,b,c,",
("a", "b", "c")
),
(
(a << comma).then(b << comma).then(c << comma).then(d),
"a,b,c,d",
("a", "b", "c", "d")
),
(
(a << comma).then(b << comma).then(c << comma).then(d << comma),
"a,b,c,d,",
("a", "b", "c", "d")
),
(
(a << comma).then(b << comma).then(c << comma).then(d << comma)
.then(e),
"a,b,c,d,e",
("a", "b", "c", "d", "e")
),
(
(a << comma).then(b << comma).then(c << comma).then(d << comma)
.then(e << comma),
"a,b,c,d,e,",
("a", "b", "c", "d", "e")
),
(
(a << comma).then(b << comma).then(c << comma).then(d << comma)
.then(e << comma).then(f),
"a,b,c,d,e,f",
("a", "b", "c", "d", "e", "f")
),
(
(a << comma).then(b << comma).then(c << comma).then(d << comma)
.then(e << comma).then(f << comma),
"a,b,c,d,e,f,",
("a", "b", "c", "d", "e", "f")
),
(
(a << comma).then(b << comma).then(c << comma).then(d << comma)
.then(e << comma).then(f << comma).then(g),
"a,b,c,d,e,f,g",
("a", "b", "c", "d", "e", "f", "g")
),
(
(a << comma).then(b << comma).then(c << comma).then(d << comma)
.then(e << comma).then(f << comma).then(g << comma),
"a,b,c,d,e,f,g,",
("a", "b", "c", "d", "e", "f", "g")
),
(
(a << comma).then(b << comma).then(c << comma).then(d << comma)
.then(e << comma).then(f << comma).then(g << comma).then(h),
"a,b,c,d,e,f,g,h",
("a", "b", "c", "d", "e", "f", "g", "h")
),
(
(a << comma).then(b << comma).then(c << comma).then(d << comma)
.then(e << comma).then(f << comma).then(g << comma).then(h << comma),
"a,b,c,d,e,f,g,h,",
("a", "b", "c", "d", "e", "f", "g", "h")
),
(a.then(b).apply(lambda a, b: (b, a)), "ab", ("b", "a")),
(
a.then(b).then(c).apply(lambda a, b, c: (c, b, a)),
"abc",
("c", "b", "a")
),
(
a.then(b).then(c).then(d).apply(lambda a, b, c, d: (d, c, b, a)),
"abcd",
("d", "c", "b", "a")
),
(
a.then(b).then(c).then(d).then(e)
.apply(lambda a, b, c, d, e: (e, d, c, b, a)),
"abcde",
("e", "d", "c", "b", "a")
),
(
a.then(b).then(c).then(d).then(e).then(f)
.apply(lambda a, b, c, d, e, f: (f, e, d, c, b, a)),
"abcdef",
("f", "e", "d", "c", "b", "a")
),
(
a.then(b).then(c).then(d).then(e).then(f).then(g)
.apply(lambda a, b, c, d, e, f, g: (g, f, e, d, c, b, a)),
"abcdefg",
("g", "f", "e", "d", "c", "b", "a")
),
(
a.then(b).then(c).then(d).then(e).then(f).then(g).then(h)
.apply(lambda a, b, c, d, e, f, g, h: (h, g, f, e, d, c, b, a)),
"abcdefgh",
("h", "g", "f", "e", "d", "c", "b", "a")
),
]
@pytest.mark.parametrize("parser, data, value", DATA_POSITIVE)
def test_positive(parser: Parser[str, str], data: str, value: str) -> None:
assert parser.parse(data).unwrap() == value
|
[
"pytest.mark.parametrize",
"reparsec.sequence.sym"
] |
[((114, 122), 'reparsec.sequence.sym', 'sym', (['"""a"""'], {}), "('a')\n", (117, 122), False, 'from reparsec.sequence import sym\n'), ((127, 135), 'reparsec.sequence.sym', 'sym', (['"""b"""'], {}), "('b')\n", (130, 135), False, 'from reparsec.sequence import sym\n'), ((140, 148), 'reparsec.sequence.sym', 'sym', (['"""c"""'], {}), "('c')\n", (143, 148), False, 'from reparsec.sequence import sym\n'), ((153, 161), 'reparsec.sequence.sym', 'sym', (['"""d"""'], {}), "('d')\n", (156, 161), False, 'from reparsec.sequence import sym\n'), ((166, 174), 'reparsec.sequence.sym', 'sym', (['"""e"""'], {}), "('e')\n", (169, 174), False, 'from reparsec.sequence import sym\n'), ((179, 187), 'reparsec.sequence.sym', 'sym', (['"""f"""'], {}), "('f')\n", (182, 187), False, 'from reparsec.sequence import sym\n'), ((192, 200), 'reparsec.sequence.sym', 'sym', (['"""g"""'], {}), "('g')\n", (195, 200), False, 'from reparsec.sequence import sym\n'), ((205, 213), 'reparsec.sequence.sym', 'sym', (['"""h"""'], {}), "('h')\n", (208, 213), False, 'from reparsec.sequence import sym\n'), ((222, 230), 'reparsec.sequence.sym', 'sym', (['""","""'], {}), "(',')\n", (225, 230), False, 'from reparsec.sequence import sym\n'), ((3534, 3595), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""parser, data, value"""', 'DATA_POSITIVE'], {}), "('parser, data, value', DATA_POSITIVE)\n", (3557, 3595), False, 'import pytest\n')]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'plotConfigTemplate.ui'
#
# Created: Sat Apr 21 14:42:02 2012
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(258, 605)
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.averageGroup = QtGui.QGroupBox(Form)
self.averageGroup.setGeometry(QtCore.QRect(10, 200, 242, 182))
self.averageGroup.setToolTip(QtGui.QApplication.translate("Form", "Display averages of the curves displayed in this plot. The parameter list allows you to choose parameters to average over (if any are available).", None, QtGui.QApplication.UnicodeUTF8))
self.averageGroup.setTitle(QtGui.QApplication.translate("Form", "Average", None, QtGui.QApplication.UnicodeUTF8))
self.averageGroup.setCheckable(True)
self.averageGroup.setChecked(False)
self.averageGroup.setObjectName(_fromUtf8("averageGroup"))
self.gridLayout_5 = QtGui.QGridLayout(self.averageGroup)
self.gridLayout_5.setMargin(0)
self.gridLayout_5.setSpacing(0)
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.avgParamList = QtGui.QListWidget(self.averageGroup)
self.avgParamList.setObjectName(_fromUtf8("avgParamList"))
self.gridLayout_5.addWidget(self.avgParamList, 0, 0, 1, 1)
self.decimateGroup = QtGui.QGroupBox(Form)
self.decimateGroup.setGeometry(QtCore.QRect(0, 70, 242, 160))
self.decimateGroup.setTitle(QtGui.QApplication.translate("Form", "Downsample", None, QtGui.QApplication.UnicodeUTF8))
self.decimateGroup.setCheckable(True)
self.decimateGroup.setObjectName(_fromUtf8("decimateGroup"))
self.gridLayout_4 = QtGui.QGridLayout(self.decimateGroup)
self.gridLayout_4.setMargin(0)
self.gridLayout_4.setSpacing(0)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.manualDecimateRadio = QtGui.QRadioButton(self.decimateGroup)
self.manualDecimateRadio.setText(QtGui.QApplication.translate("Form", "Manual", None, QtGui.QApplication.UnicodeUTF8))
self.manualDecimateRadio.setChecked(True)
self.manualDecimateRadio.setObjectName(_fromUtf8("manualDecimateRadio"))
self.gridLayout_4.addWidget(self.manualDecimateRadio, 0, 0, 1, 1)
self.downsampleSpin = QtGui.QSpinBox(self.decimateGroup)
self.downsampleSpin.setMinimum(1)
self.downsampleSpin.setMaximum(100000)
self.downsampleSpin.setProperty("value", 1)
self.downsampleSpin.setObjectName(_fromUtf8("downsampleSpin"))
self.gridLayout_4.addWidget(self.downsampleSpin, 0, 1, 1, 1)
self.autoDecimateRadio = QtGui.QRadioButton(self.decimateGroup)
self.autoDecimateRadio.setText(QtGui.QApplication.translate("Form", "Auto", None, QtGui.QApplication.UnicodeUTF8))
self.autoDecimateRadio.setChecked(False)
self.autoDecimateRadio.setObjectName(_fromUtf8("autoDecimateRadio"))
self.gridLayout_4.addWidget(self.autoDecimateRadio, 1, 0, 1, 1)
self.maxTracesCheck = QtGui.QCheckBox(self.decimateGroup)
self.maxTracesCheck.setToolTip(QtGui.QApplication.translate("Form", "If multiple curves are displayed in this plot, check this box to limit the number of traces that are displayed.", None, QtGui.QApplication.UnicodeUTF8))
self.maxTracesCheck.setText(QtGui.QApplication.translate("Form", "Max Traces:", None, QtGui.QApplication.UnicodeUTF8))
self.maxTracesCheck.setObjectName(_fromUtf8("maxTracesCheck"))
self.gridLayout_4.addWidget(self.maxTracesCheck, 2, 0, 1, 1)
self.maxTracesSpin = QtGui.QSpinBox(self.decimateGroup)
self.maxTracesSpin.setToolTip(QtGui.QApplication.translate("Form", "If multiple curves are displayed in this plot, check \"Max Traces\" and set this value to limit the number of traces that are displayed.", None, QtGui.QApplication.UnicodeUTF8))
self.maxTracesSpin.setObjectName(_fromUtf8("maxTracesSpin"))
self.gridLayout_4.addWidget(self.maxTracesSpin, 2, 1, 1, 1)
self.forgetTracesCheck = QtGui.QCheckBox(self.decimateGroup)
self.forgetTracesCheck.setToolTip(QtGui.QApplication.translate("Form", "If MaxTraces is checked, remove curves from memory after they are hidden (saves memory, but traces can not be un-hidden).", None, QtGui.QApplication.UnicodeUTF8))
self.forgetTracesCheck.setText(QtGui.QApplication.translate("Form", "Forget hidden traces", None, QtGui.QApplication.UnicodeUTF8))
self.forgetTracesCheck.setObjectName(_fromUtf8("forgetTracesCheck"))
self.gridLayout_4.addWidget(self.forgetTracesCheck, 3, 0, 1, 2)
self.transformGroup = QtGui.QFrame(Form)
self.transformGroup.setGeometry(QtCore.QRect(0, 0, 154, 79))
self.transformGroup.setObjectName(_fromUtf8("transformGroup"))
self.gridLayout = QtGui.QGridLayout(self.transformGroup)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.fftCheck = QtGui.QCheckBox(self.transformGroup)
self.fftCheck.setText(QtGui.QApplication.translate("Form", "Power Spectrum (FFT)", None, QtGui.QApplication.UnicodeUTF8))
self.fftCheck.setObjectName(_fromUtf8("fftCheck"))
self.gridLayout.addWidget(self.fftCheck, 0, 0, 1, 1)
self.logXCheck = QtGui.QCheckBox(self.transformGroup)
self.logXCheck.setText(QtGui.QApplication.translate("Form", "Log X", None, QtGui.QApplication.UnicodeUTF8))
self.logXCheck.setObjectName(_fromUtf8("logXCheck"))
self.gridLayout.addWidget(self.logXCheck, 1, 0, 1, 1)
self.logYCheck = QtGui.QCheckBox(self.transformGroup)
self.logYCheck.setText(QtGui.QApplication.translate("Form", "Log Y", None, QtGui.QApplication.UnicodeUTF8))
self.logYCheck.setObjectName(_fromUtf8("logYCheck"))
self.gridLayout.addWidget(self.logYCheck, 2, 0, 1, 1)
self.pointsGroup = QtGui.QGroupBox(Form)
self.pointsGroup.setGeometry(QtCore.QRect(10, 550, 234, 58))
self.pointsGroup.setTitle(QtGui.QApplication.translate("Form", "Points", None, QtGui.QApplication.UnicodeUTF8))
self.pointsGroup.setCheckable(True)
self.pointsGroup.setObjectName(_fromUtf8("pointsGroup"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.pointsGroup)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.autoPointsCheck = QtGui.QCheckBox(self.pointsGroup)
self.autoPointsCheck.setText(QtGui.QApplication.translate("Form", "Auto", None, QtGui.QApplication.UnicodeUTF8))
self.autoPointsCheck.setChecked(True)
self.autoPointsCheck.setObjectName(_fromUtf8("autoPointsCheck"))
self.verticalLayout_5.addWidget(self.autoPointsCheck)
self.gridGroup = QtGui.QFrame(Form)
self.gridGroup.setGeometry(QtCore.QRect(10, 460, 221, 81))
self.gridGroup.setObjectName(_fromUtf8("gridGroup"))
self.gridLayout_2 = QtGui.QGridLayout(self.gridGroup)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.xGridCheck = QtGui.QCheckBox(self.gridGroup)
self.xGridCheck.setText(QtGui.QApplication.translate("Form", "Show X Grid", None, QtGui.QApplication.UnicodeUTF8))
self.xGridCheck.setObjectName(_fromUtf8("xGridCheck"))
self.gridLayout_2.addWidget(self.xGridCheck, 0, 0, 1, 2)
self.yGridCheck = QtGui.QCheckBox(self.gridGroup)
self.yGridCheck.setText(QtGui.QApplication.translate("Form", "Show Y Grid", None, QtGui.QApplication.UnicodeUTF8))
self.yGridCheck.setObjectName(_fromUtf8("yGridCheck"))
self.gridLayout_2.addWidget(self.yGridCheck, 1, 0, 1, 2)
self.gridAlphaSlider = QtGui.QSlider(self.gridGroup)
self.gridAlphaSlider.setMaximum(255)
self.gridAlphaSlider.setProperty("value", 70)
self.gridAlphaSlider.setOrientation(QtCore.Qt.Horizontal)
self.gridAlphaSlider.setObjectName(_fromUtf8("gridAlphaSlider"))
self.gridLayout_2.addWidget(self.gridAlphaSlider, 2, 1, 1, 1)
self.label = QtGui.QLabel(self.gridGroup)
self.label.setText(QtGui.QApplication.translate("Form", "Opacity", None, QtGui.QApplication.UnicodeUTF8))
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_2.addWidget(self.label, 2, 0, 1, 1)
self.alphaGroup = QtGui.QGroupBox(Form)
self.alphaGroup.setGeometry(QtCore.QRect(10, 390, 234, 60))
self.alphaGroup.setTitle(QtGui.QApplication.translate("Form", "Alpha", None, QtGui.QApplication.UnicodeUTF8))
self.alphaGroup.setCheckable(True)
self.alphaGroup.setObjectName(_fromUtf8("alphaGroup"))
self.horizontalLayout = QtGui.QHBoxLayout(self.alphaGroup)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.autoAlphaCheck = QtGui.QCheckBox(self.alphaGroup)
self.autoAlphaCheck.setText(QtGui.QApplication.translate("Form", "Auto", None, QtGui.QApplication.UnicodeUTF8))
self.autoAlphaCheck.setChecked(False)
self.autoAlphaCheck.setObjectName(_fromUtf8("autoAlphaCheck"))
self.horizontalLayout.addWidget(self.autoAlphaCheck)
self.alphaSlider = QtGui.QSlider(self.alphaGroup)
self.alphaSlider.setMaximum(1000)
self.alphaSlider.setProperty("value", 1000)
self.alphaSlider.setOrientation(QtCore.Qt.Horizontal)
self.alphaSlider.setObjectName(_fromUtf8("alphaSlider"))
self.horizontalLayout.addWidget(self.alphaSlider)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
pass
|
[
"PyQt4.QtGui.QGroupBox",
"PyQt4.QtGui.QApplication.translate",
"PyQt4.QtGui.QSpinBox",
"PyQt4.QtGui.QCheckBox",
"PyQt4.QtGui.QGridLayout",
"PyQt4.QtGui.QLabel",
"PyQt4.QtGui.QHBoxLayout",
"PyQt4.QtGui.QSlider",
"PyQt4.QtCore.QMetaObject.connectSlotsByName",
"PyQt4.QtGui.QVBoxLayout",
"PyQt4.QtGui.QRadioButton",
"PyQt4.QtCore.QRect",
"PyQt4.QtGui.QListWidget",
"PyQt4.QtGui.QFrame"
] |
[((637, 658), 'PyQt4.QtGui.QGroupBox', 'QtGui.QGroupBox', (['Form'], {}), '(Form)\n', (652, 658), False, 'from PyQt4 import QtCore, QtGui\n'), ((1298, 1334), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', (['self.averageGroup'], {}), '(self.averageGroup)\n', (1315, 1334), False, 'from PyQt4 import QtCore, QtGui\n'), ((1509, 1545), 'PyQt4.QtGui.QListWidget', 'QtGui.QListWidget', (['self.averageGroup'], {}), '(self.averageGroup)\n', (1526, 1545), False, 'from PyQt4 import QtCore, QtGui\n'), ((1709, 1730), 'PyQt4.QtGui.QGroupBox', 'QtGui.QGroupBox', (['Form'], {}), '(Form)\n', (1724, 1730), False, 'from PyQt4 import QtCore, QtGui\n'), ((2070, 2107), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', (['self.decimateGroup'], {}), '(self.decimateGroup)\n', (2087, 2107), False, 'from PyQt4 import QtCore, QtGui\n'), ((2289, 2327), 'PyQt4.QtGui.QRadioButton', 'QtGui.QRadioButton', (['self.decimateGroup'], {}), '(self.decimateGroup)\n', (2307, 2327), False, 'from PyQt4 import QtCore, QtGui\n'), ((2690, 2724), 'PyQt4.QtGui.QSpinBox', 'QtGui.QSpinBox', (['self.decimateGroup'], {}), '(self.decimateGroup)\n', (2704, 2724), False, 'from PyQt4 import QtCore, QtGui\n'), ((3039, 3077), 'PyQt4.QtGui.QRadioButton', 'QtGui.QRadioButton', (['self.decimateGroup'], {}), '(self.decimateGroup)\n', (3057, 3077), False, 'from PyQt4 import QtCore, QtGui\n'), ((3429, 3464), 'PyQt4.QtGui.QCheckBox', 'QtGui.QCheckBox', (['self.decimateGroup'], {}), '(self.decimateGroup)\n', (3444, 3464), False, 'from PyQt4 import QtCore, QtGui\n'), ((3991, 4025), 'PyQt4.QtGui.QSpinBox', 'QtGui.QSpinBox', (['self.decimateGroup'], {}), '(self.decimateGroup)\n', (4005, 4025), False, 'from PyQt4 import QtCore, QtGui\n'), ((4450, 4485), 'PyQt4.QtGui.QCheckBox', 'QtGui.QCheckBox', (['self.decimateGroup'], {}), '(self.decimateGroup)\n', (4465, 4485), False, 'from PyQt4 import QtCore, QtGui\n'), ((5047, 5065), 'PyQt4.QtGui.QFrame', 'QtGui.QFrame', (['Form'], {}), '(Form)\n', (5059, 5065), False, 'from PyQt4 import QtCore, 
QtGui\n'), ((5232, 5270), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', (['self.transformGroup'], {}), '(self.transformGroup)\n', (5249, 5270), False, 'from PyQt4 import QtCore, QtGui\n'), ((5358, 5394), 'PyQt4.QtGui.QCheckBox', 'QtGui.QCheckBox', (['self.transformGroup'], {}), '(self.transformGroup)\n', (5373, 5394), False, 'from PyQt4 import QtCore, QtGui\n'), ((5670, 5706), 'PyQt4.QtGui.QCheckBox', 'QtGui.QCheckBox', (['self.transformGroup'], {}), '(self.transformGroup)\n', (5685, 5706), False, 'from PyQt4 import QtCore, QtGui\n'), ((5971, 6007), 'PyQt4.QtGui.QCheckBox', 'QtGui.QCheckBox', (['self.transformGroup'], {}), '(self.transformGroup)\n', (5986, 6007), False, 'from PyQt4 import QtCore, QtGui\n'), ((6274, 6295), 'PyQt4.QtGui.QGroupBox', 'QtGui.QGroupBox', (['Form'], {}), '(Form)\n', (6289, 6295), False, 'from PyQt4 import QtCore, QtGui\n'), ((6626, 6661), 'PyQt4.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', (['self.pointsGroup'], {}), '(self.pointsGroup)\n', (6643, 6661), False, 'from PyQt4 import QtCore, QtGui\n'), ((6768, 6801), 'PyQt4.QtGui.QCheckBox', 'QtGui.QCheckBox', (['self.pointsGroup'], {}), '(self.pointsGroup)\n', (6783, 6801), False, 'from PyQt4 import QtCore, QtGui\n'), ((7129, 7147), 'PyQt4.QtGui.QFrame', 'QtGui.QFrame', (['Form'], {}), '(Form)\n', (7141, 7147), False, 'from PyQt4 import QtCore, QtGui\n'), ((7304, 7337), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', (['self.gridGroup'], {}), '(self.gridGroup)\n', (7321, 7337), False, 'from PyQt4 import QtCore, QtGui\n'), ((7431, 7462), 'PyQt4.QtGui.QCheckBox', 'QtGui.QCheckBox', (['self.gridGroup'], {}), '(self.gridGroup)\n', (7446, 7462), False, 'from PyQt4 import QtCore, QtGui\n'), ((7740, 7771), 'PyQt4.QtGui.QCheckBox', 'QtGui.QCheckBox', (['self.gridGroup'], {}), '(self.gridGroup)\n', (7755, 7771), False, 'from PyQt4 import QtCore, QtGui\n'), ((8054, 8083), 'PyQt4.QtGui.QSlider', 'QtGui.QSlider', (['self.gridGroup'], {}), '(self.gridGroup)\n', (8067, 8083), False, 'from PyQt4 import 
QtCore, QtGui\n'), ((8413, 8441), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['self.gridGroup'], {}), '(self.gridGroup)\n', (8425, 8441), False, 'from PyQt4 import QtCore, QtGui\n'), ((8695, 8716), 'PyQt4.QtGui.QGroupBox', 'QtGui.QGroupBox', (['Form'], {}), '(Form)\n', (8710, 8716), False, 'from PyQt4 import QtCore, QtGui\n'), ((9041, 9075), 'PyQt4.QtGui.QHBoxLayout', 'QtGui.QHBoxLayout', (['self.alphaGroup'], {}), '(self.alphaGroup)\n', (9058, 9075), False, 'from PyQt4 import QtCore, QtGui\n'), ((9181, 9213), 'PyQt4.QtGui.QCheckBox', 'QtGui.QCheckBox', (['self.alphaGroup'], {}), '(self.alphaGroup)\n', (9196, 9213), False, 'from PyQt4 import QtCore, QtGui\n'), ((9539, 9569), 'PyQt4.QtGui.QSlider', 'QtGui.QSlider', (['self.alphaGroup'], {}), '(self.alphaGroup)\n', (9552, 9569), False, 'from PyQt4 import QtCore, QtGui\n'), ((9891, 9934), 'PyQt4.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Form'], {}), '(Form)\n', (9928, 9934), False, 'from PyQt4 import QtCore, QtGui\n'), ((525, 612), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Form"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Form', None, QtGui.QApplication.\n UnicodeUTF8)\n", (553, 612), False, 'from PyQt4 import QtCore, QtGui\n'), ((697, 728), 'PyQt4.QtCore.QRect', 'QtCore.QRect', (['(10)', '(200)', '(242)', '(182)'], {}), '(10, 200, 242, 182)\n', (709, 728), False, 'from PyQt4 import QtCore, QtGui\n'), ((767, 999), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Display averages of the curves displayed in this plot. The parameter list allows you to choose parameters to average over (if any are available)."""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form',\n 'Display averages of the curves displayed in this plot. 
The parameter list allows you to choose parameters to average over (if any are available).'\n , None, QtGui.QApplication.UnicodeUTF8)\n", (795, 999), False, 'from PyQt4 import QtCore, QtGui\n'), ((1027, 1117), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Average"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Average', None, QtGui.QApplication.\n UnicodeUTF8)\n", (1055, 1117), False, 'from PyQt4 import QtCore, QtGui\n'), ((1770, 1799), 'PyQt4.QtCore.QRect', 'QtCore.QRect', (['(0)', '(70)', '(242)', '(160)'], {}), '(0, 70, 242, 160)\n', (1782, 1799), False, 'from PyQt4 import QtCore, QtGui\n'), ((1837, 1930), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Downsample"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Downsample', None, QtGui.QApplication\n .UnicodeUTF8)\n", (1865, 1930), False, 'from PyQt4 import QtCore, QtGui\n'), ((2369, 2458), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Manual"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Manual', None, QtGui.QApplication.\n UnicodeUTF8)\n", (2397, 2458), False, 'from PyQt4 import QtCore, QtGui\n'), ((3117, 3204), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Auto"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Auto', None, QtGui.QApplication.\n UnicodeUTF8)\n", (3145, 3204), False, 'from PyQt4 import QtCore, QtGui\n'), ((3504, 3702), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""If multiple curves are displayed in this plot, check this box to limit the number of traces that are displayed."""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form',\n 'If multiple curves are displayed in this plot, check this box to limit the number of traces that are displayed.'\n , None, QtGui.QApplication.UnicodeUTF8)\n", (3532, 3702), False, 
'from PyQt4 import QtCore, QtGui\n'), ((3731, 3825), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Max Traces:"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Max Traces:', None, QtGui.\n QApplication.UnicodeUTF8)\n", (3759, 3825), False, 'from PyQt4 import QtCore, QtGui\n'), ((4064, 4285), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""If multiple curves are displayed in this plot, check "Max Traces" and set this value to limit the number of traces that are displayed."""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), '(\'Form\',\n \'If multiple curves are displayed in this plot, check "Max Traces" and set this value to limit the number of traces that are displayed.\'\n , None, QtGui.QApplication.UnicodeUTF8)\n', (4092, 4285), False, 'from PyQt4 import QtCore, QtGui\n'), ((4528, 4736), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""If MaxTraces is checked, remove curves from memory after they are hidden (saves memory, but traces can not be un-hidden)."""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form',\n 'If MaxTraces is checked, remove curves from memory after they are hidden (saves memory, but traces can not be un-hidden).'\n , None, QtGui.QApplication.UnicodeUTF8)\n", (4556, 4736), False, 'from PyQt4 import QtCore, QtGui\n'), ((4768, 4871), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Forget hidden traces"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Forget hidden traces', None, QtGui.\n QApplication.UnicodeUTF8)\n", (4796, 4871), False, 'from PyQt4 import QtCore, QtGui\n'), ((5106, 5133), 'PyQt4.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(154)', '(79)'], {}), '(0, 0, 154, 79)\n', (5118, 5133), False, 'from PyQt4 import QtCore, QtGui\n'), ((5425, 5528), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', 
'"""Power Spectrum (FFT)"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Power Spectrum (FFT)', None, QtGui.\n QApplication.UnicodeUTF8)\n", (5453, 5528), False, 'from PyQt4 import QtCore, QtGui\n'), ((5738, 5826), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Log X"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Log X', None, QtGui.QApplication.\n UnicodeUTF8)\n", (5766, 5826), False, 'from PyQt4 import QtCore, QtGui\n'), ((6039, 6127), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Log Y"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Log Y', None, QtGui.QApplication.\n UnicodeUTF8)\n", (6067, 6127), False, 'from PyQt4 import QtCore, QtGui\n'), ((6333, 6363), 'PyQt4.QtCore.QRect', 'QtCore.QRect', (['(10)', '(550)', '(234)', '(58)'], {}), '(10, 550, 234, 58)\n', (6345, 6363), False, 'from PyQt4 import QtCore, QtGui\n'), ((6399, 6488), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Points"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Points', None, QtGui.QApplication.\n UnicodeUTF8)\n", (6427, 6488), False, 'from PyQt4 import QtCore, QtGui\n'), ((6839, 6926), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Auto"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Auto', None, QtGui.QApplication.\n UnicodeUTF8)\n", (6867, 6926), False, 'from PyQt4 import QtCore, QtGui\n'), ((7183, 7213), 'PyQt4.QtCore.QRect', 'QtCore.QRect', (['(10)', '(460)', '(221)', '(81)'], {}), '(10, 460, 221, 81)\n', (7195, 7213), False, 'from PyQt4 import QtCore, QtGui\n'), ((7495, 7589), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Show X Grid"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Show X Grid', None, QtGui.\n QApplication.UnicodeUTF8)\n", (7523, 7589), False, 'from PyQt4 import 
QtCore, QtGui\n'), ((7804, 7898), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Show Y Grid"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Show Y Grid', None, QtGui.\n QApplication.UnicodeUTF8)\n", (7832, 7898), False, 'from PyQt4 import QtCore, QtGui\n'), ((8469, 8559), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Opacity"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Opacity', None, QtGui.QApplication.\n UnicodeUTF8)\n", (8497, 8559), False, 'from PyQt4 import QtCore, QtGui\n'), ((8753, 8783), 'PyQt4.QtCore.QRect', 'QtCore.QRect', (['(10)', '(390)', '(234)', '(60)'], {}), '(10, 390, 234, 60)\n', (8765, 8783), False, 'from PyQt4 import QtCore, QtGui\n'), ((8818, 8906), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Alpha"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Alpha', None, QtGui.QApplication.\n UnicodeUTF8)\n", (8846, 8906), False, 'from PyQt4 import QtCore, QtGui\n'), ((9250, 9337), 'PyQt4.QtGui.QApplication.translate', 'QtGui.QApplication.translate', (['"""Form"""', '"""Auto"""', 'None', 'QtGui.QApplication.UnicodeUTF8'], {}), "('Form', 'Auto', None, QtGui.QApplication.\n UnicodeUTF8)\n", (9278, 9337), False, 'from PyQt4 import QtCore, QtGui\n')]
|
import uuid
from arjuna.interact.gui.gom.impl.namestore import GuiNameStore
from arjuna.interact.gui.gom.impl.gui import Gui
from arjuna.tpi.enums import ArjunaOption
class GuiHandlerManager:
def __init__(self, project_config):
self.__name_store = GuiNameStore()
self.__namespace_dir = project_config.arjuna_config.value(ArjunaOption.GUIAUTO_NAMESPACE_DIR)
self.__gui_map = {}
@property
def name_store(self):
return self.__name_store
@property
def namespace_dir(self):
return self.__namespace_dir
def add_to_gui_map(self, setu_id, gui_handler):
self.__gui_map[setu_id] = gui_handler
def take_action(self, automator_handler, action_type, json_args):
if action_type == GuiActionType.CREATE_GUI:
gui_handler = self.create_gui(automator_handler, json_args)
return {"guiSetuId" : gui_handler.setu_id}
elif action_type == GuiActionType.CREATE_CHILD_GUI:
return self.create_child_gui(automator_handler, json_args)
else:
raise Exception("Unknown action got for Gui Mananger: " + action_type.name)
def create_gui(self, automator_handler, json_args):
label = json_args["label"]
defPath = json_args["defFileName"]
gui = Gui(self.__name_store, self.__namespace_dir, automator_handler.automator, label, defPath)
gui_handler = GuiHandler(self, automator_handler, gui)
self.add_to_gui_map(gui.setu_id, gui_handler)
return gui_handler
def create_child_gui(self, automator_handler, json_args):
parent_gui_setu_id = json_args.pop("parentGuiSetuId")
parent_gui_handler = self.get_gui_handler(parent_gui_setu_id)
return parent_gui_handler.create_gui(automator_handler, **json_args)
def get_gui_handler(self, setu_id):
return self.__gui_map[setu_id]
# Arg names of methods show JSON names, so don't follow Python conventions.
class GuiHandler:
def __init__(self, gui_mgr, automator_handler, gui):
self.__guimgr = gui_mgr
self.__gui = gui
self.__automator_handler = automator_handler
self.__automator = None
@property
def gui(self):
return self.__gui
@property
def setu_id(self):
return self.__gui.setu_id
@property
def automator_handler(self):
return self.__automator_handler
def create_gui(self, automator_handler, label=None, name=None, qualName=None, defFileName=None):
gui = Gui(self.__guimgr.name_store, self.__guimgr.namespace_dir, automator_handler.automator, label, defFileName)
gui_handler = GuiHandler(self.__guimgr, automator_handler, gui)
self.__guimgr.add_to_gui_map(gui.setu_id, gui_handler)
return {"guiSetuId": gui.setu_id}
def take_action(self, action_type, json_args):
action_id = HANDLER_MAP[action_type]
method_name, replaceable = HANDLER_NAME_MAP[action_id]
return getattr(self, method_name)(action_type.name.replace(replaceable, "").lower(), json_args)
def take_direct_action(self, action, json_args):
Handler._pop_arg(json_args, "automatorSetuId")
return getattr(self, action)(**json_args)
def define_element(self, locators):
emd = self.gui.get_emd(locators)
return self.automator_handler.define_element_with_emd(emd)
def define_multielement(self, locators):
emd = self.gui.get_emd(locators)
return self.automator_handler.define_multielement_with_emd(emd)
def define_dropdown(self, locators):
emd = self.gui.get_emd(locators)
return self.automator_handler.define_dropdown_with_emd(emd)
def define_radiogroup(self, locators):
emd = self.gui.get_emd(locators)
return self.automator_handler.define_radiogroup_with_emd(emd)
def define_frame(self, locators):
emd = self.gui.get_emd(locators)
return self.automator_handler.define_frame_with_emd(emd)
def define_alert(self):
return self.automator_handler.define_alert()
def define_main_window(self):
return self.automator_handler.get_main_window()
def set_slomo(self, on, interval=None):
self.automator_handler.set_slomo(on, interval)
def take_window_action(self, action, json_dict):
return self.automator_handler.take_window_action(action, json_dict)
def take_main_window_action(self, action, json_dict):
return self.automator_handler.take_main_window_action(action, json_dict)
def take_child_window_action(self, action, json_dict):
return self.automator_handler.take_child_window_action(action, json_dict)
def take_browser_action(self, action, json_dict):
return self.automator_handler.take_browser_action(action, json_dict)
def take_element_action(self, action, json_dict):
return self.automator_handler.take_element_action(action, json_dict)
def take_multielement_action(self, action, json_dict):
return self.automator_handler.take_multielement_action(action, json_dict)
def take_dropdown_action(self, action, json_dict):
return self.automator_handler.take_dropdown_action(action, json_dict)
def take_radiogroup_action(self, action, json_dict):
return self.automator_handler.take_radiogroup_action(action, json_dict)
def take_alert_action(self, action, json_dict):
return self.automator_handler.take_alert_action(action, json_dict)
def take_domroot_action(self, action, json_dict):
return self.automator_handler.take_domroot_action(action, json_dict)
def take_frame_action(self, action, json_dict):
return self.automator_handler.take_frame_action(action, json_dict)
|
[
"arjuna.interact.gui.gom.impl.gui.Gui",
"arjuna.interact.gui.gom.impl.namestore.GuiNameStore"
] |
[((262, 276), 'arjuna.interact.gui.gom.impl.namestore.GuiNameStore', 'GuiNameStore', ([], {}), '()\n', (274, 276), False, 'from arjuna.interact.gui.gom.impl.namestore import GuiNameStore\n'), ((1292, 1385), 'arjuna.interact.gui.gom.impl.gui.Gui', 'Gui', (['self.__name_store', 'self.__namespace_dir', 'automator_handler.automator', 'label', 'defPath'], {}), '(self.__name_store, self.__namespace_dir, automator_handler.automator,\n label, defPath)\n', (1295, 1385), False, 'from arjuna.interact.gui.gom.impl.gui import Gui\n'), ((2510, 2621), 'arjuna.interact.gui.gom.impl.gui.Gui', 'Gui', (['self.__guimgr.name_store', 'self.__guimgr.namespace_dir', 'automator_handler.automator', 'label', 'defFileName'], {}), '(self.__guimgr.name_store, self.__guimgr.namespace_dir,\n automator_handler.automator, label, defFileName)\n', (2513, 2621), False, 'from arjuna.interact.gui.gom.impl.gui import Gui\n')]
|
import logging
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from profess.Http_commands import Http_commands
import os
from data_management.utils import Utils
from plotter.comparison import Comparison
logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s: %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__file__)
class Plotter:
def __init__(self, url):
self.url_dsf_se = url
self.httpClass = Http_commands()
self.utils = Utils()
self.compare = Comparison()
logger.debug("Plotter created")
def get_connection_topology(self, id):
query = self.url_dsf_se + "/se/simulation/" + id + "/connections"
response = self.httpClass.get(query)
json_response = response.json()
# logger.debug("type json_response "+str(type(json_response)))
if response.status_code == 200:
return json_response
else:
logger.error("Failed to get voltages, response from dsf-se:" + str(json_response))
return 1
def get_data_voltages(self, id, name=None, phase_number = 1):
query = self.url_dsf_se+"/se/simulation/"+ id +"/voltages"
logger.debug("query: "+str(query))
response = self.httpClass.get(query)
json_response = response.json()
#logger.debug("json_response "+str(json_response))
if response.status_code == 200:
if name == None:
data = json_response["voltages"]
data_to_return = {}
for node_name, value in data.items():
if not node_name in data_to_return.keys():
data_to_return[node_name] = {}
for phase_name, value_phase in value.items():
if phase_name == "Phase "+str(phase_number):
data_to_return[node_name] = value_phase
len_data = len(value_phase)
limit_list_high = [1.1 for i in range(len_data)]
limit_list_low = [0.9 for i in range(len_data)]
data_to_return["higher limit"] = limit_list_high
data_to_return["lower limit"] = limit_list_low
return data_to_return
else:
data = json_response["voltages"][name.split(".")[0]]
data_to_return = {}
data_to_return[name] = {}
for phase_name, value_phase in data.items():
if phase_name == "Phase " + str(phase_number):
data_to_return[name] = value_phase
len_data = len(value_phase)
limit_list_high = [1.1 for i in range(len_data)]
limit_list_low = [0.9 for i in range(len_data)]
data_to_return["higher limit"] = limit_list_high
data_to_return["lower limit"] = limit_list_low
return data_to_return
else:
logger.error("Failed to get voltages, response from dsf-se:" + str(json_response))
return 1
def get_data_soc(self, id, element="Storages", name=None):
possible_elements = ["EVs", "Storages"]
if not element in possible_elements:
logger.error("Wrong element. Possible elements " + str(possible_elements))
query = self.url_dsf_se + "/se/simulation/" + id + "/soc/"+element
logger.debug("query "+str(query))
response = self.httpClass.get(query)
json_response = response.json()
logger.debug("json_response "+str(json_response))
logger.debug("name "+str(name))
if response.status_code == 200:
if name == None:
return json_response["soc"][element]
else:
data_to_return = {}
data = json_response["soc"][element][name]
data_to_return[name] = [float(i) for i in data]
len_data= len(json_response["soc"][element][name])
return data_to_return
else:
logger.error("Failed to get voltages, response from dsf-se:" + str(json_response))
return 1
def get_data_usage_pv(self, id, element="Photovoltaic" ,name=None):
possible_elements = ["Photovoltaic"]
if not element in possible_elements:
logger.error("Wrong element. Possible elements " + str(possible_elements))
query = self.url_dsf_se + "/se/simulation/" + str(id) + "/usage/" + str(element)
response = self.httpClass.get(query)
json_response = response.json()
# logger.debug("type json_response "+str(type(json_response)))
if response.status_code == 200:
if name == None:
return json_response["usage"][element]
else:
data_to_return = {}
if name in json_response["usage"][element].keys():
data_to_return[name] = json_response["usage"][element][name]
elif name.lower() in json_response["usage"][element].keys():
data_to_return[name] = json_response["usage"][element][name.lower()]
return data_to_return
else:
logger.error("Failed to get usages, response from dsf-se:" + str(json_response))
return 1
def get_data_powers(self, id, element="Load", name=None):
possible_elements = ["Transformer","Load", "Photovoltaic", "EVs", "Storages"]
if not element in possible_elements:
logger.error("Wrong element. Possible elements "+str(possible_elements))
query = self.url_dsf_se+"/se/simulation/"+ str(id) +"/powers/" + str(element)
logger.debug("query "+str(query))
response = self.httpClass.get(query)
json_response = response.json()
logger.debug("json_response "+str(json_response))
if response.status_code == 200:
if name == None:
return json_response["powers"][element]
else:
data_to_return = {}
if name in json_response["powers"][element].keys():
data_to_return[name] = json_response["powers"][element][name]
elif name.lower() in json_response["powers"][element].keys():
data_to_return[name]=json_response["powers"][element][name.lower()]
return data_to_return
else:
logger.error("Failed to get voltages, response from dsf-se:" + str(json_response))
return 1
def get_data_voltages_at_node(self, id, node):
query = self.url_dsf_se+"/se/simulation/"+ id +"/voltages/"+node
response = self.httpClass.get(query)
json_response = response.json()
json_response=json_response["voltages"]
if response.status_code == 200:
return json_response
else:
logger.error("Failed to get voltages, response from dsf-se:" + str(json_response))
return 1
def get_active_powers(self, data_dict, phase_number=4):
new_data = {}
#logger.debug("data_dict "+str(data_dict))
if phase_number <= 3:
for name, values in data_dict.items():
if name not in new_data.keys():
new_data[name]={}
for phase, list_values in values.items():
if phase == "Phase "+str(phase_number):
power_per_phase=[complex(i).real for i in list_values]
new_data[name] = power_per_phase
else:
for name, values in data_dict.items():
if name not in new_data.keys():
new_data[name]={}
power_per_phase_1 =[]
power_per_phase_2 = []
power_per_phase_3 = []
for phase, list_values in values.items():
if phase == "Phase 1":
power_per_phase_1=[complex(i).real for i in list_values]
if phase == "Phase 2":
power_per_phase_2=[complex(i).real for i in list_values]
if phase == "Phase 3":
power_per_phase_3=[complex(i).real for i in list_values]
new_data[name] =[sum(x) for x in zip(power_per_phase_1, power_per_phase_2, power_per_phase_3)]
return new_data
def get_reactive_powers(self, data_dict, phase_number=4):
new_data = {}
if phase_number <= 3:
for name, values in data_dict.items():
if name not in new_data.keys():
new_data[name]={}
for phase, list_values in values.items():
if phase == "Phase "+str(phase_number):
power_per_phase=[complex(i).imag for i in list_values]
new_data[name] = power_per_phase
else:
for name, values in data_dict.items():
if name not in new_data.keys():
new_data[name]={}
power_per_phase_1 =[]
power_per_phase_2 = []
power_per_phase_3 = []
for phase, list_values in values.items():
if phase == "Phase 1":
power_per_phase_1=[complex(i).imag for i in list_values]
if phase == "Phase 2":
power_per_phase_2=[complex(i).imag for i in list_values]
if phase == "Phase 3":
power_per_phase_3=[complex(i).imag for i in list_values]
new_data[name] =[sum(x) for x in zip(power_per_phase_1, power_per_phase_2, power_per_phase_3)]
return new_data
def add_grid_values(self, data_dict_PV=None, data_dict_Load=None, data_dict_ESS=None, data_dict_EV=None):
data_list_PV = None
data_list_ESS = None
data_list_Load = None
data_list_EV = None
if not data_dict_PV == None and not data_dict_PV == {}:
for element, values in data_dict_PV.items():
data_list_PV = values
if not data_dict_Load == None and not data_dict_Load == {}:
for element, values in data_dict_Load.items():
data_list_Load = values
if not data_dict_ESS == None and not data_dict_ESS == {}:
for element, values in data_dict_ESS.items():
data_list_ESS = values
if not data_dict_EV == None and not data_dict_EV == {}:
for element, values in data_dict_EV.items():
data_list_EV = values
power_consumption =[]
power_generation = []
if not data_list_Load == None and not data_dict_Load == {} and not data_list_EV == None and not data_dict_EV == {}:
power_consumption = [sum(x) for x in zip(data_list_Load, data_list_EV)]
elif not data_list_Load == None:
power_consumption = data_list_Load
else:
logger.error("No data for power consumption")
#return None
#logger.debug("power_consumption "+str(power_consumption))
if not data_list_PV == None and not data_dict_PV == {} and not data_list_ESS == None and not data_dict_ESS == {}:
power_generation = [sum(x) for x in zip(data_list_PV, data_list_ESS)]
elif not data_list_PV == None:
power_generation = [x for x in data_list_PV]
else:
logger.error("No data for power generation")
#logger.debug("power_generation " + str(power_generation))
if not power_consumption == [] and not power_generation==[]:
#logger.debug("power consumption "+str(power_consumption))
power_generation = [-x for x in power_generation]
#logger.debug("power generation " + str(power_generation))
power_grid = [sum(x) for x in zip(power_consumption, power_generation)]
#logger.debug("power_grid "+str(power_grid))
elif not power_consumption == []:
power_grid = power_consumption
elif not power_generation == []:
power_grid = [-x for x in power_generation]
#logger.debug("power grid "+str(power_grid))
#power_consumption = [0]*n
#power_load + power_EV = power_PV + power_ess + p_grid
return {"grid_power":power_grid}
def plot(self, dict_voltages, file_name = None, xlabel= None, ylabel=None):
logger.debug("Entering to plot")
data = dict_voltages
#data["test"]= list(dict_voltages.values())[0]
data2 = {}
if "voltage" in file_name:
data2["higher limit"] = dict_voltages["higher limit"]
data2["lower limit"] = dict_voltages["lower limit"]
if "higher limit" in data.keys():
data.pop("higher limit")
if "lower limit" in data.keys():
data.pop("lower limit")
#logger.debug("data "+str(data))
for key, value in data.items():
len_data = len(value)
#for key, value in data.items():
#logger.debug("data "+str(data) +" type "+str(type(value[0])))
#obj= pd.DataFrame.from_dict(data)
obj = pd.DataFrame.from_dict(data, dtype=np.float32)
#logger.debug("obj "+str(obj))
if not data2== {}:
obj2 = pd.DataFrame.from_dict(data2)
sns.set(context="paper")
sns.set(context="paper", rc={'figure.figsize': (14, 8), "lines.linewidth": 1.5}, font_scale=2)
sns.set_style("ticks", {'grid.color': '.8'})
fig = plt.figure(figsize=(15,10))
ax1 = fig.subplots()
#continent_colors = ["#009432", "#0652DD", "#EE5A24", "#9980FA", "#B53471" ]
#sns.set_palette(sns.hls_palette(8, l=.1, s=.8))
number_colors = len(data)
logger.debug("file_name in plot "+str(file_name))
if not file_name.find("voltage") == -1 or not file_name.find("soc") == -1 or not file_name.find("usage") == -1 or not file_name.find("comparison") == -1:
palete = {}
if number_colors == 1:
for key in data.keys():
palete[key] = "#0652DD"
logger.debug("palete " + str(palete))
else:
palete = sns.hls_palette(number_colors, l=.5, s=.8)
elif not file_name.find("powers") == -1:
palete = {}
logger.debug("data "+str(data.keys()))
for key in data.keys():
logger.debug("key "+str(key))
logger.debug("palete " + str(palete))
if not key.find("grid") == -1:
logger.debug("Entered grid")
palete[key] = "#ED4C67" #"#0<PASSWORD>DD"
if not key.find("pv") == -1:
logger.debug("Entered pv")
palete[key] = "#F79F1F"
if not key.find("storage") == -1:
logger.debug("Entered storage")
palete[key] = "#1B1464"
if not key.find("ev") == -1:
logger.debug("Entered ev")
palete[key] = "#833471"
if not key.find("transformer") == -1:
logger.debug("Entered transformer")
palete[key] = "#0652DD"
if not key.find("TR1") == -1:
logger.debug("Entered TR1")
palete[key] = "#0652DD"
if not key.find("load") == -1 or not key.find("consumer") == -1:
logger.debug("Entered load")
palete[key] = "#009432"
logger.debug("palete "+str(palete))
g = sns.lineplot(data=obj, sort=False, ax=ax1, dashes=False, palette= palete)
if not data2 == {}:
logger.debug("g1 present")
g1 = sns.lineplot(data=obj2, sort=False, ax=ax1, legend="full", dashes=[[4,2], [1,4]], palette={"higher limit": "#EA2027","lower limit":"#EA2027"}) # just limits and "--"
#g.set_ylim(99.7,100.3)
#leg = g._legend.texts
#logger.debug("legend "+str(leg))
#g.set_xlim(0, 48)
# g.set_xticks(np.arange(24))
plt.xticks(rotation=0)
if not xlabel == None:
g.set_xlabel(xlabel)
if not ylabel == None:
g.set_ylabel(ylabel)
g.yaxis.grid(True)
# g.xaxis.set_label_coords(0.5, -0.15)
# Put a legend to the right side
# Removed 'ax' from T.W.'s answer here aswell:
box = g.get_position()
#g.legend(framealpha=1, frameon=True)
g.set_position([box.x0, box.y0, box.width * 0.85, box.height]) # resize position
g.legend(loc='center right', bbox_to_anchor=(1.25, 0.5), ncol=1)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
if not file_name == None:
file_name = file_name + ".pdf"#"./" + file_name + ".jpg"
path = self.utils.get_path(file_name)
self.utils.create_path(path)
logger.debug("filename "+str(path))
plt.savefig(path, dpi=1080, format="pdf", bbox_inches="tight")
#import sys
#sys.exit(0)
else:
plt.show()
def create_plots_for_socs(self, id, folder_name):
connections = self.get_connection_topology(id)
logger.debug("connections " + str(connections))
######################################################################
#################### powers ##########################################
######################################################################
for bus_name, element_object in connections.items():
logger.debug("##############################################################################")
logger.debug("bus name " + str(bus_name))
logger.debug("##############################################################################")
base = os.path.join("results", folder_name, bus_name)
element_type_to_get = None
for element_type, element_names in element_object.items():
if element_type == "storageUnits":
element_type_to_get = "Storages"
for name in element_names:
logger.debug("------------------------------------------------------------------------------")
logger.debug("name in soc "+str(name))
logger.debug("------------------------------------------------------------------------------")
if not element_type_to_get == None:
if element_type == "storageUnits":
xlabel = "Timestamp [h]"
ylabel = "SoC [%]"
soc_ESS = self.get_data_soc(id, element_type_to_get, name)
file_name_soc = os.path.join(base, "soc_ESS")
logger.debug("file_name_soc ESS " + str(file_name_soc))
self.utils.store_data(file_name_soc+".json", soc_ESS)
self.plot(soc_ESS, file_name=file_name_soc, xlabel=xlabel, ylabel=ylabel)
if element_type == "chargingStations":
element_type_to_get = "EVs"
for name in element_names:
logger.debug("name "+str(name))
for name_cs, values_ev in name.items():
name_ev = values_ev["ev"]
logger.debug("name in soc "+str(name_ev)+ " in charging station "+str(name_cs))
if not element_type_to_get == None:
if element_type == "chargingStations":
xlabel = "Timestamp [h]"
ylabel = "SoC [%]"
soc_ESS = self.get_data_soc(id, element_type_to_get, name_ev)
file_name_soc = os.path.join(base, "soc_EV")
logger.debug("file_name_soc EV "+str(file_name_soc))
self.utils.store_data(file_name_soc+".json", soc_ESS)
self.plot(soc_ESS, file_name=file_name_soc, xlabel=xlabel, ylabel=ylabel)
def create_plots_for_powers(self, id, folder_name):
connections = self.get_connection_topology(id)
logger.debug("connections " + str(connections))
######################################################################
#################### powers ##########################################
######################################################################
for bus_name, element_object in connections.items():
#if bus_name == "node_116757":
logger.debug("##############################################################################")
logger.debug("bus name " + str(bus_name))
logger.debug("##############################################################################")
base = os.path.join("results", folder_name, bus_name)
file_name_P = os.path.join(base, "powers_P")
file_name_Q = os.path.join(base, "powers_Q")
logger.debug("file name P: " + str(file_name_P))
logger.debug("file name Q: " + str(file_name_Q))
phase_number = 4
element_type_to_get = None
real_powers = {"photovoltaics" :None, "loads":None, "storageUnits":None, "chargingStations":None}
reactive_powers = {"photovoltaics" :None, "loads":None, "storageUnits":None, "chargingStations":None}
for element_type, element_names in element_object.items():
logger.debug("element type "+str(element_type))
element_type_to_get = None
if element_type == "transformers":
element_type_to_get = "Transformer"
if element_type == "loads":
element_type_to_get = "Load"
elif element_type == "photovoltaics":
element_type_to_get = "Photovoltaic"
elif element_type == "storageUnits":
element_type_to_get = "Storages"
elif element_type == "chargingStations":
element_type_to_get = "EVs"
logger.debug("element_type_to_get "+str(element_type_to_get))
for name in element_names:
logger.debug("------------------------------------------------------------------------------")
logger.debug("element name "+str(name))
logger.debug("------------------------------------------------------------------------------")
if not element_type_to_get == None:
if element_type == "chargingStations":
for name_cs, values_ev in name.items():
name_ev = values_ev["ev"]
powers = self.get_data_powers(id, str(element_type_to_get), str(name_ev))
else:
powers = self.get_data_powers(id, str(element_type_to_get), str(name))
if not element_type in real_powers.keys():
real_powers[element_type]={}
real_powers[element_type] = self.get_active_powers(powers, phase_number)
if element_type == "chargingStations":
real_powers[element_type][name_ev] = [-1*x for x in real_powers[element_type][name_ev]]
if not element_type in reactive_powers.keys():
reactive_powers[element_type]={}
reactive_powers[element_type] = self.get_reactive_powers(powers, phase_number)
if element_type == "chargingStations":
reactive_powers[element_type][name_ev] = [-1*x for x in reactive_powers[element_type][name_ev]]
if not element_type == "transformers":
xlabel = "Timestamp [h]"
ylabel = "Power [kW]"
real_power_grid = self.add_grid_values(real_powers["photovoltaics"], real_powers["loads"], real_powers["storageUnits"],
real_powers["chargingStations"])
reactive_power_grid = self.add_grid_values(reactive_powers["photovoltaics"], reactive_powers["loads"], reactive_powers["storageUnits"],
reactive_powers["chargingStations"])
if not real_power_grid == None:
PVs = real_powers["photovoltaics"] or {}
loads = real_powers["loads"] or {}
ESS = real_powers["storageUnits"] or {}
EVs = real_powers["chargingStations"] or {}
total_real_powers = {**real_power_grid, **loads, **PVs, **ESS, **EVs, }
#logger.debug("total_real_powers " + str(total_real_powers))
self.utils.store_data(file_name_P + ".json", total_real_powers)
self.plot(total_real_powers, file_name=file_name_P, xlabel=xlabel, ylabel=ylabel)
if not reactive_power_grid == None:
PVs_reactive = reactive_powers["photovoltaics"] or {}
loads_reactive = reactive_powers["loads"] or {}
ESS_reactive = reactive_powers["storageUnits"] or {}
EVs_reactive = reactive_powers["chargingStations"] or {}
total_reactive_powers = {**reactive_power_grid, **loads_reactive, **PVs_reactive, **ESS_reactive, **EVs_reactive}
#logger.debug("total_reactive_powers " + str(total_reactive_powers))
self.utils.store_data(file_name_Q + ".json", total_reactive_powers)
self.plot(total_reactive_powers, file_name=file_name_Q, xlabel=xlabel, ylabel=ylabel)
else:
xlabel = "Timestamp [h]"
ylabel = "Power [kW]"
if not real_powers == None:
data = real_powers["transformers"]
self.utils.store_data(file_name_P + ".json",data )
self.plot(data, file_name=file_name_P, xlabel=xlabel, ylabel=ylabel)
if not reactive_powers == None:
data = reactive_powers["transformers"]
self.utils.store_data(file_name_Q + ".json", data)
self.plot(data, file_name=file_name_Q, xlabel=xlabel, ylabel=ylabel)
def create_plots_for_pv_usage(self, id, folder_name):
connections = self.get_connection_topology(id)
logger.debug("connections " + str(connections))
usage_pvs_in_percent = self.get_data_usage_pv(id,"Photovoltaic")
xlabel = "Timestamp [h]"
ylabel = "PV usage [%]"
for bus_name, element_object in connections.items():
logger.debug("##############################################################################")
logger.debug("bus name " + str(bus_name))
logger.debug("##############################################################################")
base = os.path.join("results", folder_name, bus_name)#"results/" + folder_name + "/" + bus_name + "/"
file_name = os.path.join(base, "usage_PV")
for element_type, element_names in element_object.items():
if element_type == "photovoltaics":
for name in element_names:
data_to_plot={}
for name_in_usage, usage_list in usage_pvs_in_percent.items():
if name == name_in_usage:
usage_list=[float(x) for x in usage_list]
data_to_plot={name_in_usage: usage_list}
logger.debug("usage " + str(data_to_plot))
#if name == "pv_VEAB3cqi-":
#import sys
#sys.exit(0)
self.utils.store_data(file_name + ".json", data_to_plot)
self.plot(data_to_plot, file_name=file_name, xlabel=xlabel, ylabel=ylabel)
def create_plots_for_voltages(self, id, folder_name):
logger.debug("id "+str(id))
connections = self.get_connection_topology(id)
logger.debug("connections " + str(connections))
base_all_voltages = os.path.join("results", folder_name)
file_name_voltages = os.path.join(base_all_voltages, "all_voltages")
logger.debug("file_name_voltages " + str(file_name_voltages))
xlabel = "Timestamp [h]"
ylabel = "Voltage [pu]"
for i in range(3):
voltages = self.get_data_voltages(id, phase_number=(i + 1))
file_name = file_name_voltages + "_phase_" + str(i + 1)
#logger.debug("data voltages " + str(voltages))
self.utils.store_data(file_name+".json", voltages)
self.plot(voltages, file_name=file_name, xlabel=xlabel, ylabel=ylabel)
for bus_name, connected_elements in connections.items():
logger.debug("##############################################################################")
logger.debug("bus name " + str(bus_name))
logger.debug("##############################################################################")
xlabel = "Timestamp [h]"
ylabel = "Voltage [pu]"
base = os.path.join("results", folder_name, bus_name)
file_name_voltage_node = os.path.join(base,"voltage")
logger.debug("file_name_voltage_node "+str(file_name_voltage_node))
for i in range(3):
voltages_node = self.get_data_voltages(id, bus_name, phase_number=(i + 1))
voltages_node_to_store = voltages_node.copy()
file_name = file_name_voltage_node + "_phase_" + str(i + 1)
#logger.debug("voltages_node " + str(voltages_node))
self.utils.store_data(file_name + ".json", voltages_node)
self.plot(voltages_node, file_name=file_name, xlabel=xlabel, ylabel=ylabel)
#voltages_node_to_store["phase_"+ str(i + 1)]=voltages_node_to_store.pop(bus_name)
#logger.debug("voltages_node " + str(voltages_node_to_store))
def compare_files(self, base_path_list, file_path_to_store, data_file_name="powers_P.json", data_type="grid_power", list_plot_names=None):
paths = []
for base_path in base_path_list:
paths.append(os.path.join("results", base_path))
#self.compare.get_grid_data_from_file(paths)
node_names = []
folder_path = []
for base_path in paths:
currently_folder_path = self.utils.get_path(base_path)
logger.debug("currently_folder_path "+str(currently_folder_path))
folder_path.append(currently_folder_path)
try:
node_names = next(os.walk(currently_folder_path))[1]
logger.debug("node_names " + str(node_names))
except Exception as e:
logger.error("Folder path:"+ str(currently_folder_path)+" not existing")
logger.error(e)
break
xlabel = "Timestamp [h]"
ylabel = "Power [kW]"
data_to_plot = []
base_path_to_store = self.utils.get_path(os.path.join("results","comparison",file_path_to_store))
for node in node_names:
data = self.compare.get_grid_data_from_node(folder_path, node, data_file_name, data_type, list_plot_names)
if not data == {} and not data == []:
data_to_plot.append(data)
if not data_type == None:
if "." in data_file_name:
ending_file = data_file_name.split(".")[0]
else:
ending_file = data_file_name
complete_file_path_to_store = os.path.join(base_path_to_store, node, ending_file, data_type)
else:
if "." in data_file_name:
ending_file = data_file_name.split(".")[0]
else:
ending_file = data_file_name
complete_file_path_to_store = os.path.join(base_path_to_store, node, ending_file)
#logger.debug("data "+str(data))
self.plot(data, file_name=complete_file_path_to_store, xlabel=xlabel, ylabel=ylabel)
#logger.debug("data_to_plot " + str(data_to_plot))
def main():
#url = "http://192.168.99.100:9091"
url = "http://localhost:9091"
plotter = Plotter(url)
comparison = False
if not comparison:
logger.debug("Reading information from dsf-se")
#id = "fae0303a69c7" #with ESS self-production
#folder_name = "Bolzano_residential_1_CS_self-production"
#id = "06bbd8919911" # with ESS self-production
#folder_name = "Bolzano_residential_1_CS_self-production_5_15h_Steps_5_5"
#id = "44ec450b2e2e" # with ESS self-production
#folder_name = "Bolzano_residential_1_CS_self-production_5_15h_Steps_10_5"
#id = "0bca45d7da48" # with ESS self-production
#folder_name = "Bolzano_residential_1_CS_self-production_5_16h_Steps_10_5"
#id = "f29b9d4e9f20" # with ESS self-production
#folder_name = "virtual_capacity_MinimizeCosts_ev_one_charging_station_9_18_Steps_10_5_4kW_cs"
#id = "38baa359edce" # with ESS self-production
#folder_name = "virtual_capacity_Maximize Self-Production_ev_one_charging_station_9_18_Steps_10_5_4kW_cs_udp_10"
#id = "3b9424eaa2eb" # with ESS self-production
#folder_name = "virtual_capacity_Maximize Self-Production_ev_one_charging_station_9_18_Steps_10_5_4kW_cs_udp_100"
#id = "57ba29defc87" # with ESS self-production
#folder_name = "virtual_capacity_Maximize Self-Production_ev_one_charging_station_9_18_Steps_10_5_4kW_cs_udp_1000"
#id = "097c510e74c2" # with ESS self-production
#folder_name = "virtual_capacity_Maximize Self-Production_ev_one_charging_station_9_18_Steps_10_5_4kW_cs_udp_10000"
#id = "21e301be113c" # with ESS self-production
#folder_name = "virtual_capacity_Maximize Self-Production_ev_one_charging_station_9_18_Steps_10_5_4kW_cs_udp_100000"
id = "48d01b25ce19" # with ESS self-production
folder_name = "virtual_capacity_Maximize Self-Production_ev_one_charging_station_9_18_Steps_10_5_4kW_cs_udp_1000000"
#id = "b7ad4b743b74" # with ESS self-production
#folder_name = "Bolzano_residential_1_ESS"
#id = "3d68cf40d742" #with ESS self-production
#folder_name = "grid_with_ESS_self_production"
#id = "8fe2e7980820" #with PV_no_control
#folder_name = "grid_with_PV_no_control"
#id = "1add147eeb22" # with PV limit power
#folder_name = "grid_with_PV_limit_power_60"
#id = "d1f51b384763" # with PV limit power
#folder_name = "grid_with_PV_volt_watt"
#id = "5807c499a61b" # with ESS self-production
#folder_name = "grid_with_ESS_self_production_gurobi_simplon"
#id = "28b61889ef6b" # with ESS self-production
#folder_name = "grid_with_ESS_self_production_gurobi_local"
#id = "3350a4f1c2e5" # with ESS self-production
#folder_name = "grid_with_ESS_self_production_ipopt_local"
#id = "22912f50dc71" #with PV_no_control
#folder_name = "grid_with_PV_gurobi"
#id = "b7690dbeac26" # with PV limit power
#folder_name = "grid_with_PV_limit_power_60_gurobi"
#id = "32d8e9338170" # with PV limit power
#folder_name = "grid_with_PV_volt_watt_gurobi"
id_list=["a4444a735c1b","f9e7ec8fba53"]
name_list=[]
logger.debug("########################## Voltages ##########################################")
#plotter.create_plots_for_voltages(id, folder_name)
logger.debug("########################## Powers ##########################################")
#plotter.create_plots_for_powers(id, folder_name)
logger.debug("########################## SoCs ##########################################")
#plotter.create_plots_for_socs(id, folder_name)
logger.debug("########################## PV Usage ##########################################")
#plotter.create_plots_for_pv_usage(id, folder_name)
else:
folder_name1 = "grid_with_PV_no_control"
folder_name2 = "grid_with_PV_limit_power_60"
#folder_name3 = "grid_with_PV_volt_watt"
#folder_name4 = "grid_with_ESS_self_production_gurobi_simplon"
#folder_name3 = "grid_with_ESS_self_production_equal_PV"
list_folders = [folder_name1, folder_name2]#, folder_name3, folder_name4]
#folder_name = "grid_with_ESS_self_production_equal_gurobi_simplon"
#folder_name1 = "grid_with_ESS_self_production_gurobi_local"
#list_folders = [folder_name, folder_name1]
list_names = ["PV penetration 100%","Limit power 60%"]#, "Volt-Watt", "Min self-production"]
#list_names = ["Min self-production simplon", "Min self-production localhost"]
len_list_folders = len(list_folders)
count = 0
file_path_to_store = ""
for folder in list_folders:
file_path_to_store = file_path_to_store + folder
if not count == (len_list_folders - 1):
file_path_to_store = file_path_to_store + "_and_"
count = count + 1
logger.debug("file_path_to_store: "+str(file_path_to_store))
plotter.compare_files(list_folders, file_path_to_store, "voltage_phase_1.json",None, list_names)
plotter.compare_files(list_folders, file_path_to_store, "powers_P.json", "grid",list_names)
plotter.compare_files(list_folders, file_path_to_store, "powers_P.json","load", list_names)
plotter.compare_files(list_folders, file_path_to_store, "powers_P.json", "pv", list_names)
plotter.compare_files(list_folders, file_path_to_store, "powers_P.json","storage", list_names)
plotter.compare_files(list_folders, file_path_to_store, "powers_Q.json", "grid", list_names)
plotter.compare_files(list_folders, file_path_to_store, "powers_Q.json", "load", list_names)
plotter.compare_files(list_folders, file_path_to_store, "powers_Q.json", "pv", list_names)
plotter.compare_files(list_folders, file_path_to_store, "powers_Q.json", "storage", list_names)
if __name__ == '__main__':
main()
|
[
"seaborn.set_style",
"seaborn.lineplot",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"logging.basicConfig",
"os.path.join",
"matplotlib.pyplot.legend",
"os.walk",
"data_management.utils.Utils",
"profess.Http_commands.Http_commands",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xticks",
"seaborn.hls_palette",
"plotter.comparison.Comparison",
"seaborn.set",
"logging.getLogger"
] |
[((264, 367), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s %(name)s: %(message)s"""', 'level': 'logging.DEBUG'}), "(format=\n '%(asctime)s %(levelname)s %(name)s: %(message)s', level=logging.DEBUG)\n", (283, 367), False, 'import logging\n'), ((372, 399), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (389, 399), False, 'import logging\n'), ((501, 516), 'profess.Http_commands.Http_commands', 'Http_commands', ([], {}), '()\n', (514, 516), False, 'from profess.Http_commands import Http_commands\n'), ((538, 545), 'data_management.utils.Utils', 'Utils', ([], {}), '()\n', (543, 545), False, 'from data_management.utils import Utils\n'), ((569, 581), 'plotter.comparison.Comparison', 'Comparison', ([], {}), '()\n', (579, 581), False, 'from plotter.comparison import Comparison\n'), ((13272, 13318), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (13294, 13318), True, 'import pandas as pd\n'), ((13445, 13469), 'seaborn.set', 'sns.set', ([], {'context': '"""paper"""'}), "(context='paper')\n", (13452, 13469), True, 'import seaborn as sns\n'), ((13478, 13577), 'seaborn.set', 'sns.set', ([], {'context': '"""paper"""', 'rc': "{'figure.figsize': (14, 8), 'lines.linewidth': 1.5}", 'font_scale': '(2)'}), "(context='paper', rc={'figure.figsize': (14, 8), 'lines.linewidth': \n 1.5}, font_scale=2)\n", (13485, 13577), True, 'import seaborn as sns\n'), ((13581, 13625), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""', "{'grid.color': '.8'}"], {}), "('ticks', {'grid.color': '.8'})\n", (13594, 13625), True, 'import seaborn as sns\n'), ((13641, 13669), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (13651, 13669), True, 'import matplotlib.pyplot as plt\n'), ((15755, 15827), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'obj', 'sort': '(False)', 'ax': 'ax1', 'dashes': '(False)', 
'palette': 'palete'}), '(data=obj, sort=False, ax=ax1, dashes=False, palette=palete)\n', (15767, 15827), True, 'import seaborn as sns\n'), ((16260, 16282), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(0)'}), '(rotation=0)\n', (16270, 16282), True, 'import matplotlib.pyplot as plt\n'), ((16830, 16892), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)\n', (16840, 16892), True, 'import matplotlib.pyplot as plt\n'), ((29083, 29119), 'os.path.join', 'os.path.join', (['"""results"""', 'folder_name'], {}), "('results', folder_name)\n", (29095, 29119), False, 'import os\n'), ((29149, 29196), 'os.path.join', 'os.path.join', (['base_all_voltages', '"""all_voltages"""'], {}), "(base_all_voltages, 'all_voltages')\n", (29161, 29196), False, 'import os\n'), ((13405, 13434), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data2'], {}), '(data2)\n', (13427, 13434), True, 'import pandas as pd\n'), ((15913, 16063), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'obj2', 'sort': '(False)', 'ax': 'ax1', 'legend': '"""full"""', 'dashes': '[[4, 2], [1, 4]]', 'palette': "{'higher limit': '#EA2027', 'lower limit': '#EA2027'}"}), "(data=obj2, sort=False, ax=ax1, legend='full', dashes=[[4, 2],\n [1, 4]], palette={'higher limit': '#EA2027', 'lower limit': '#EA2027'})\n", (15925, 16063), True, 'import seaborn as sns\n'), ((17149, 17211), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'dpi': '(1080)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "(path, dpi=1080, format='pdf', bbox_inches='tight')\n", (17160, 17211), True, 'import matplotlib.pyplot as plt\n'), ((17287, 17297), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17295, 17297), True, 'import matplotlib.pyplot as plt\n'), ((18050, 18096), 'os.path.join', 'os.path.join', (['"""results"""', 'folder_name', 'bus_name'], {}), "('results', folder_name, 
bus_name)\n", (18062, 18096), False, 'import os\n'), ((21419, 21465), 'os.path.join', 'os.path.join', (['"""results"""', 'folder_name', 'bus_name'], {}), "('results', folder_name, bus_name)\n", (21431, 21465), False, 'import os\n'), ((21492, 21522), 'os.path.join', 'os.path.join', (['base', '"""powers_P"""'], {}), "(base, 'powers_P')\n", (21504, 21522), False, 'import os\n'), ((21549, 21579), 'os.path.join', 'os.path.join', (['base', '"""powers_Q"""'], {}), "(base, 'powers_Q')\n", (21561, 21579), False, 'import os\n'), ((27769, 27815), 'os.path.join', 'os.path.join', (['"""results"""', 'folder_name', 'bus_name'], {}), "('results', folder_name, bus_name)\n", (27781, 27815), False, 'import os\n'), ((27888, 27918), 'os.path.join', 'os.path.join', (['base', '"""usage_PV"""'], {}), "(base, 'usage_PV')\n", (27900, 27918), False, 'import os\n'), ((30134, 30180), 'os.path.join', 'os.path.join', (['"""results"""', 'folder_name', 'bus_name'], {}), "('results', folder_name, bus_name)\n", (30146, 30180), False, 'import os\n'), ((30218, 30247), 'os.path.join', 'os.path.join', (['base', '"""voltage"""'], {}), "(base, 'voltage')\n", (30230, 30247), False, 'import os\n'), ((32069, 32126), 'os.path.join', 'os.path.join', (['"""results"""', '"""comparison"""', 'file_path_to_store'], {}), "('results', 'comparison', file_path_to_store)\n", (32081, 32126), False, 'import os\n'), ((14335, 14379), 'seaborn.hls_palette', 'sns.hls_palette', (['number_colors'], {'l': '(0.5)', 's': '(0.8)'}), '(number_colors, l=0.5, s=0.8)\n', (14350, 14379), True, 'import seaborn as sns\n'), ((31232, 31266), 'os.path.join', 'os.path.join', (['"""results"""', 'base_path'], {}), "('results', base_path)\n", (31244, 31266), False, 'import os\n'), ((32653, 32715), 'os.path.join', 'os.path.join', (['base_path_to_store', 'node', 'ending_file', 'data_type'], {}), '(base_path_to_store, node, ending_file, data_type)\n', (32665, 32715), False, 'import os\n'), ((32980, 33031), 'os.path.join', 'os.path.join', 
(['base_path_to_store', 'node', 'ending_file'], {}), '(base_path_to_store, node, ending_file)\n', (32992, 33031), False, 'import os\n'), ((31654, 31684), 'os.walk', 'os.walk', (['currently_folder_path'], {}), '(currently_folder_path)\n', (31661, 31684), False, 'import os\n'), ((19089, 19118), 'os.path.join', 'os.path.join', (['base', '"""soc_ESS"""'], {}), "(base, 'soc_ESS')\n", (19101, 19118), False, 'import os\n'), ((20287, 20315), 'os.path.join', 'os.path.join', (['base', '"""soc_EV"""'], {}), "(base, 'soc_EV')\n", (20299, 20315), False, 'import os\n')]
|
from swsscommon import swsscommon
import time
import os
def test_PortNotification(dvs):
dvs.runcmd("ifconfig Ethernet0 10.0.0.0/31 up") == 0
dvs.runcmd("ifconfig Ethernet4 10.0.0.2/31 up") == 0
dvs.servers[0].runcmd("ip link set down dev eth0") == 0
time.sleep(1)
db = swsscommon.DBConnector(0, dvs.redis_sock, 0)
tbl = swsscommon.Table(db, "PORT_TABLE")
(status, fvs) = tbl.get("Ethernet0")
assert status == True
oper_status = "unknown"
for v in fvs:
if v[0] == "oper_status":
oper_status = v[1]
break
assert oper_status == "down"
dvs.servers[0].runcmd("ip link set up dev eth0") == 0
time.sleep(1)
(status, fvs) = tbl.get("Ethernet0")
assert status == True
oper_status = "unknown"
for v in fvs:
if v[0] == "oper_status":
oper_status = v[1]
break
assert oper_status == "up"
|
[
"swsscommon.swsscommon.DBConnector",
"swsscommon.swsscommon.Table",
"time.sleep"
] |
[((270, 283), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (280, 283), False, 'import time\n'), ((294, 338), 'swsscommon.swsscommon.DBConnector', 'swsscommon.DBConnector', (['(0)', 'dvs.redis_sock', '(0)'], {}), '(0, dvs.redis_sock, 0)\n', (316, 338), False, 'from swsscommon import swsscommon\n'), ((350, 384), 'swsscommon.swsscommon.Table', 'swsscommon.Table', (['db', '"""PORT_TABLE"""'], {}), "(db, 'PORT_TABLE')\n", (366, 384), False, 'from swsscommon import swsscommon\n'), ((683, 696), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (693, 696), False, 'import time\n')]
|
import os, sys
from PIL import Image
sizes = [1024, 180, 167, 152, 120, 87, 80, 76, 60, 58, 40, 29, 20]
def _get_out_path(in_path, dim):
directory, filename = os.path.split(in_path)
filenames = filename.split('_')
filename = "{0}_{1}.png".format(filenames[0], dim)
out_path = os.path.join(directory, filename)
return out_path
def _resize(in_path, dim):
out_path = _get_out_path(in_path, dim)
in_image = Image.open(in_path)
out_image = in_image.resize((dim, dim), Image.LANCZOS)
print(out_path)
out_image.save(out_path, 'png')
in_path = sys.argv[1]
[_resize(in_path, s) for s in sizes]
|
[
"os.path.split",
"os.path.join",
"PIL.Image.open"
] |
[((162, 184), 'os.path.split', 'os.path.split', (['in_path'], {}), '(in_path)\n', (175, 184), False, 'import os, sys\n'), ((282, 315), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (294, 315), False, 'import os, sys\n'), ((413, 432), 'PIL.Image.open', 'Image.open', (['in_path'], {}), '(in_path)\n', (423, 432), False, 'from PIL import Image\n')]
|
#!/usr/bin/env python3
import time
import unittest
import rostest
import rospy
import actionlib
import move_base_msgs.msg
from geometry_msgs.msg import PoseStamped
from araig_msgs.msg import BoolStamped
import concurrent.futures
class MockActionServer():
_feedback = move_base_msgs.msg.MoveBaseFeedback()
_result = move_base_msgs.msg.MoveBaseResult()
def __init__(self, name):
self._action_name = name
self._as = actionlib.SimpleActionServer(self._action_name, \
move_base_msgs.msg.MoveBaseAction, \
execute_cb=self.execute_cb, \
auto_start = False)
self._as.start()
def execute_cb(self, goal):
success = True
rospy.loginfo('{}: get goal : {}'.format(self._action_name, goal))
current_position = PoseStamped()
current_position.header.stamp = rospy.Time.now()
current_position.pose.orientation.w = 1
self._feedback.base_position = current_position
self._as.publish_feedback(self._feedback)
if success:
self._as.set_succeeded(self._result, "Goal reached.")
rospy.loginfo('%s: Succeeded' % self._action_name)
class TestGoalInterpreter(unittest.TestCase):
@classmethod
def setUpClass(cls):
rospy.init_node('test_goal_interpreter')
cls.server = MockActionServer('move_base')
def setUp(self):
_pub_topic_start = '/test/start'
_sub_topic_goal = '/test/goal'
self.pub_start = rospy.Publisher(_pub_topic_start, BoolStamped, latch=True, queue_size=10)
rospy.Subscriber(_sub_topic_goal, PoseStamped, callback=self.callback_1, queue_size=10)
while (not rospy.has_param("/interpreters/goal") and \
not rospy.has_param("/interpreters/goal_action")):
time.sleep(0.1)
self.goal = rospy.get_param("/interpreters/goal")
self.pose = None
def test_connect_to_action(self):
pub_msg = BoolStamped()
pub_msg.header.stamp = rospy.Time.now()
pub_msg.data = True
self.pub_start.publish(pub_msg)
while self.pose is None:
rospy.sleep(0.1)
self.assertEqual(self.pose.position.x, self.goal["position"]["x"], msg='test1: The goal should be {}, but get {}'.format(self.goal, self.pose))
self.assertAlmostEqual(self.pose.orientation.w, \
0.707, \
msg='test1: The goal should be {}, but get {}'.format(self.goal, self.pose), \
delta = 0.01)
rospy.sleep(0.5)
self.assertEqual(1, self.times, \
msg='test2: The msg should ony publish {}, but published {}'.format(1, self.times))
def callback_1(self, msg):
self.pose = msg.pose
self.times = msg.header.seq
if __name__ == '__main__':
pkg = 'araig_interpreter'
name = 'test_goal_interpreter'
rostest.rosrun(pkg, name, TestGoalInterpreter)
|
[
"geometry_msgs.msg.PoseStamped",
"rospy.Subscriber",
"rospy.Time.now",
"rostest.rosrun",
"rospy.Publisher",
"rospy.sleep",
"time.sleep",
"rospy.get_param",
"araig_msgs.msg.BoolStamped",
"rospy.loginfo",
"rospy.init_node",
"actionlib.SimpleActionServer",
"rospy.has_param"
] |
[((2901, 2947), 'rostest.rosrun', 'rostest.rosrun', (['pkg', 'name', 'TestGoalInterpreter'], {}), '(pkg, name, TestGoalInterpreter)\n', (2915, 2947), False, 'import rostest\n'), ((444, 577), 'actionlib.SimpleActionServer', 'actionlib.SimpleActionServer', (['self._action_name', 'move_base_msgs.msg.MoveBaseAction'], {'execute_cb': 'self.execute_cb', 'auto_start': '(False)'}), '(self._action_name, move_base_msgs.msg.\n MoveBaseAction, execute_cb=self.execute_cb, auto_start=False)\n', (472, 577), False, 'import actionlib\n'), ((809, 822), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (820, 822), False, 'from geometry_msgs.msg import PoseStamped\n'), ((863, 879), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (877, 879), False, 'import rospy\n'), ((1299, 1339), 'rospy.init_node', 'rospy.init_node', (['"""test_goal_interpreter"""'], {}), "('test_goal_interpreter')\n", (1314, 1339), False, 'import rospy\n'), ((1523, 1596), 'rospy.Publisher', 'rospy.Publisher', (['_pub_topic_start', 'BoolStamped'], {'latch': '(True)', 'queue_size': '(10)'}), '(_pub_topic_start, BoolStamped, latch=True, queue_size=10)\n', (1538, 1596), False, 'import rospy\n'), ((1606, 1697), 'rospy.Subscriber', 'rospy.Subscriber', (['_sub_topic_goal', 'PoseStamped'], {'callback': 'self.callback_1', 'queue_size': '(10)'}), '(_sub_topic_goal, PoseStamped, callback=self.callback_1,\n queue_size=10)\n', (1622, 1697), False, 'import rospy\n'), ((1878, 1915), 'rospy.get_param', 'rospy.get_param', (['"""/interpreters/goal"""'], {}), "('/interpreters/goal')\n", (1893, 1915), False, 'import rospy\n'), ((2002, 2015), 'araig_msgs.msg.BoolStamped', 'BoolStamped', ([], {}), '()\n', (2013, 2015), False, 'from araig_msgs.msg import BoolStamped\n'), ((2047, 2063), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (2061, 2063), False, 'import rospy\n'), ((2552, 2568), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (2563, 2568), False, 'import rospy\n'), ((1150, 1200), 
'rospy.loginfo', 'rospy.loginfo', (["('%s: Succeeded' % self._action_name)"], {}), "('%s: Succeeded' % self._action_name)\n", (1163, 1200), False, 'import rospy\n'), ((1833, 1848), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1843, 1848), False, 'import time\n'), ((2178, 2194), 'rospy.sleep', 'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (2189, 2194), False, 'import rospy\n'), ((1714, 1751), 'rospy.has_param', 'rospy.has_param', (['"""/interpreters/goal"""'], {}), "('/interpreters/goal')\n", (1729, 1751), False, 'import rospy\n'), ((1774, 1818), 'rospy.has_param', 'rospy.has_param', (['"""/interpreters/goal_action"""'], {}), "('/interpreters/goal_action')\n", (1789, 1818), False, 'import rospy\n')]
|
from __future__ import print_function, absolute_import, division, unicode_literals
from pyanalyze.name_check_visitor import NameCheckVisitor
if __name__ == "__main__":
NameCheckVisitor.main()
|
[
"pyanalyze.name_check_visitor.NameCheckVisitor.main"
] |
[((174, 197), 'pyanalyze.name_check_visitor.NameCheckVisitor.main', 'NameCheckVisitor.main', ([], {}), '()\n', (195, 197), False, 'from pyanalyze.name_check_visitor import NameCheckVisitor\n')]
|
import collections
import gevent
from gevent.pywsgi import ( # noqa: F401
WSGIServer,
)
from gevent import ( # noqa: F401
subprocess,
socket,
threading,
)
import pylru
from geventhttpclient import HTTPClient
from web3.utils.six import urlparse
_client_cache = pylru.lrucache(8)
sleep = gevent.sleep
spawn = gevent.spawn
GreenletThread = gevent.Greenlet
class Timeout(gevent.Timeout):
def check(self):
pass
def sleep(self, seconds):
gevent.sleep(seconds)
def make_server(host, port, application, *args, **kwargs):
server = WSGIServer((host, port), application, *args, **kwargs)
return server
def _get_client(host, port, **kwargs):
ordered_kwargs = collections.OrderedDict(sorted(kwargs.items()))
cache_key = '{0}:{1}:{2}'.format(
host,
port,
':'.join((
"{0}={1}".format(str(key), str(value))
for key, value in ordered_kwargs.items()
))
)
if cache_key not in _client_cache:
_client_cache[cache_key] = HTTPClient(host, port, **kwargs)
return _client_cache[cache_key]
def make_post_request(endpoint_uri, data, **kwargs):
url_parts = urlparse(endpoint_uri)
host, _, port = url_parts.netloc.partition(':')
if not port:
if url_parts.scheme == 'http':
port = 80
elif url_parts.scheme == 'https':
port = 443
else:
raise ValueError("Unsupported scheme: '{0}'".format(url_parts.scheme))
kwargs.setdefault('ssl', url_parts.scheme == 'https')
kwargs.setdefault('connection_timeout', 10)
kwargs.setdefault('network_timeout', 10)
kwargs.setdefault('concurrency', 10)
client = _get_client(host, port, **kwargs)
response = client.post(url_parts.path, body=data)
response_body = response.read()
return response_body
|
[
"geventhttpclient.HTTPClient",
"web3.utils.six.urlparse",
"pylru.lrucache",
"gevent.pywsgi.WSGIServer",
"gevent.sleep"
] |
[((284, 301), 'pylru.lrucache', 'pylru.lrucache', (['(8)'], {}), '(8)\n', (298, 301), False, 'import pylru\n'), ((581, 635), 'gevent.pywsgi.WSGIServer', 'WSGIServer', (['(host, port)', 'application', '*args'], {}), '((host, port), application, *args, **kwargs)\n', (591, 635), False, 'from gevent.pywsgi import WSGIServer\n'), ((1184, 1206), 'web3.utils.six.urlparse', 'urlparse', (['endpoint_uri'], {}), '(endpoint_uri)\n', (1192, 1206), False, 'from web3.utils.six import urlparse\n'), ((485, 506), 'gevent.sleep', 'gevent.sleep', (['seconds'], {}), '(seconds)\n', (497, 506), False, 'import gevent\n'), ((1044, 1076), 'geventhttpclient.HTTPClient', 'HTTPClient', (['host', 'port'], {}), '(host, port, **kwargs)\n', (1054, 1076), False, 'from geventhttpclient import HTTPClient\n')]
|
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# The author of this file is: https://github.com/mg2015started
import numpy as np
def get_split_batch(batch):
"""memory.sample() returns a batch of experiences, but we want an array
for each element in the memory (s, a, r, s', done)"""
states_mb = np.array([each[0][0] for each in batch])
# print(states_mb.shape)
actions_mb = np.array([each[0][1] for each in batch])
# print(actions_mb.shape)
rewards_mb = np.array([each[0][2] for each in batch])
# print(rewards_mb.shape)
next_states_mb = np.array([each[0][3] for each in batch])
# print(next_states_mb.shape)
dones_mb = np.array([each[0][4] for each in batch])
return states_mb, actions_mb, rewards_mb, next_states_mb, dones_mb
def OU(action, mu=0, theta=0.15, sigma=0.3):
# noise = np.ones(action_dim) * mu
noise = theta * (mu - action) + sigma * np.random.randn(1)
# noise = noise + d_noise
return noise
def calculate_angle(ego_location, goal_location, ego_direction):
# calculate vector direction
goal_location = np.array(goal_location)
ego_location = np.array(ego_location)
goal_vector = goal_location - ego_location
L_g_vector = np.sqrt(goal_vector.dot(goal_vector))
ego_vector = np.array(
[np.cos(ego_direction * np.pi / 180), np.sin(ego_direction * np.pi / 180)]
)
L_e_vector = np.sqrt(ego_vector.dot(ego_vector))
cos_angle = goal_vector.dot(ego_vector) / (L_g_vector * L_e_vector)
angle = (np.arccos(cos_angle)) * 180 / np.pi
if np.cross(goal_vector, ego_vector) > 0:
angle = -angle
return angle
def calculate_distance(location_a, location_b):
""" calculate distance between a and b"""
return np.linalg.norm(location_a - location_b)
|
[
"numpy.random.randn",
"numpy.cross",
"numpy.sin",
"numpy.linalg.norm",
"numpy.array",
"numpy.cos",
"numpy.arccos"
] |
[((1393, 1433), 'numpy.array', 'np.array', (['[each[0][0] for each in batch]'], {}), '([each[0][0] for each in batch])\n', (1401, 1433), True, 'import numpy as np\n'), ((1480, 1520), 'numpy.array', 'np.array', (['[each[0][1] for each in batch]'], {}), '([each[0][1] for each in batch])\n', (1488, 1520), True, 'import numpy as np\n'), ((1568, 1608), 'numpy.array', 'np.array', (['[each[0][2] for each in batch]'], {}), '([each[0][2] for each in batch])\n', (1576, 1608), True, 'import numpy as np\n'), ((1660, 1700), 'numpy.array', 'np.array', (['[each[0][3] for each in batch]'], {}), '([each[0][3] for each in batch])\n', (1668, 1700), True, 'import numpy as np\n'), ((1750, 1790), 'numpy.array', 'np.array', (['[each[0][4] for each in batch]'], {}), '([each[0][4] for each in batch])\n', (1758, 1790), True, 'import numpy as np\n'), ((2179, 2202), 'numpy.array', 'np.array', (['goal_location'], {}), '(goal_location)\n', (2187, 2202), True, 'import numpy as np\n'), ((2222, 2244), 'numpy.array', 'np.array', (['ego_location'], {}), '(ego_location)\n', (2230, 2244), True, 'import numpy as np\n'), ((2830, 2869), 'numpy.linalg.norm', 'np.linalg.norm', (['(location_a - location_b)'], {}), '(location_a - location_b)\n', (2844, 2869), True, 'import numpy as np\n'), ((2644, 2677), 'numpy.cross', 'np.cross', (['goal_vector', 'ego_vector'], {}), '(goal_vector, ego_vector)\n', (2652, 2677), True, 'import numpy as np\n'), ((1993, 2011), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (2008, 2011), True, 'import numpy as np\n'), ((2383, 2418), 'numpy.cos', 'np.cos', (['(ego_direction * np.pi / 180)'], {}), '(ego_direction * np.pi / 180)\n', (2389, 2418), True, 'import numpy as np\n'), ((2420, 2455), 'numpy.sin', 'np.sin', (['(ego_direction * np.pi / 180)'], {}), '(ego_direction * np.pi / 180)\n', (2426, 2455), True, 'import numpy as np\n'), ((2601, 2621), 'numpy.arccos', 'np.arccos', (['cos_angle'], {}), '(cos_angle)\n', (2610, 2621), True, 'import numpy as np\n')]
|
# Copyright (c) 2021, Firsterp and contributors
# For license information, please see license.txt
import frappe
from frappe.utils import date_diff, add_months, today, getdate, add_days, flt, get_last_day, get_first_day, cint, get_link_to_form, rounded, add_to_date, get_first_day_of_week
from frappe import _ , scrub
from six import iteritems
from erpnext.accounts.utils import get_fiscal_year
from frappe.model.document import Document
from frappe.utils.background_jobs import enqueue
Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
class MigrationRun(Document):
def after_insert(self):
if self.range and self.from_date and self.to_date and not self.is_new():
from dateutil.relativedelta import relativedelta, MO
from_date, to_date = getdate(self.from_date), getdate(self.to_date)
increment = {
'Monthly': 1,
'Quarterly': 3,
'Half-Yearly': 6,
'Yearly': 12
}.get(self.range, 1)
if self.range in ['Monthly', 'Quarterly']:
from_date = from_date.replace(day=1)
elif self.range == 'Yearly':
from_date = get_fiscal_year(from_date)[1]
else:
from_date = from_date + relativedelta(from_date, weekday=MO(-1))
self.periodic_daterange = []
for dummy in range(1, 53):
if self.range == 'Weekly':
period_end_date = add_days(from_date, 6)
else:
period_end_date = add_to_date(from_date, months=increment, days=-1)
if period_end_date > to_date:
period_end_date = to_date
self.periodic_daterange.append(period_end_date)
from_date = add_days(period_end_date, 1)
if period_end_date == to_date:
break
self.periods = []
for end_date in self.periodic_daterange:
if self.range == 'Monthly':
period_from_date = get_first_day(end_date)
elif self.range == "Weekly":
period_from_date = get_first_day_of_week(end_date)
else:
period_from_date = end_date
self.periods.append({
'from_date':period_from_date,
'to_date':end_date
})
frappe.msgprint("Calling Enque..")
self.create_migration_logs()
# frappe.enqueue('migration.migration.doctype.migration_run.migration_run.create_migration_logs', periodic_daterange=self.periods,migration_run=self.name, queue='short')
def create_migration_logs(self):
frappe.publish_realtime('msgprint', 'Starting long job...')
frappe.msgprint("Enqueing..")
for count,date in enumerate(self.periods):
frappe.publish_progress((count+1)*100/len(self.periods), title = _("Creating Migration Run Doc..."))
migration_run_log = frappe.new_doc("Migration Run Log")
migration_run_log.from_date = date['from_date']
migration_run_log.to_date = date['to_date']
# migration_run_log.migration_run = self.name
migration_run_log.insert()
frappe.publish_realtime('msgprint', 'Ending long job...')
frappe.msgprint("Calling Enque..Ended")
|
[
"dateutil.relativedelta.MO",
"frappe.msgprint",
"frappe.utils.add_days",
"frappe.utils.get_first_day_of_week",
"frappe.utils.getdate",
"frappe.utils.add_to_date",
"frappe.utils.get_first_day",
"frappe.new_doc",
"frappe.publish_realtime",
"erpnext.accounts.utils.get_fiscal_year",
"frappe._"
] |
[((2296, 2355), 'frappe.publish_realtime', 'frappe.publish_realtime', (['"""msgprint"""', '"""Starting long job..."""'], {}), "('msgprint', 'Starting long job...')\n", (2319, 2355), False, 'import frappe\n'), ((2358, 2387), 'frappe.msgprint', 'frappe.msgprint', (['"""Enqueing.."""'], {}), "('Enqueing..')\n", (2373, 2387), False, 'import frappe\n'), ((2775, 2832), 'frappe.publish_realtime', 'frappe.publish_realtime', (['"""msgprint"""', '"""Ending long job..."""'], {}), "('msgprint', 'Ending long job...')\n", (2798, 2832), False, 'import frappe\n'), ((2835, 2874), 'frappe.msgprint', 'frappe.msgprint', (['"""Calling Enque..Ended"""'], {}), "('Calling Enque..Ended')\n", (2850, 2874), False, 'import frappe\n'), ((2018, 2052), 'frappe.msgprint', 'frappe.msgprint', (['"""Calling Enque.."""'], {}), "('Calling Enque..')\n", (2033, 2052), False, 'import frappe\n'), ((2560, 2595), 'frappe.new_doc', 'frappe.new_doc', (['"""Migration Run Log"""'], {}), "('Migration Run Log')\n", (2574, 2595), False, 'import frappe\n'), ((794, 817), 'frappe.utils.getdate', 'getdate', (['self.from_date'], {}), '(self.from_date)\n', (801, 817), False, 'from frappe.utils import date_diff, add_months, today, getdate, add_days, flt, get_last_day, get_first_day, cint, get_link_to_form, rounded, add_to_date, get_first_day_of_week\n'), ((819, 840), 'frappe.utils.getdate', 'getdate', (['self.to_date'], {}), '(self.to_date)\n', (826, 840), False, 'from frappe.utils import date_diff, add_months, today, getdate, add_days, flt, get_last_day, get_first_day, cint, get_link_to_form, rounded, add_to_date, get_first_day_of_week\n'), ((1563, 1591), 'frappe.utils.add_days', 'add_days', (['period_end_date', '(1)'], {}), '(period_end_date, 1)\n', (1571, 1591), False, 'from frappe.utils import date_diff, add_months, today, getdate, add_days, flt, get_last_day, get_first_day, cint, get_link_to_form, rounded, add_to_date, get_first_day_of_week\n'), ((1321, 1343), 'frappe.utils.add_days', 'add_days', (['from_date', 
'(6)'], {}), '(from_date, 6)\n', (1329, 1343), False, 'from frappe.utils import date_diff, add_months, today, getdate, add_days, flt, get_last_day, get_first_day, cint, get_link_to_form, rounded, add_to_date, get_first_day_of_week\n'), ((1377, 1426), 'frappe.utils.add_to_date', 'add_to_date', (['from_date'], {'months': 'increment', 'days': '(-1)'}), '(from_date, months=increment, days=-1)\n', (1388, 1426), False, 'from frappe.utils import date_diff, add_months, today, getdate, add_days, flt, get_last_day, get_first_day, cint, get_link_to_form, rounded, add_to_date, get_first_day_of_week\n'), ((1763, 1786), 'frappe.utils.get_first_day', 'get_first_day', (['end_date'], {}), '(end_date)\n', (1776, 1786), False, 'from frappe.utils import date_diff, add_months, today, getdate, add_days, flt, get_last_day, get_first_day, cint, get_link_to_form, rounded, add_to_date, get_first_day_of_week\n'), ((2501, 2535), 'frappe._', '_', (['"""Creating Migration Run Doc..."""'], {}), "('Creating Migration Run Doc...')\n", (2502, 2535), False, 'from frappe import _, scrub\n'), ((1096, 1122), 'erpnext.accounts.utils.get_fiscal_year', 'get_fiscal_year', (['from_date'], {}), '(from_date)\n', (1111, 1122), False, 'from erpnext.accounts.utils import get_fiscal_year\n'), ((1844, 1875), 'frappe.utils.get_first_day_of_week', 'get_first_day_of_week', (['end_date'], {}), '(end_date)\n', (1865, 1875), False, 'from frappe.utils import date_diff, add_months, today, getdate, add_days, flt, get_last_day, get_first_day, cint, get_link_to_form, rounded, add_to_date, get_first_day_of_week\n'), ((1196, 1202), 'dateutil.relativedelta.MO', 'MO', (['(-1)'], {}), '(-1)\n', (1198, 1202), False, 'from dateutil.relativedelta import relativedelta, MO\n')]
|
import sys; sys.path.append('../..')
from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data
import pandas as pd
import numpy as np
import os
from datetime import datetime
import time
from random import randint
import sys
import glob
import logging
from utils.process_tweet import ProcessTweet
logger = logging.getLogger(__name__)
class SampleGenerator(object):
"""Class for generating samples from tweet corpus"""
def __init__(self, seed=None):
if seed is None:
self.seed = randint(0,2**32-1)
else:
self.seed = seed
def create_bins(self, df, column, lower, upper):
self.df = df
bins = {}
_u = df.index.astype(str)
df[column] = [i[lower:upper] for i in _u]
for unique in df[column].unique():
bins[unique] = df.loc[df[column] == unique]
self.bins = bins
self.bin_type = column
return bins, column
def create_all_bins(self, df):
return self.create_bins(df, 'all', 0, 0)
def create_year_bins(self, df):
return self.create_bins(df, 'years', 0, 4)
def create_month_bins(self, df):
return self.create_bins(df, 'months', 0, 7)
def create_day_bins(self, df):
return self.create_bins(df, 'days', 0, 10)
def random_sample(self, df, size):
"""Generates a new batch which is randomly sampled from the cleaned data
"""
return df.sample(size, random_state=self.seed)
def create_sample(self, bins=None, size=None, bins_unused=None, bins_used=None, bin_size=None):
if bins is None:
bins = self.bins
bin_count = len(bins)
if bin_size is None:
bin_size = int(size / bin_count)
samples = []
for unique in bins:
rows = bins[unique]
target_size = bin_size
exclude_unused = set()
exclude_used = set()
if not bins_unused is None and unique in bins_unused:
rows_unused = bins_unused[unique]
exclude_unused = set(rows_unused.id)
target_size += len(exclude_unused)
if not bins_used is None and unique in bins_used:
rows_used = bins_used[unique]
exclude_used = set(rows_used.id)
if len(rows) < target_size:
sample = rows.sample(frac=1)
else:
sample = rows.sample(n=target_size, random_state=self.seed)
exclude = exclude_unused | exclude_used
sample = sample[~sample.id.isin(exclude)]
target_size = bin_size - len(exclude_used)
if len(sample) > target_size:
sample = sample[:target_size]
samples.append(sample)
sample = pd.concat(samples)
self.sample = sample
return sample
def write_sample(self, sample, mode, columns=['id','text'], size='', min_date=None, max_date=None, flags=''):
if len(sample) == 0:
logger.warn('No sample files written. Aborting.')
return
timestamp = time.strftime('%Y-%m-%d_%H-%M-%S')
min_date_str = ''
if min_date is not None:
min_date_str = '_min_date_{}'.format(min_date)
max_date_str = ''
if max_date is not None:
max_date_str = '_max_date_{}'.format(max_date)
f_name = 'sampled_{mode}_{len_sample}_{size}_{seed}{min_date}{max_date}_created_{timestamp}{flags}.csv'.format(mode=mode, len_sample=len(sample),
size=size, seed=self.seed, timestamp=timestamp, min_date=min_date_str, max_date=max_date_str, flags=flags)
full_path = os.path.join(find_folder('2_sampled'), f_name)
logger.info('Writing file {} ...'.format(full_path))
if 'all' in columns:
sample.to_csv(full_path, encoding='utf8')
else:
sample[columns].to_csv(full_path, encoding='utf8', index=False, header=False)
def stats(self, ignore_previous=False):
df_samples = get_sampled_data()
try:
df_labels = get_labelled_data()
except FileNotFoundError:
tweet_ids_labelled = set()
else:
tweet_ids_labelled = set(df_labels['tweet_id'])
# Ids from previous batches
df_batched = get_batched_sample_data()
if len(df_batched) > 0:
tweet_ids_batched = set(df_batched['tweet_id'])
else:
tweet_ids_batched = set()
# Ids from previous batches which were not available
df_unavailable = get_uploaded_batched_data(availability='unavailable')
if len(df_unavailable) > 0:
tweet_ids_unavailable = set(df_unavailable['tweet_id'])
else:
tweet_ids_unavailable = set()
tweet_ids_sampled = set(df_samples['tweet_id'])
# stats
still_available = tweet_ids_sampled - tweet_ids_unavailable - tweet_ids_labelled
if not ignore_previous:
still_available -= tweet_ids_batched
logger.info('Unique tweets in base sample(s): {:,} (labelled: {:,}, unavailable: {:,}, in previous batches: {:,})'.format(len(tweet_ids_sampled), len(tweet_ids_labelled), len(tweet_ids_unavailable), len(tweet_ids_batched)))
logger.info('Tweets left to sample from: {:,}'.format(len(still_available)))
logger.info('Precentage labelled: {:.2f}%'.format(100*float(len(tweet_ids_labelled)/len(tweet_ids_sampled))))
def generate_batch(self, num_tweets=None, batch_id=None, tail=True, ignore_previous=False):
"""Generates a new batch which takes as input a large sample file provided in `data/2_sampled` and generates a new batch
not including previously annotated tweets.
"""
if num_tweets is None:
raise ValueError('Num tweets is zero. Cannot create empty batch.')
# vars
sample_folder = find_folder('2_sampled')
# Ids from sample file
df_samples = get_sampled_data()
if len(df_samples) == 0:
raise Exception('Sample file is empty. Generate a sample file first.')
tweet_ids_sampled = set(df_samples['tweet_id'])
# Ids from previously labelled data
try:
df_labels = get_labelled_data()
except FileNotFoundError:
tweet_ids_labelled = set()
else:
tweet_ids_labelled = set(df_labels['tweet_id'])
# Ids from previous batches
df_batched = get_batched_sample_data()
if len(df_batched) > 0:
tweet_ids_batched = set(df_batched['tweet_id'])
else:
tweet_ids_batched = set()
# Ids from previous batches which were not available
df_unavailable = get_uploaded_batched_data(availability='unavailable')
if len(df_unavailable) > 0:
tweet_ids_unavailable = set(df_unavailable['tweet_id'])
else:
tweet_ids_unavailable = set()
# remove tweets which are unavailable, have been previously labelled
still_available = tweet_ids_sampled - tweet_ids_unavailable - tweet_ids_labelled
if not ignore_previous:
still_available -= tweet_ids_batched
logger.info('Unique tweets in base sample(s): {:,} (labelled: {:,}, unavailable: {:,}, in previous batches: {:,})'.format(len(tweet_ids_sampled), len(tweet_ids_labelled), len(tweet_ids_unavailable), len(tweet_ids_batched)))
logger.info('Tweets left to sample from: {:,}'.format(len(still_available)))
logger.info('Precentage labelled: {:.2f}%'.format(100*float(len(tweet_ids_labelled)/len(tweet_ids_sampled))))
# return conditions
if len(still_available) <= 0:
logger.warn('All available tweets have been labelled.'.format(len(tweet_ids_sampled), len(still_available)))
return
if num_tweets > len(still_available):
logger.warn('Requested to create batch of {:,}, but only {:,} are still available.'.format(num_tweets, len(still_available)))
return
if tail:
batch = df_samples.loc[df_samples['tweet_id'].isin(still_available)][-num_tweets:]
else:
batch = df_samples.loc[df_samples['tweet_id'].isin(still_available)][:num_tweets]
assert len(batch) == num_tweets
# write new batch file
if batch_id is None:
try:
batch_id = 1 + max([int(s.split('_')[-1]) for s in os.listdir(sample_folder) if s.startswith('batch_') and os.path.isdir(os.path.join(sample_folder, s))])
except ValueError:
batch_id = 1
batch_name = 'batch_{}'.format(batch_id)
logger.info('Generating batch {} of size {:,} tweets...'.format(batch_name, num_tweets))
output_folder = os.path.join(sample_folder, batch_name)
if not os.path.isdir(output_folder):
os.mkdir(output_folder)
else:
raise Exception('Found pre-existing folder "{}". Please remove this folder first or pick a different batch ID.'.format(output_folder))
f_path = os.path.join(output_folder, '{}_{}.csv'.format(batch_name, datetime.now().strftime('%Y-%m-%d')))
batch.to_csv(f_path, header=None, index=False, encoding='utf8')
logger.info('Successfully wrote file containing new batch "{}"'.format(f_path))
# methods for distribution analysis
def monthdelta(self,date,base):
year_delta = int(date[0:4]) - int(base[0:4])
month_delta = int(date[5:7]) - int(base[5:7])
return year_delta * 12 + month_delta
def create_distributions(self,df=None,sample=None):
if df is None:
df = self.df
if sample is None:
sample = self.sample
df['in_sample'] = df['id'].isin(sample['id'])
df['datetime'] = [str(idx) for idx in df.index]
base_date_str = df.datetime[0][0:10]
base_date = datetime.strptime(base_date_str,'%Y-%m-%d')
df['day_idx'] = [(datetime.strptime(str(idx[0:10]),'%Y-%m-%d') - base_date).days for idx in df.datetime]
df['month_idx'] = [self.monthdelta(str(idx)[0:10],base_date_str) for idx in df.datetime]
df['year_idx'] = [int(idx[0:4]) - int(base_date_str[0:4]) for idx in df.datetime]
df['idx'] = range(0,len(df))
self.indices = df.loc[df['in_sample']]['idx']
self.days = df.loc[df['in_sample']]['day_idx']
self.months = df.loc[df['in_sample']]['month_idx']
self.years = df.loc[df['in_sample']]['year_idx']
return self.indices,self.days,self.months,self.years
def run(size=None, langs=None, include_replies=False, anonymize=True, contains_keywords=False, min_token_count=3, mode='monthly', seed=None, extend=False, bin_size=None, min_date=None, max_date=None):
if bin_size is None:
logger.info('Creating sample of size {:,}...'.format(size))
else:
logger.info('Creating sample of size {:,} or bin size {:,}...'.format(size, bin_size))
df = get_parsed_data(usecols=['id', 'text', 'created_at', 'lang', 'is_reply', 'has_quote', 'token_count'],
contains_keywords=contains_keywords,
s_date=min_date,
e_date=max_date)
logger.info(f'Read {len(df):,} samples. Filtering...')
flags = ''
# Filter by date
if min_date is not None or max_date is not None:
logger.info('Filtering by dates...')
df = df.set_index('created_at')[min_date:max_date].reset_index()
# Min token count
if isinstance(min_token_count, int):
logger.info('Filtering by min_token_count...')
df = df[df.token_count > min_token_count]
if not include_replies:
# by default filter replies
df = df[~df.is_reply]
else:
logger.info('Including replies...')
flags += '_include_replies'
# Contains keywords
if contains_keywords:
# data was already filtered in get_parsed_data
logger.info('Filtered for contains_keywords...')
flags += '_contains_keywords'
# Filter by language
if isinstance(langs, list):
if len(langs) > 0:
logger.info('Filtering for languages {}...'.format(','.join(langs)))
df = df[df.lang.isin(langs)]
flags += '_langs_{}'.format(','.join(langs))
# Filter previous
if extend:
logger.info('Extending previous sampled data...')
flags += '_extended'
df_sampled = get_sampled_data()
df = df[~df.id.isin(df_sampled.tweet_id)]
df = df[~df.text.isin(df_sampled.tweet_text)]
# is_duplicate only marks duplicates before replacing <url> and @user tokens
logger.info('Final screening for duplicates...')
df['text_cleared'] = df.text.str.replace(r'@<user>|<url>', '')
df['text_cleared'] = df.text_cleared.str.strip()
df = df.drop_duplicates(subset=['text_cleared'])
df = df.drop(['text_cleared'], axis=1) # release memory
logger.info(f'... {len(df):,} rows in filtered data')
generator = SampleGenerator(seed=seed)
sample = pd.DataFrame()
if mode == 'monthly':
if extend:
logger.info('Extending sample by evenly spread months based on seed "{}"...'.format(generator.seed))
logger.info('Reading unavailable tweets...')
df_unavailable = get_uploaded_batched_data('unavailable')
unused_ids = set(df_unavailable['tweet_id'])
unused = df[df.id.isin(unused_ids)].copy()
unbins, _ = generator.create_month_bins(unused)
logger.info('Reading available tweets...')
df_available = get_uploaded_batched_data('available')
used_ids = set(df_available['tweet_id'])
used = pd.DataFrame(df[df.id.isin(used_ids)].copy())
ubins, _ = generator.create_month_bins(used)
logger.info('Generating sample...')
else:
unbins = None
ubins = None
logger.info('Generating sample by evenly spread months...')
bins, bin_type = generator.create_month_bins(df)
sample = generator.create_sample(bins, size=size, bins_unused=unbins, bins_used=ubins, bin_size=bin_size)
elif mode == 'random':
logger.info('Generating random sample...')
sample = generator.random_sample(df, size)
# anonymize
if anonymize:
logger.info('Anonymizing sample...')
sample.loc[:, 'text'] = sample.text.apply(ProcessTweet.anonymize_text)
generator.write_sample(sample, mode, size=('bin' + str(bin_size)) if size is None else size, min_date=min_date, max_date=max_date, flags=flags)
|
[
"sys.path.append",
"pandas.DataFrame",
"os.mkdir",
"os.listdir",
"utils.helpers.find_folder",
"random.randint",
"os.path.isdir",
"time.strftime",
"datetime.datetime.now",
"datetime.datetime.strptime",
"utils.helpers.get_batched_sample_data",
"utils.helpers.get_labelled_data",
"utils.helpers.get_parsed_data",
"utils.helpers.get_uploaded_batched_data",
"os.path.join",
"pandas.concat",
"logging.getLogger",
"utils.helpers.get_sampled_data"
] |
[((12, 36), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (27, 36), False, 'import sys\n'), ((419, 446), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (436, 446), False, 'import logging\n'), ((11067, 11247), 'utils.helpers.get_parsed_data', 'get_parsed_data', ([], {'usecols': "['id', 'text', 'created_at', 'lang', 'is_reply', 'has_quote', 'token_count']", 'contains_keywords': 'contains_keywords', 's_date': 'min_date', 'e_date': 'max_date'}), "(usecols=['id', 'text', 'created_at', 'lang', 'is_reply',\n 'has_quote', 'token_count'], contains_keywords=contains_keywords,\n s_date=min_date, e_date=max_date)\n", (11082, 11247), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((13106, 13120), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (13118, 13120), True, 'import pandas as pd\n'), ((2881, 2899), 'pandas.concat', 'pd.concat', (['samples'], {}), '(samples)\n', (2890, 2899), True, 'import pandas as pd\n'), ((3196, 3230), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%H-%M-%S"""'], {}), "('%Y-%m-%d_%H-%M-%S')\n", (3209, 3230), False, 'import time\n'), ((4125, 4143), 'utils.helpers.get_sampled_data', 'get_sampled_data', ([], {}), '()\n', (4141, 4143), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((4405, 4430), 'utils.helpers.get_batched_sample_data', 'get_batched_sample_data', ([], {}), '()\n', (4428, 4430), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((4661, 4714), 'utils.helpers.get_uploaded_batched_data', 'get_uploaded_batched_data', ([], {'availability': '"""unavailable"""'}), 
"(availability='unavailable')\n", (4686, 4714), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((5991, 6015), 'utils.helpers.find_folder', 'find_folder', (['"""2_sampled"""'], {}), "('2_sampled')\n", (6002, 6015), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((6068, 6086), 'utils.helpers.get_sampled_data', 'get_sampled_data', ([], {}), '()\n', (6084, 6086), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((6564, 6589), 'utils.helpers.get_batched_sample_data', 'get_batched_sample_data', ([], {}), '()\n', (6587, 6589), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((6820, 6873), 'utils.helpers.get_uploaded_batched_data', 'get_uploaded_batched_data', ([], {'availability': '"""unavailable"""'}), "(availability='unavailable')\n", (6845, 6873), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((8863, 8902), 'os.path.join', 'os.path.join', (['sample_folder', 'batch_name'], {}), '(sample_folder, batch_name)\n', (8875, 8902), False, 'import os\n'), ((9989, 10033), 'datetime.datetime.strptime', 'datetime.strptime', (['base_date_str', '"""%Y-%m-%d"""'], {}), "(base_date_str, '%Y-%m-%d')\n", (10006, 10033), False, 'from datetime import datetime\n'), ((12502, 12520), 'utils.helpers.get_sampled_data', 'get_sampled_data', ([], {}), '()\n', (12518, 12520), False, 'from utils.helpers import get_parsed_data, 
get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((621, 644), 'random.randint', 'randint', (['(0)', '(2 ** 32 - 1)'], {}), '(0, 2 ** 32 - 1)\n', (628, 644), False, 'from random import randint\n'), ((3777, 3801), 'utils.helpers.find_folder', 'find_folder', (['"""2_sampled"""'], {}), "('2_sampled')\n", (3788, 3801), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((4181, 4200), 'utils.helpers.get_labelled_data', 'get_labelled_data', ([], {}), '()\n', (4198, 4200), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((6340, 6359), 'utils.helpers.get_labelled_data', 'get_labelled_data', ([], {}), '()\n', (6357, 6359), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((8918, 8946), 'os.path.isdir', 'os.path.isdir', (['output_folder'], {}), '(output_folder)\n', (8931, 8946), False, 'import os\n'), ((8960, 8983), 'os.mkdir', 'os.mkdir', (['output_folder'], {}), '(output_folder)\n', (8968, 8983), False, 'import os\n'), ((13365, 13405), 'utils.helpers.get_uploaded_batched_data', 'get_uploaded_batched_data', (['"""unavailable"""'], {}), "('unavailable')\n", (13390, 13405), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((13660, 13698), 'utils.helpers.get_uploaded_batched_data', 'get_uploaded_batched_data', (['"""available"""'], {}), "('available')\n", (13685, 13698), False, 'from utils.helpers import get_parsed_data, get_sampled_data, get_labelled_data, 
get_cleaned_labelled_data, find_folder, get_uploaded_batched_data, get_batched_sample_data\n'), ((9221, 9235), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9233, 9235), False, 'from datetime import datetime\n'), ((8529, 8554), 'os.listdir', 'os.listdir', (['sample_folder'], {}), '(sample_folder)\n', (8539, 8554), False, 'import os\n'), ((8599, 8629), 'os.path.join', 'os.path.join', (['sample_folder', 's'], {}), '(sample_folder, s)\n', (8611, 8629), False, 'import os\n')]
|
import io
from typing import Iterator, Optional
from config import *
from utils import create_client_engine
class StringIteratorIO(io.TextIOBase):
def __init__(self, iter: Iterator[str]):
self._iter = iter
self._buff = ""
def readable(self) -> bool:
return True
def _read1(self, n: Optional[int] = None) -> str:
while not self._buff:
try:
self._buff = next(self._iter)
except StopIteration:
break
ret = self._buff[:n]
self._buff = self._buff[len(ret) :]
return ret
def read(self, n: Optional[int] = None) -> str:
line = []
if n is None or n < 0:
while True:
m = self._read1()
if not m:
break
line.append(m)
else:
while n > 0:
m = self._read1(n)
if not m:
break
n -= len(m)
line.append(m)
return "".join(line)
def clean_csv_value(value) -> str:
if value is None:
return r"\N"
return str(value).replace("\n", "\\n")
def copy_string_iterator(df, size):
client, client_engine = create_client_engine()
string_iterator = StringIteratorIO(
(
"|".join(
map(
clean_csv_value,
(
row["referrer"],
row["resource"],
row["type"],
row["number_of_occurrences"],
row["date"],
row["wiki"],
),
)
)
+ "\n"
for _, row in df.iterrows()
)
)
# print(string_iterator.read())
client.cursor.copy_from(
string_iterator, f"staging_{DATAWAREHOUSE_TABLE}", sep="|", size=size
)
client.disconnect()
|
[
"utils.create_client_engine"
] |
[((1240, 1262), 'utils.create_client_engine', 'create_client_engine', ([], {}), '()\n', (1260, 1262), False, 'from utils import create_client_engine\n')]
|
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""translation dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
from disentanglement_lib.data.ground_truth import ground_truth_data
from disentanglement_lib.data.ground_truth import util
import numpy as np
import gin
from six.moves import range
def to_honeycomb(x):
x1 = np.zeros(x.shape)
x1[:, 0] = x[:, 0] + (x[:, 1] % 2) * 0.5
x1[:, 1] = x[:, 1] / 2 * np.sqrt(3)
return x1
@gin.configurable("translation")
class Translation(ground_truth_data.GroundTruthData):
"""Translation dataset.
"""
def __init__(self, pos_type: int, radius=10):
# By default, all factors (including shape) are considered ground truth
# factors.
factors = np.zeros((22 * 22, 2))
factors[:, 0] = np.arange(22 * 22) // 22
factors[:, 1] = np.arange(22 * 22) % 22
factors = factors
if pos_type == 0:
pos = factors * 2 # cartesian
elif pos_type == 1:
pos = to_honeycomb(factors) * 2
elif pos_type == 2:
r = 1 + factors[:, 0] / 22 * 20
theta = factors[:, 1] / 22 * 2 * np.pi
pos = np.zeros((22 * 22, 2))
pos[:, 1] = r * np.cos(theta) + 32
pos[:, 0] = r * np.sin(theta) + 32
else:
raise NotImplementedError()
self.data_shape = [64, 64, 1]
self.factor_sizes = [22, 22]
self.pos = pos
self.latent_factor_indices = np.zeros(self.factor_sizes, dtype=np.int)
for i in range(self.factor_sizes[0]):
self.latent_factor_indices[i] = self.factor_sizes[0] * i + np.arange(self.factor_sizes[1])
self.data = np.zeros([len(self)] + self.data_shape, dtype=np.float32)
index = 0
for i in range(self.factor_sizes[0]):
for j in range(self.factor_sizes[1]):
img = np.zeros(self.data_shape)
cv2.circle(img, (10, 10), radius, (1.0,), -1)
M = np.float32([[1, 0, pos[index, 0]], [0, 1, pos[index, 1]]])
self.data[index, :, :, 0] = cv2.warpAffine(img, M, (64, 64))
index += 1
@property
def num_factors(self):
return len(self.factor_sizes)
@property
def factors_num_values(self):
return self.factor_sizes
@property
def observation_shape(self):
return self.data_shape
def sample_factors(self, num, random_state):
"""Sample a batch of factors Y."""
factors = [random_state.randint(i, size=num) for i in self.factors_num_values]
return np.stack(factors, axis=1)
def sample_observations_from_factors(self, factors, random_state):
indices = self.latent_factor_indices[factors[:, 0], factors[:, 1]]
return self.data[indices]
def _sample_factor(self, i, num, random_state):
return random_state.randint(self.factor_sizes[i], size=num)
def set_img(original, i, j, img):
h, w, _ = img.shape
if i + h > 64:
original[i:i + h, j:j + w] = img[:min(-1, 64 - i - h)]
else:
original[i:i + h, j:j + w] = img
return original
|
[
"numpy.stack",
"cv2.circle",
"six.moves.range",
"numpy.float32",
"numpy.zeros",
"cv2.warpAffine",
"numpy.sin",
"numpy.arange",
"numpy.cos",
"gin.configurable",
"numpy.sqrt"
] |
[((1125, 1156), 'gin.configurable', 'gin.configurable', (['"""translation"""'], {}), "('translation')\n", (1141, 1156), False, 'import gin\n'), ((1005, 1022), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (1013, 1022), True, 'import numpy as np\n'), ((1097, 1107), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1104, 1107), True, 'import numpy as np\n'), ((1415, 1437), 'numpy.zeros', 'np.zeros', (['(22 * 22, 2)'], {}), '((22 * 22, 2))\n', (1423, 1437), True, 'import numpy as np\n'), ((2150, 2191), 'numpy.zeros', 'np.zeros', (['self.factor_sizes'], {'dtype': 'np.int'}), '(self.factor_sizes, dtype=np.int)\n', (2158, 2191), True, 'import numpy as np\n'), ((2209, 2236), 'six.moves.range', 'range', (['self.factor_sizes[0]'], {}), '(self.factor_sizes[0])\n', (2214, 2236), False, 'from six.moves import range\n'), ((2457, 2484), 'six.moves.range', 'range', (['self.factor_sizes[0]'], {}), '(self.factor_sizes[0])\n', (2462, 2484), False, 'from six.moves import range\n'), ((3266, 3291), 'numpy.stack', 'np.stack', (['factors'], {'axis': '(1)'}), '(factors, axis=1)\n', (3274, 3291), True, 'import numpy as np\n'), ((1462, 1480), 'numpy.arange', 'np.arange', (['(22 * 22)'], {}), '(22 * 22)\n', (1471, 1480), True, 'import numpy as np\n'), ((1511, 1529), 'numpy.arange', 'np.arange', (['(22 * 22)'], {}), '(22 * 22)\n', (1520, 1529), True, 'import numpy as np\n'), ((2507, 2534), 'six.moves.range', 'range', (['self.factor_sizes[1]'], {}), '(self.factor_sizes[1])\n', (2512, 2534), False, 'from six.moves import range\n'), ((2309, 2340), 'numpy.arange', 'np.arange', (['self.factor_sizes[1]'], {}), '(self.factor_sizes[1])\n', (2318, 2340), True, 'import numpy as np\n'), ((2558, 2583), 'numpy.zeros', 'np.zeros', (['self.data_shape'], {}), '(self.data_shape)\n', (2566, 2583), True, 'import numpy as np\n'), ((2600, 2645), 'cv2.circle', 'cv2.circle', (['img', '(10, 10)', 'radius', '(1.0,)', '(-1)'], {}), '(img, (10, 10), radius, (1.0,), -1)\n', (2610, 2645), False, 'import 
cv2\n'), ((2666, 2724), 'numpy.float32', 'np.float32', (['[[1, 0, pos[index, 0]], [0, 1, pos[index, 1]]]'], {}), '([[1, 0, pos[index, 0]], [0, 1, pos[index, 1]]])\n', (2676, 2724), True, 'import numpy as np\n'), ((2769, 2801), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(64, 64)'], {}), '(img, M, (64, 64))\n', (2783, 2801), False, 'import cv2\n'), ((1843, 1865), 'numpy.zeros', 'np.zeros', (['(22 * 22, 2)'], {}), '((22 * 22, 2))\n', (1851, 1865), True, 'import numpy as np\n'), ((1894, 1907), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1900, 1907), True, 'import numpy as np\n'), ((1941, 1954), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1947, 1954), True, 'import numpy as np\n')]
|
import PyQt5
import os
import imutils
import cv2
import numpy as np
from PIL import Image as im
from PyQt5 import QtWidgets, uic, QtGui
from PyQt5.QtGui import QGuiApplication
import sys
#Image augmentation GUI App
def contour_crop_no_resize(image,dim):
'''
Contour and crop the image (generally used in brain mri images and object detection)
:param image: cv2 object
'''
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
return resized
def contour_crop_resize(image,dim):
'''
Contour and crop the image (generally used in brain mri images and object detection)
:param image: cv2 object
'''
grayscale=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
grayscale=cv2.GaussianBlur(grayscale,(5,5),0)
threshold_image=cv2.threshold(grayscale,45,255,cv2.THRESH_BINARY)[1]
threshold_image=cv2.erode(threshold_image,None,iterations=2)
threshold_image=cv2.dilate(threshold_image,None,iterations=2)
contour=cv2.findContours(threshold_image.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
contour=imutils.grab_contours(contour)
c=max(contour,key=cv2.contourArea)
extreme_pnts_left=tuple(c[c[:,:,0].argmin()][0])
extreme_pnts_right=tuple(c[c[:,:,0].argmax()][0])
extreme_pnts_top=tuple(c[c[:,:,1].argmin()][0])
extreme_pnts_bot=tuple(c[c[:,:,1].argmax()][0])
new_image=image[extreme_pnts_top[1]:extreme_pnts_bot[1],extreme_pnts_left[0]:extreme_pnts_right[0]]
resized = cv2.resize(new_image, dim, interpolation = cv2.INTER_AREA)
return resized
def brightness_increase(image,brightness):
'''
increase the brightness of the image
:param image: cv2 object
:param brightness: brightness increasing level
'''
bright=np.ones(image.shape,dtype="uint8")*brightness
brightincreased=cv2.add(image,bright)
return brightincreased
def decrease_brightness(image,brightness):
'''
decrease the brightness of the image
:param image: cv2 object
:param brightness: brightness decreasing level
'''
bright=np.ones(image.shape,dtype="uint8")*50
brightdecrease=cv2.subtract(image,bright)
return brightdecrease
def rotate(image,angle=90, scale=1.0):
'''
Rotate the image
:param image: cv2 object
:param image: image to be processed
:param angle: Rotation angle in degrees. Positive values mean counter-clockwise rotation (the coordinate origin is assumed to be the top-left corner).
:param scale: Isotropic scale factor.
'''
w = image.shape[1]
h = image.shape[0]
#rotate matrix
M = cv2.getRotationMatrix2D((w/2,h/2), angle, scale)
#rotate
image = cv2.warpAffine(image,M,(w,h))
return image
def flip(image,axis):
'''
Flip the image
:param image: cv2 object
:param axis: axis to flip
'''
flip=cv2.flip(image,axis)
return flip
def sharpen(image):
'''
Sharpen the image
:param image: cv2 object
'''
sharpening=np.array([ [-1,-1,-1],
[-1,10,-1],
[-1,-1,-1] ])
sharpened=cv2.filter2D(image,-1,sharpening)
return sharpened
def shear(image,axis):
'''
Shear the image
:param image: cv2 object
:param axis: axis which image will be sheared
'''
rows, cols, dim = image.shape
if axis==0:
M = np.float32([[1, 0.5, 0],
[0, 1 , 0],
[0, 0 , 1]])
elif axis==1:
M = np.float32([[1, 0, 0],
[0.5, 1, 0],
[0, 0, 1]])
sheared_img = cv2.warpPerspective(image,M,(int(cols*1.5),int(rows*1.5)))
return sheared_img
class Ui(QtWidgets.QMainWindow):
def __init__(self):
super(Ui, self).__init__()
uic.loadUi('augmentation.ui', self)
self.show()
self.setWindowTitle("Image Augmentor")
mypixmap=QtGui.QPixmap("2582365.ico")
my_icon=QtGui.QIcon(mypixmap)
self.setWindowIcon(my_icon)
self.lineEdit=self.findChild(QtWidgets.QLineEdit,'lineEdit')
self.checkBox = self.findChild(QtWidgets.QCheckBox,'checkBox_1')
self.checkBox_2=self.findChild(QtWidgets.QCheckBox,'checkBox_2')
self.checkBox_3=self.findChild(QtWidgets.QCheckBox,'checkBox_3')
self.checkBox_4=self.findChild(QtWidgets.QCheckBox,'checkBox_4')
self.checkBox_5=self.findChild(QtWidgets.QCheckBox,'checkBox_5')
self.checkBox_6=self.findChild(QtWidgets.QCheckBox,'checkBox_6')
self.checkBox_7=self.findChild(QtWidgets.QCheckBox,'checkBox_7')
self.button=self.findChild(QtWidgets.QPushButton,'pushButton')
self.button2=self.findChild(QtWidgets.QPushButton,'pushButton_2')
self.button3=self.findChild(QtWidgets.QPushButton,'pushButton_3')
self.button2.clicked.connect(self.clear)
self.button3.clicked.connect(self.clearline)
self.progress=self.findChild(QtWidgets.QProgressBar,'progressBar')
self.spin1=self.findChild(QtWidgets.QSpinBox,'spinBox')
self.spin2=self.findChild(QtWidgets.QSpinBox,'spinBox_2')
self.button.clicked.connect(self.submit)
def clearline(self):
self.lineEdit.clear()
def clear(self):
if self.button2.text()=="Clear Choices":
self.button2.setText("Toggle")
self.checkBox.setChecked(False)
self.checkBox_2.setChecked(False)
self.checkBox_3.setChecked(False)
self.checkBox_4.setChecked(False)
self.checkBox_5.setChecked(False)
self.checkBox_6.setChecked(False)
self.checkBox_7.setChecked(False)
elif self.button2.text()=="Toggle":
self.button2.setText("Clear Choices")
self.checkBox.setChecked(True)
self.checkBox_2.setChecked(True)
self.checkBox_3.setChecked(True)
self.checkBox_4.setChecked(True)
self.checkBox_5.setChecked(True)
self.checkBox_6.setChecked(True)
self.checkBox_7.setChecked(True)
def submit(self):
counter=0
dim=(int(self.spin1.value()),int(self.spin2.value()))
path=self.lineEdit.text()
if str(path).startswith('"') and str(path).endswith('"'):
path=path[1:-1]
my_path=str(path).split("\\")
folder_name=my_path.copy().pop()
my_path_popped=my_path[:-1]
# using list comprehension
def listToString(s):
# initialize an empty string
str1 = ""
# traverse in the string
for ele in s:
str1 += ele
str1 += "/"
# return string
return str1
poppedString=listToString(my_path_popped)
if not os.path.exists(poppedString):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText("Error")
msg.setInformativeText("Change your path!")
msg.setWindowTitle("Error!")
msg.setDetailedText("Program could not find the path you provided.")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)
msg.exec_()
String=listToString(my_path)
i=float(0.000001)
aug_path=poppedString+"/augmented_"+folder_name
if not os.path.exists(aug_path):
os.makedirs(aug_path)
for subdir, dirs, files in os.walk(String):
for file in files:
QGuiApplication.processEvents()
filepath = subdir + "/" + file
imagem=cv2.imread(filepath)
resized=0
if self.checkBox.isChecked():
cv2.imwrite(aug_path+"/cntrdrszd_"+file,contour_crop_resize(imagem,dim))
resized=contour_crop_resize(imagem,dim)
else:
cv2.imwrite(aug_path+"/rszd_"+file,contour_crop_no_resize(imagem,dim))
resized=contour_crop_no_resize(imagem,dim)
if self.checkBox_2.isChecked():
cv2.imwrite(aug_path+"/brinc_"+file,brightness_increase(resized,50))
if self.checkBox_3.isChecked():
cv2.imwrite(aug_path+"/brdec_"+file,decrease_brightness(resized,50))
if self.checkBox_4.isChecked():
cv2.imwrite(aug_path+"/rtd90_"+file,rotate(resized,90,1))
cv2.imwrite(aug_path+"/rtd45_"+file,rotate(resized,45,1))
cv2.imwrite(aug_path+"/rtd30_"+file,rotate(resized,30,1))
cv2.imwrite(aug_path+"/rtd270_"+file,rotate(resized,270,1))
cv2.imwrite(aug_path+"/rtd315_"+file,rotate(resized,315,1))
cv2.imwrite(aug_path+"/rtd330_"+file,rotate(resized,330,1))
if self.checkBox_2.isChecked():
cv2.imwrite(aug_path+"/binc_rtd90_"+file,rotate(brightness_increase(resized,50),90,1))
cv2.imwrite(aug_path+"/binc_rtd45_"+file,rotate(brightness_increase(resized,50),45,1))
cv2.imwrite(aug_path+"/binc_rtd30_"+file,rotate(brightness_increase(resized,50),30,1))
cv2.imwrite(aug_path+"/binc_rtd270_"+file,rotate(brightness_increase(resized,50),270,1))
cv2.imwrite(aug_path+"/binc_rtd315_"+file,rotate(brightness_increase(resized,50),315,1))
cv2.imwrite(aug_path+"/binc_rtd330_"+file,rotate(brightness_increase(resized,50),330,1))
if self.checkBox_3.isChecked():
cv2.imwrite(aug_path+"/bdec_rtd90_"+file,rotate(decrease_brightness(resized,50),90,1))
cv2.imwrite(aug_path+"/bdec_rtd45_"+file,rotate(decrease_brightness(resized,50),45,1))
cv2.imwrite(aug_path+"/bdec_rtd30_"+file,rotate(decrease_brightness(resized,50),30,1))
cv2.imwrite(aug_path+"/bdec_rtd270_"+file,rotate(decrease_brightness(resized,50),270,1))
cv2.imwrite(aug_path+"/bdec_rtd315_"+file,rotate(decrease_brightness(resized,50),315,1))
cv2.imwrite(aug_path+"/bdec_rtd330_"+file,rotate(decrease_brightness(resized,50),330,1))
if self.checkBox_5.isChecked():
cv2.imwrite(aug_path+"/flipxy_"+file,flip(resized,-1))
cv2.imwrite(aug_path+"/flipx_"+file,flip(resized,0))
cv2.imwrite(aug_path+"/flipy_"+file,flip(resized,1))
if self.checkBox_2.isChecked():
cv2.imwrite(aug_path+"/binc_flipxy_"+file,flip(brightness_increase(resized,50),-1))
cv2.imwrite(aug_path+"/binc_flipx_"+file,flip(brightness_increase(resized,50),0))
cv2.imwrite(aug_path+"/bdec_flipy_"+file,flip(brightness_increase(resized,50),1))
if self.checkBox_3.isChecked():
cv2.imwrite(aug_path+"/bdec_flipxy_"+file,flip(decrease_brightness(resized,50),-1))
cv2.imwrite(aug_path+"/bdec_flipx_"+file,flip(decrease_brightness(resized,50),0))
cv2.imwrite(aug_path+"/bdec_flipy_"+file,flip(decrease_brightness(resized,50),1))
if self.checkBox_4.isChecked():
cv2.imwrite(aug_path+"/rtd90_flipxy_"+file,flip(rotate(resized,90,1),-1))
cv2.imwrite(aug_path+"/rtd45_flipxy_"+file,flip(rotate(resized,45,1),-1))
cv2.imwrite(aug_path+"/rtd30_flipxy_"+file,flip(rotate(resized,30,1),-1))
cv2.imwrite(aug_path+"/rtd270_flipxy_"+file,flip(rotate(resized,270,1),-1))
cv2.imwrite(aug_path+"/rtd315_flipxy_"+file,flip(rotate(resized,315,1),-1))
cv2.imwrite(aug_path+"/rtd330_flipxy_"+file,flip(rotate(resized,330,1),-1))
cv2.imwrite(aug_path+"/rtd90_flipx_"+file,flip(rotate(resized,90,1),0))
cv2.imwrite(aug_path+"/rtd45_flipx_"+file,flip(rotate(resized,45,1),0))
cv2.imwrite(aug_path+"/rtd30_flipx_"+file,flip(rotate(resized,30,1),0))
cv2.imwrite(aug_path+"/rtd270_flipx_"+file,flip(rotate(resized,270,1),0))
cv2.imwrite(aug_path+"/rtd315_flipx_"+file,flip(rotate(resized,315,1),0))
cv2.imwrite(aug_path+"/rtd330_flipx_"+file,flip(rotate(resized,330,1),0))
cv2.imwrite(aug_path+"/rtd90_flipy_"+file,flip(rotate(resized,90,1),1))
cv2.imwrite(aug_path+"/rtd45_flipy_"+file,flip(rotate(resized,45,1),1))
cv2.imwrite(aug_path+"/rtd30_flipy_"+file,flip(rotate(resized,30,1),1))
cv2.imwrite(aug_path+"/rtd270_flipy_"+file,flip(rotate(resized,270,1),1))
cv2.imwrite(aug_path+"/rtd315_flipy_"+file,flip(rotate(resized,315,1),1))
cv2.imwrite(aug_path+"/rtd330_flipy_"+file,flip(rotate(resized,330,1),1))
if self.checkBox_4.isChecked() and self.checkBox_2.isChecked():
cv2.imwrite(aug_path+"/binc_rtd90_flipxy_"+file,flip(rotate(brightness_increase(resized,50),90,1),-1))
cv2.imwrite(aug_path+"/binc_rtd45_flipxy_"+file,flip(rotate(brightness_increase(resized,50),45,1),-1))
cv2.imwrite(aug_path+"/binc_rtd30_flipxy_"+file,flip(rotate(brightness_increase(resized,50),30,1),-1))
cv2.imwrite(aug_path+"/binc_rtd270_flipxy_"+file,flip(rotate(brightness_increase(resized,50),270,1),-1))
cv2.imwrite(aug_path+"/binc_rtd315_flipxy_"+file,flip(rotate(brightness_increase(resized,50),315,1),-1))
cv2.imwrite(aug_path+"/binc_rtd330_flipxy_"+file,flip(rotate(brightness_increase(resized,50),330,1),-1))
cv2.imwrite(aug_path+"/binc_rtd90_flipx_"+file,flip(rotate(brightness_increase(resized,50),90,1),0))
cv2.imwrite(aug_path+"/binc_rtd45_flipx_"+file,flip(rotate(brightness_increase(resized,50),45,1),0))
cv2.imwrite(aug_path+"/binc_rtd30_flipx_"+file,flip(rotate(brightness_increase(resized,50),30,1),0))
cv2.imwrite(aug_path+"/binc_rtd270_flipx_"+file,flip(rotate(brightness_increase(resized,50),270,1),0))
cv2.imwrite(aug_path+"/binc_rtd315_flipx_"+file,flip(rotate(brightness_increase(resized,50),315,1),0))
cv2.imwrite(aug_path+"/binc_rtd330_flipx_"+file,flip(rotate(brightness_increase(resized,50),330,1),0))
cv2.imwrite(aug_path+"/binc_rtd90_flipy_"+file,flip(rotate(brightness_increase(resized,50),90,1),1))
cv2.imwrite(aug_path+"/binc_rtd45_flipy_"+file,flip(rotate(brightness_increase(resized,50),45,1),1))
cv2.imwrite(aug_path+"/binc_rtd30_flipy_"+file,flip(rotate(brightness_increase(resized,50),30,1),1))
cv2.imwrite(aug_path+"/binc_rtd270_flipy_"+file,flip(rotate(brightness_increase(resized,50),270,1),1))
cv2.imwrite(aug_path+"/binc_rtd315_flipy_"+file,flip(rotate(brightness_increase(resized,50),315,1),1))
cv2.imwrite(aug_path+"/binc_rtd330_flipy_"+file,flip(rotate(brightness_increase(resized,50),330,1),1))
if self.checkBox_4.isChecked() and self.checkBox_3.isChecked():
cv2.imwrite(aug_path+"/bdec_rtd90_flipxy_"+file,flip(rotate(decrease_brightness(resized,50),90,1),-1))
cv2.imwrite(aug_path+"/bdec_rtd45_flipxy_"+file,flip(rotate(decrease_brightness(resized,50),45,1),-1))
cv2.imwrite(aug_path+"/bdec_rtd30_flipxy_"+file,flip(rotate(decrease_brightness(resized,50),30,1),-1))
cv2.imwrite(aug_path+"/bdec_rtd270_flipxy_"+file,flip(rotate(decrease_brightness(resized,50),270,1),-1))
cv2.imwrite(aug_path+"/bdec_rtd315_flipxy_"+file,flip(rotate(decrease_brightness(resized,50),315,1),-1))
cv2.imwrite(aug_path+"/bdec_rtd330_flipxy_"+file,flip(rotate(decrease_brightness(resized,50),330,1),-1))
cv2.imwrite(aug_path+"/bdec_rtd90_flipx_"+file,flip(rotate(decrease_brightness(resized,50),90,1),0))
cv2.imwrite(aug_path+"/bdec_rtd45_flipx_"+file,flip(rotate(decrease_brightness(resized,50),45,1),0))
cv2.imwrite(aug_path+"/bdec_rtd30_flipx_"+file,flip(rotate(decrease_brightness(resized,50),30,1),0))
cv2.imwrite(aug_path+"/bdec_rtd270_flipx_"+file,flip(rotate(decrease_brightness(resized,50),270,1),0))
cv2.imwrite(aug_path+"/bdec_rtd315_flipx_"+file,flip(rotate(decrease_brightness(resized,50),315,1),0))
cv2.imwrite(aug_path+"/bdec_rtd330_flipx_"+file,flip(rotate(decrease_brightness(resized,50),330,1),0))
cv2.imwrite(aug_path+"/bdec_rtd90_flipy_"+file,flip(rotate(decrease_brightness(resized,50),90,1),1))
cv2.imwrite(aug_path+"/bdec_rtd45_flipy_"+file,flip(rotate(decrease_brightness(resized,50),45,1),1))
cv2.imwrite(aug_path+"/bdec_rtd30_flipy_"+file,flip(rotate(decrease_brightness(resized,50),30,1),1))
cv2.imwrite(aug_path+"/bdec_rtd270_flipy_"+file,flip(rotate(decrease_brightness(resized,50),270,1),1))
cv2.imwrite(aug_path+"/bdec_rtd315_flipy_"+file,flip(rotate(decrease_brightness(resized,50),315,1),1))
cv2.imwrite(aug_path+"/bdec_rtd330_flipy_"+file,flip(rotate(decrease_brightness(resized,50),330,1),1))
if self.checkBox_6.isChecked():
cv2.imwrite(aug_path+"/shrpnd_"+file,sharpen(resized))
if self.checkBox_2.isChecked():
cv2.imwrite(aug_path+"/binc_shrpnd_"+file,sharpen(brightness_increase(resized,50)))
if self.checkBox_3.isChecked():
cv2.imwrite(aug_path+"/bdec_shrpnd_"+file,sharpen(decrease_brightness(resized,50)))
if self.checkBox_4.isChecked():
cv2.imwrite(aug_path+"/rtd90_shrpnd_"+file,sharpen(rotate(resized,90,1)))
cv2.imwrite(aug_path+"/rtd45_shrpnd_"+file,sharpen(rotate(resized,45,1)))
cv2.imwrite(aug_path+"/rtd30_shrpnd_"+file,sharpen(rotate(resized,30,1)))
cv2.imwrite(aug_path+"/rtd270_shrpnd_"+file,sharpen(rotate(resized,270,1)))
cv2.imwrite(aug_path+"/rtd315_shrpnd_"+file,sharpen(rotate(resized,315,1)))
cv2.imwrite(aug_path+"/rtd330_shrpnd_"+file,sharpen(rotate(resized,330,1)))
if self.checkBox_5.isChecked():
cv2.imwrite(aug_path+"/flipxy_shrpnd_"+file,sharpen(flip(resized,-1)))
cv2.imwrite(aug_path+"/flipx_shrpnd_"+file,sharpen(flip(resized,0)))
cv2.imwrite(aug_path+"/flipy_shrpnd"+file,sharpen(flip(resized,1)))
if self.checkBox_4.isChecked() and self.checkBox_2.isChecked():
cv2.imwrite(aug_path+"/binc_rtd90_shrpnd_"+file,sharpen(rotate(brightness_increase(resized,50),90,1)))
cv2.imwrite(aug_path+"/binc_rtd45_shrpnd_"+file,sharpen(rotate(brightness_increase(resized,50),45,1)))
cv2.imwrite(aug_path+"/binc_rtd30_shrpnd_"+file,sharpen(rotate(brightness_increase(resized,50),30,1)))
cv2.imwrite(aug_path+"/binc_rtd270_shrpnd_"+file,sharpen(rotate(brightness_increase(resized,50),270,1)))
cv2.imwrite(aug_path+"/binc_rtd315_shrpnd_"+file,sharpen(rotate(brightness_increase(resized,50),315,1)))
cv2.imwrite(aug_path+"/binc_rtd330_shrpnd_"+file,sharpen(rotate(brightness_increase(resized,50),330,1)))
if self.checkBox_4.isChecked() and self.checkBox_3.isChecked():
cv2.imwrite(aug_path+"/bdec_rtd90_shrpnd_"+file,sharpen(rotate(decrease_brightness(resized,50),90,1)))
cv2.imwrite(aug_path+"/bdec_rtd45_shrpnd_"+file,sharpen(rotate(decrease_brightness(resized,50),45,1)))
cv2.imwrite(aug_path+"/bdec_rtd30_shrpnd_"+file,sharpen(rotate(decrease_brightness(resized,50),30,1)))
cv2.imwrite(aug_path+"/bdec_rtd270_shrpnd_"+file,sharpen(rotate(decrease_brightness(resized,50),270,1)))
cv2.imwrite(aug_path+"/bdec_rtd315_shrpnd_"+file,sharpen(rotate(decrease_brightness(resized,50),315,1)))
cv2.imwrite(aug_path+"/bdec_rtd330_shrpnd_"+file,sharpen(rotate(decrease_brightness(resized,50),330,1)))
if self.checkBox_5.isChecked() and self.checkBox_2.isChecked():
cv2.imwrite(aug_path+"/binc_flipxy_shrpnd_"+file,sharpen(flip(brightness_increase(resized,50),-1)))
cv2.imwrite(aug_path+"/binc_flipx_shrpnd_"+file,sharpen(flip(brightness_increase(resized,50),0)))
cv2.imwrite(aug_path+"/binc_flipy_shrpnd_"+file,sharpen(flip(brightness_increase(resized,50),1)))
if self.checkBox_5.isChecked() and self.checkBox_3.isChecked():
cv2.imwrite(aug_path+"/bdec_flipxy_shrpnd_"+file,sharpen(flip(decrease_brightness(resized,50),-1)))
cv2.imwrite(aug_path+"/bdec_flipx_shrpnd_"+file,sharpen(flip(decrease_brightness(resized,50),0)))
cv2.imwrite(aug_path+"/bdec_flipy_shrpnd_"+file,sharpen(flip(decrease_brightness(resized,50),1)))
if self.checkBox_5.isChecked() and self.checkBox_4.isChecked():
cv2.imwrite(aug_path+"/rtd90_flipxy_shrpnd_"+file,sharpen(flip(rotate(resized,90,1),-1)))
cv2.imwrite(aug_path+"/rtd45_flipxy_shrpnd_"+file,sharpen(flip(rotate(resized,45,1),-1)))
cv2.imwrite(aug_path+"/rtd30_flipxy_shrpnd_"+file,sharpen(flip(rotate(resized,30,1),-1)))
cv2.imwrite(aug_path+"/rtd270_flipxy_shrpnd_"+file,sharpen(flip(rotate(resized,270,1),-1)))
cv2.imwrite(aug_path+"/rtd315_flipxy_shrpnd_"+file,sharpen(flip(rotate(resized,315,1),-1)))
cv2.imwrite(aug_path+"/rtd330_flipxy_shrpnd_"+file,sharpen(flip(rotate(resized,330,1),-1)))
cv2.imwrite(aug_path+"/rtd90_flipx_shrpnd_"+file,sharpen(flip(rotate(resized,90,1),0)))
cv2.imwrite(aug_path+"/rtd45_flipx_shrpnd_"+file,sharpen(flip(rotate(resized,45,1),0)))
cv2.imwrite(aug_path+"/rtd30_flipx_shrpnd_"+file,sharpen(flip(rotate(resized,30,1),0)))
cv2.imwrite(aug_path+"/rtd270_flipx_shrpnd_"+file,sharpen(flip(rotate(resized,270,1),0)))
cv2.imwrite(aug_path+"/rtd315_flipx_shrpnd_"+file,sharpen(flip(rotate(resized,315,1),0)))
cv2.imwrite(aug_path+"/rtd330_flipx_shrpnd_"+file,sharpen(flip(rotate(resized,330,1),0)))
cv2.imwrite(aug_path+"/rtd90_flipy_shrpnd_"+file,sharpen(flip(rotate(resized,90,1),1)))
cv2.imwrite(aug_path+"/rtd45_flipy_shrpnd_"+file,sharpen(flip(rotate(resized,45,1),1)))
cv2.imwrite(aug_path+"/rtd30_flipy_shrpnd_"+file,sharpen(flip(rotate(resized,30,1),1)))
cv2.imwrite(aug_path+"/rtd270_flipy_shrpnd_"+file,sharpen(flip(rotate(resized,270,1),1)))
cv2.imwrite(aug_path+"/rtd315_flipy_shrpnd_"+file,sharpen(flip(rotate(resized,315,1),1)))
cv2.imwrite(aug_path+"/rtd330_flipy_shrpnd_"+file,sharpen(flip(rotate(resized,330,1),1)))
if self.checkBox_7.isChecked():
cv2.imwrite(aug_path+"/shrdx_"+file,shear(resized,0))
cv2.imwrite(aug_path+"/shrdy_"+file,shear(resized,1))
if self.checkBox_2.isChecked():
cv2.imwrite(aug_path+"/binc_shrdx_"+file,shear(brightness_increase(resized,50),0))
cv2.imwrite(aug_path+"/binc_shrdy_"+file,shear(brightness_increase(resized,50),1))
if self.checkBox_3.isChecked():
cv2.imwrite(aug_path+"/bdec_shrdx_"+file,shear(decrease_brightness(resized,50),0))
cv2.imwrite(aug_path+"/bdec_shrdy_"+file,shear(decrease_brightness(resized,50),1))
if self.checkBox_4.isChecked():
cv2.imwrite(aug_path+"/rtd90_shrdx_"+file,shear(rotate(resized,90,1),0))
cv2.imwrite(aug_path+"/rtd45_shrdx_"+file,shear(rotate(resized,45,1),0))
cv2.imwrite(aug_path+"/rtd30_shrdx_"+file,shear(rotate(resized,30,1),0))
cv2.imwrite(aug_path+"/rtd270_shrdx_"+file,shear(rotate(resized,270,1),0))
cv2.imwrite(aug_path+"/rtd315_shrdx_"+file,shear(rotate(resized,315,1),0))
cv2.imwrite(aug_path+"/rtd330_shrdx_"+file,shear(rotate(resized,330,1),0))
cv2.imwrite(aug_path+"/rtd90_shrdy_"+file,shear(rotate(resized,90,1),1))
cv2.imwrite(aug_path+"/rtd45_shrdy_"+file,shear(rotate(resized,45,1),1))
cv2.imwrite(aug_path+"/rtd30_shrdy_"+file,shear(rotate(resized,30,1),1))
cv2.imwrite(aug_path+"/rtd270_shrdy_"+file,shear(rotate(resized,270,1),1))
cv2.imwrite(aug_path+"/rtd315_shrdy_"+file,shear(rotate(resized,315,1),1))
cv2.imwrite(aug_path+"/rtd330_shrdy_"+file,shear(rotate(resized,330,1),1))
if self.checkBox_5.isChecked():
cv2.imwrite(aug_path+"/flipxy_shrdx_"+file,shear(flip(resized,-1),0))
cv2.imwrite(aug_path+"/flipx_shrdx_"+file,shear(flip(resized,0),0))
cv2.imwrite(aug_path+"/flipy_shrdx"+file,shear(flip(resized,1),0))
cv2.imwrite(aug_path+"/flipxy_shrdy_"+file,shear(flip(resized,-1),1))
cv2.imwrite(aug_path+"/flipx_shrdy_"+file,shear(flip(resized,0),1))
cv2.imwrite(aug_path+"/flipy_shrdy"+file,shear(flip(resized,1),1))
if self.checkBox_6.isChecked():
cv2.imwrite(aug_path+"/shrpnd_shrdx"+file,shear(sharpen(resized),0))
cv2.imwrite(aug_path+"/shrpnd_shrdy"+file,shear(sharpen(resized),1))
if self.checkBox_4.isChecked() and self.checkBox_2.isChecked():
cv2.imwrite(aug_path+"/binc_rtd90_shrdx_"+file,shear(rotate(brightness_increase(resized,50),90,1),0))
cv2.imwrite(aug_path+"/binc_rtd45_shrdx_"+file,shear(rotate(brightness_increase(resized,50),45,1),0))
cv2.imwrite(aug_path+"/binc_rtd30_shrdx_"+file,shear(rotate(brightness_increase(resized,50),30,1),0))
cv2.imwrite(aug_path+"/binc_rtd270_shrdx_"+file,shear(rotate(brightness_increase(resized,50),270,1),0))
cv2.imwrite(aug_path+"/binc_rtd315_shrdx_"+file,shear(rotate(brightness_increase(resized,50),315,1),0))
cv2.imwrite(aug_path+"/binc_rtd330_shrdx_"+file,shear(rotate(brightness_increase(resized,50),330,1),0))
cv2.imwrite(aug_path+"/binc_rtd90_shrdy_"+file,shear(rotate(brightness_increase(resized,50),90,1),1))
cv2.imwrite(aug_path+"/binc_rtd45_shrdy_"+file,shear(rotate(brightness_increase(resized,50),45,1),1))
cv2.imwrite(aug_path+"/binc_rtd30_shrdy_"+file,shear(rotate(brightness_increase(resized,50),30,1),1))
cv2.imwrite(aug_path+"/binc_rtd270_shrdy_"+file,shear(rotate(brightness_increase(resized,50),270,1),1))
cv2.imwrite(aug_path+"/binc_rtd315_shrdy_"+file,shear(rotate(brightness_increase(resized,50),315,1),1))
cv2.imwrite(aug_path+"/binc_rtd330_shrdy_"+file,shear(rotate(brightness_increase(resized,50),330,1),1))
if self.checkBox_4.isChecked() and self.checkBox_3.isChecked():
cv2.imwrite(aug_path+"/bdec_rtd90_shrdx_"+file,shear(rotate(decrease_brightness(resized,50),90,1),0))
cv2.imwrite(aug_path+"/bdec_rtd45_shrdx_"+file,shear(rotate(decrease_brightness(resized,50),45,1),0))
cv2.imwrite(aug_path+"/bdec_rtd30_shrdx_"+file,shear(rotate(decrease_brightness(resized,50),30,1),0))
cv2.imwrite(aug_path+"/bdec_rtd270_shrdx_"+file,shear(rotate(decrease_brightness(resized,50),270,1),0))
cv2.imwrite(aug_path+"/bdec_rtd315_shrdx_"+file,shear(rotate(decrease_brightness(resized,50),315,1),0))
cv2.imwrite(aug_path+"/bdec_rtd330_shrdx_"+file,shear(rotate(decrease_brightness(resized,50),330,1),0))
cv2.imwrite(aug_path+"/bdec_rtd90_shrdy_"+file,shear(rotate(decrease_brightness(resized,50),90,1),1))
cv2.imwrite(aug_path+"/bdec_rtd45_shrdy_"+file,shear(rotate(decrease_brightness(resized,50),45,1),1))
cv2.imwrite(aug_path+"/bdec_rtd30_shrdy_"+file,shear(rotate(decrease_brightness(resized,50),30,1),1))
cv2.imwrite(aug_path+"/bdec_rtd270_shrdy_"+file,shear(rotate(decrease_brightness(resized,50),270,1),1))
cv2.imwrite(aug_path+"/bdec_rtd315_shrdy_"+file,shear(rotate(decrease_brightness(resized,50),315,1),1))
cv2.imwrite(aug_path+"/bdec_rtd330_shrdy_"+file,shear(rotate(decrease_brightness(resized,50),330,1),1))
if self.checkBox_5.isChecked() and self.checkBox_2.isChecked():
cv2.imwrite(aug_path+"/binc_flipxy_shrdx_"+file,shear(flip(brightness_increase(resized,50),-1),0))
cv2.imwrite(aug_path+"/binc_flipx_shrdx_"+file,shear(flip(brightness_increase(resized,50),0),0))
cv2.imwrite(aug_path+"/binc_flipy_shrdx_"+file,shear(flip(brightness_increase(resized,50),1),0))
cv2.imwrite(aug_path+"/binc_flipxy_shrdy_"+file,shear(flip(brightness_increase(resized,50),-1),1))
cv2.imwrite(aug_path+"/binc_flipx_shrdy_"+file,shear(flip(brightness_increase(resized,50),0),1))
cv2.imwrite(aug_path+"/binc_flipy_shrdy_"+file,shear(flip(brightness_increase(resized,50),1),1))
if self.checkBox_5.isChecked() and self.checkBox_3.isChecked():
cv2.imwrite(aug_path+"/bdec_flipxy_shrdx_"+file,shear(flip(decrease_brightness(resized,50),-1),0))
cv2.imwrite(aug_path+"/bdec_flipx_shrdx_"+file,shear(flip(decrease_brightness(resized,50),0),0))
cv2.imwrite(aug_path+"/bdec_flipy_shrdx_"+file,shear(flip(decrease_brightness(resized,50),1),0))
cv2.imwrite(aug_path+"/bdec_flipxy_shrdy_"+file,shear(flip(decrease_brightness(resized,50),-1),1))
cv2.imwrite(aug_path+"/bdec_flipx_shrdy_"+file,shear(flip(decrease_brightness(resized,50),0),1))
cv2.imwrite(aug_path+"/bdec_flipy_shrdy_"+file,shear(flip(decrease_brightness(resized,50),1),1))
if self.checkBox_5.isChecked() and self.checkBox_4.isChecked():
cv2.imwrite(aug_path+"/rtd90_flipxy_shrdx_"+file,shear(flip(rotate(resized,90,1),-1),0))
cv2.imwrite(aug_path+"/rtd45_flipxy_shrdx_"+file,shear(flip(rotate(resized,45,1),-1),0))
cv2.imwrite(aug_path+"/rtd30_flipxy_shrdx_"+file,shear(flip(rotate(resized,30,1),-1),0))
cv2.imwrite(aug_path+"/rtd270_flipxy_shrdx_"+file,shear(flip(rotate(resized,270,1),-1),0))
cv2.imwrite(aug_path+"/rtd315_flipxy_shrdx_"+file,shear(flip(rotate(resized,315,1),-1),0))
cv2.imwrite(aug_path+"/rtd330_flipxy_shrdx_"+file,shear(flip(rotate(resized,330,1),-1),0))
cv2.imwrite(aug_path+"/rtd90_flipx_shrdx_"+file,shear(flip(rotate(resized,90,1),0),0))
cv2.imwrite(aug_path+"/rtd45_flipx_shrdx_"+file,shear(flip(rotate(resized,45,1),0),0))
cv2.imwrite(aug_path+"/rtd30_flipx_shrdx_"+file,shear(flip(rotate(resized,30,1),0),0))
cv2.imwrite(aug_path+"/rtd270_flipx_shrdx_"+file,shear(flip(rotate(resized,270,1),0),0))
cv2.imwrite(aug_path+"/rtd315_flipx_shrdx_"+file,shear(flip(rotate(resized,315,1),0),0))
cv2.imwrite(aug_path+"/rtd330_flipx_shrdx_"+file,shear(flip(rotate(resized,330,1),0),0))
cv2.imwrite(aug_path+"/rtd90_flipy_shrdx_"+file,shear(flip(rotate(resized,90,1),1),0))
cv2.imwrite(aug_path+"/rtd45_flipy_shrdx_"+file,shear(flip(rotate(resized,45,1),1),0))
cv2.imwrite(aug_path+"/rtd30_flipy_shrdx_"+file,shear(flip(rotate(resized,30,1),1),0))
cv2.imwrite(aug_path+"/rtd270_flipy_shrdx_"+file,shear(flip(rotate(resized,270,1),1),0))
cv2.imwrite(aug_path+"/rtd315_flipy_shrdx_"+file,shear(flip(rotate(resized,315,1),1),0))
cv2.imwrite(aug_path+"/rtd330_flipy_shrdx_"+file,shear(flip(rotate(resized,330,1),1),0))
cv2.imwrite(aug_path+"/rtd90_flipxy_shrdy_"+file,shear(flip(rotate(resized,90,1),-1),1))
cv2.imwrite(aug_path+"/rtd45_flipxy_shrdy_"+file,shear(flip(rotate(resized,45,1),-1),1))
cv2.imwrite(aug_path+"/rtd30_flipxy_shrdy_"+file,shear(flip(rotate(resized,30,1),-1),1))
cv2.imwrite(aug_path+"/rtd270_flipxy_shrdy_"+file,shear(flip(rotate(resized,270,1),-1),1))
cv2.imwrite(aug_path+"/rtd315_flipxy_shrdy_"+file,shear(flip(rotate(resized,315,1),-1),1))
cv2.imwrite(aug_path+"/rtd330_flipxy_shrdy_"+file,shear(flip(rotate(resized,330,1),-1),1))
cv2.imwrite(aug_path+"/rtd90_flipx_shrdy_"+file,shear(flip(rotate(resized,90,1),0),1))
cv2.imwrite(aug_path+"/rtd45_flipx_shrdy_"+file,shear(flip(rotate(resized,45,1),0),1))
cv2.imwrite(aug_path+"/rtd30_flipx_shrdy_"+file,shear(flip(rotate(resized,30,1),0),1))
cv2.imwrite(aug_path+"/rtd270_flipx_shrdy_"+file,shear(flip(rotate(resized,270,1),0),1))
cv2.imwrite(aug_path+"/rtd315_flipx_shrdy_"+file,shear(flip(rotate(resized,315,1),0),1))
cv2.imwrite(aug_path+"/rtd330_flipx_shrdy_"+file,shear(flip(rotate(resized,330,1),0),1))
cv2.imwrite(aug_path+"/rtd90_flipy_shrdy_"+file,shear(flip(rotate(resized,90,1),1),1))
cv2.imwrite(aug_path+"/rtd45_flipy_shrdy_"+file,shear(flip(rotate(resized,45,1),1),1))
cv2.imwrite(aug_path+"/rtd30_flipy_shrdy_"+file,shear(flip(rotate(resized,30,1),1),1))
cv2.imwrite(aug_path+"/rtd270_flipy_shrdy_"+file,shear(flip(rotate(resized,270,1),1),1))
cv2.imwrite(aug_path+"/rtd315_flipy_shrdy_"+file,shear(flip(rotate(resized,315,1),1),1))
cv2.imwrite(aug_path+"/rtd330_flipy_shrdy_"+file,shear(flip(rotate(resized,330,1),1),1))
if self.progress.value!=0 and counter==0:
i+=float(float(100)/float(len(files)))
self.progress.setValue(i)
print(counter,self.progress.value())
if 99==self.progress.value() and counter==0:
self.progress.setValue(0)
counter+=1
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Information")
msg.setInformativeText("Completed!")
msg.setWindowTitle("Finished")
msg.setDetailedText("Your process has been succesfully completed.")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)
msg.exec_()
elif 100<=self.progress.value() and counter==0:
counter+=1
self.progress.setValue(0)
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Information")
msg.setInformativeText("Completed!")
msg.setWindowTitle("Finished")
msg.setDetailedText("Your process has been succesfully completed.")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)
msg.exec_()
app = QtWidgets.QApplication(sys.argv)
app.setStyle('Fusion')
window = Ui()
app.exec_()
|
[
"cv2.GaussianBlur",
"os.walk",
"numpy.ones",
"PyQt5.uic.loadUi",
"cv2.warpAffine",
"PyQt5.QtWidgets.QApplication",
"cv2.erode",
"cv2.getRotationMatrix2D",
"cv2.subtract",
"cv2.filter2D",
"cv2.dilate",
"cv2.cvtColor",
"os.path.exists",
"cv2.resize",
"PyQt5.QtGui.QPixmap",
"imutils.grab_contours",
"cv2.flip",
"cv2.add",
"PyQt5.QtGui.QIcon",
"PyQt5.QtWidgets.QMessageBox",
"os.makedirs",
"cv2.threshold",
"numpy.float32",
"cv2.imread",
"numpy.array",
"PyQt5.QtGui.QGuiApplication.processEvents"
] |
[((38929, 38961), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (38951, 38961), False, 'from PyQt5 import QtWidgets, uic, QtGui\n'), ((422, 474), 'cv2.resize', 'cv2.resize', (['image', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(image, dim, interpolation=cv2.INTER_AREA)\n', (432, 474), False, 'import cv2\n'), ((689, 728), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (701, 728), False, 'import cv2\n'), ((743, 781), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['grayscale', '(5, 5)', '(0)'], {}), '(grayscale, (5, 5), 0)\n', (759, 781), False, 'import cv2\n'), ((874, 920), 'cv2.erode', 'cv2.erode', (['threshold_image', 'None'], {'iterations': '(2)'}), '(threshold_image, None, iterations=2)\n', (883, 920), False, 'import cv2\n'), ((940, 987), 'cv2.dilate', 'cv2.dilate', (['threshold_image', 'None'], {'iterations': '(2)'}), '(threshold_image, None, iterations=2)\n', (950, 987), False, 'import cv2\n'), ((1097, 1127), 'imutils.grab_contours', 'imutils.grab_contours', (['contour'], {}), '(contour)\n', (1118, 1127), False, 'import imutils\n'), ((1507, 1563), 'cv2.resize', 'cv2.resize', (['new_image', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(new_image, dim, interpolation=cv2.INTER_AREA)\n', (1517, 1563), False, 'import cv2\n'), ((1859, 1881), 'cv2.add', 'cv2.add', (['image', 'bright'], {}), '(image, bright)\n', (1866, 1881), False, 'import cv2\n'), ((2169, 2196), 'cv2.subtract', 'cv2.subtract', (['image', 'bright'], {}), '(image, bright)\n', (2181, 2196), False, 'import cv2\n'), ((2656, 2709), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(w / 2, h / 2)', 'angle', 'scale'], {}), '((w / 2, h / 2), angle, scale)\n', (2679, 2709), False, 'import cv2\n'), ((2731, 2763), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(w, h)'], {}), '(image, M, (w, h))\n', (2745, 2763), False, 'import cv2\n'), ((2913, 2934), 'cv2.flip', 'cv2.flip', (['image', 'axis'], 
{}), '(image, axis)\n', (2921, 2934), False, 'import cv2\n'), ((3063, 3115), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 10, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 10, -1], [-1, -1, -1]])\n', (3071, 3115), True, 'import numpy as np\n'), ((3177, 3212), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'sharpening'], {}), '(image, -1, sharpening)\n', (3189, 3212), False, 'import cv2\n'), ((800, 852), 'cv2.threshold', 'cv2.threshold', (['grayscale', '(45)', '(255)', 'cv2.THRESH_BINARY'], {}), '(grayscale, 45, 255, cv2.THRESH_BINARY)\n', (813, 852), False, 'import cv2\n'), ((1792, 1827), 'numpy.ones', 'np.ones', (['image.shape'], {'dtype': '"""uint8"""'}), "(image.shape, dtype='uint8')\n", (1799, 1827), True, 'import numpy as np\n'), ((2111, 2146), 'numpy.ones', 'np.ones', (['image.shape'], {'dtype': '"""uint8"""'}), "(image.shape, dtype='uint8')\n", (2118, 2146), True, 'import numpy as np\n'), ((3448, 3495), 'numpy.float32', 'np.float32', (['[[1, 0.5, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0.5, 0], [0, 1, 0], [0, 0, 1]])\n', (3458, 3495), True, 'import numpy as np\n'), ((3898, 3933), 'PyQt5.uic.loadUi', 'uic.loadUi', (['"""augmentation.ui"""', 'self'], {}), "('augmentation.ui', self)\n", (3908, 3933), False, 'from PyQt5 import QtWidgets, uic, QtGui\n'), ((4025, 4053), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""2582365.ico"""'], {}), "('2582365.ico')\n", (4038, 4053), False, 'from PyQt5 import QtWidgets, uic, QtGui\n'), ((4071, 4092), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['mypixmap'], {}), '(mypixmap)\n', (4082, 4092), False, 'from PyQt5 import QtWidgets, uic, QtGui\n'), ((7775, 7790), 'os.walk', 'os.walk', (['String'], {}), '(String)\n', (7782, 7790), False, 'import os\n'), ((3584, 3631), 'numpy.float32', 'np.float32', (['[[1, 0, 0], [0.5, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0.5, 1, 0], [0, 0, 1]])\n', (3594, 3631), True, 'import numpy as np\n'), ((7047, 7075), 'os.path.exists', 'os.path.exists', (['poppedString'], {}), '(poppedString)\n', 
(7061, 7075), False, 'import os\n'), ((7096, 7119), 'PyQt5.QtWidgets.QMessageBox', 'QtWidgets.QMessageBox', ([], {}), '()\n', (7117, 7119), False, 'from PyQt5 import QtWidgets, uic, QtGui\n'), ((7655, 7679), 'os.path.exists', 'os.path.exists', (['aug_path'], {}), '(aug_path)\n', (7669, 7679), False, 'import os\n'), ((7704, 7725), 'os.makedirs', 'os.makedirs', (['aug_path'], {}), '(aug_path)\n', (7715, 7725), False, 'import os\n'), ((7859, 7890), 'PyQt5.QtGui.QGuiApplication.processEvents', 'QGuiApplication.processEvents', ([], {}), '()\n', (7888, 7890), False, 'from PyQt5.QtGui import QGuiApplication\n'), ((7983, 8003), 'cv2.imread', 'cv2.imread', (['filepath'], {}), '(filepath)\n', (7993, 8003), False, 'import cv2\n'), ((37695, 37718), 'PyQt5.QtWidgets.QMessageBox', 'QtWidgets.QMessageBox', ([], {}), '()\n', (37716, 37718), False, 'from PyQt5 import QtWidgets, uic, QtGui\n'), ((38381, 38404), 'PyQt5.QtWidgets.QMessageBox', 'QtWidgets.QMessageBox', ([], {}), '()\n', (38402, 38404), False, 'from PyQt5 import QtWidgets, uic, QtGui\n')]
|
import logging
import sys
from enum import Enum
class LogLevel(Enum):
DEBUG = 1
INFO = 2
WARNING = 3
ERROR = 4
FATAL = 5
logger = logging.getLogger("plenigo")
def log_message(log_level: LogLevel, message: str):
if log_level == LogLevel.DEBUG:
print(message, file=sys.stderr)
logger.debug(message)
elif log_level == LogLevel.INFO:
logger.info(message)
elif log_level == LogLevel.WARNING:
logger.warning(message)
elif log_level == LogLevel.ERROR:
logger.error(message)
elif log_level == LogLevel.FATAL:
logger.fatal(message)
|
[
"logging.getLogger"
] |
[((154, 182), 'logging.getLogger', 'logging.getLogger', (['"""plenigo"""'], {}), "('plenigo')\n", (171, 182), False, 'import logging\n')]
|
#!/usr/bin/env python3
# flake8: noqa: E501
import os
from pathlib import Path
from subprocess import check_call
def call(command: str) -> int:
print(f'calling: "{command}"')
return check_call(command, shell=True)
def clone_or_update_git_repo(url: str, path: str) -> None:
if not Path(path).expanduser().is_dir():
call(f"git clone {url} {path}")
else:
call(f"git -C {path} pull")
def install_apt_packages():
sudo_command = "sudo"
if "PASS" in os.environ:
# Set pass for CI job
sudo_command = f"echo {os.environ.get('PASS')} | sudo -S"
call(
f"{sudo_command} apt update && {sudo_command} apt install -y vim zsh terminator tmux powerline fonts-powerline mmv"
)
def install_fonts():
call(
"wget -P ~/.local/share/fonts https://github.com/romkatv/powerlevel10k-media/raw/master/MesloLGS%20NF%20Regular.ttf"
)
call(
"wget -P ~/.local/share/fonts https://github.com/romkatv/powerlevel10k-media/raw/master/MesloLGS%20NF%20Bold.ttf"
)
call(
"wget -P ~/.local/share/fonts https://github.com/romkatv/powerlevel10k-media/raw/master/MesloLGS%20NF%20Italic.ttf"
)
call(
"wget -P ~/.local/share/fonts https://github.com/romkatv/powerlevel10k-media/raw/master/MesloLGS%20NF%20Bold%20Italic.ttf"
)
def install_oh_my_zsh():
def install_plugins():
oh_path = "~/.oh-my-zsh/custom"
clone_or_update_git_repo(
"https://github.com/romkatv/powerlevel10k.git",
f"{oh_path}/themes/powerlevel10k",
)
clone_or_update_git_repo(
"https://github.com/zsh-users/zsh-autosuggestions.git",
f"{oh_path}/plugins/zsh-autosuggestions",
)
clone_or_update_git_repo(
"https://github.com/zsh-users/zsh-syntax-highlighting.git",
f"{oh_path}/plugins/zsh-syntax-highlighting",
)
if "ZSH" not in os.environ:
call(
'sh -c "$(wget https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O -) --keep-zshrc"'
)
install_plugins()
def install_vim_config():
clone_or_update_git_repo(
"https://github.com/VundleVim/Vundle.vim.git", "~/.vim/bundle/Vundle.vim"
)
call("vim +PluginInstall +PluginClean! +qall")
def install_tmux_config():
clone_or_update_git_repo(
"https://github.com/tmux-plugins/tpm", "~/.tmux/plugins/tpm"
)
call(
"tmux start-server && tmux new-session -d && sleep 1 && ~/.tmux/plugins/tpm/scripts/install_plugins.sh && tmux kill-server"
)
def install_fzf():
clone_or_update_git_repo("https://github.com/junegunn/fzf.git", "~/.fzf")
call("~/.fzf/install --all")
def install_local_config():
Path.home().joinpath(".zshrc.local").touch(exist_ok=True)
def main():
install_apt_packages()
install_fonts()
install_oh_my_zsh()
install_vim_config()
install_tmux_config()
install_fzf()
install_local_config()
if __name__ == "__main__":
main()
|
[
"os.environ.get",
"pathlib.Path",
"pathlib.Path.home",
"subprocess.check_call"
] |
[((194, 225), 'subprocess.check_call', 'check_call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (204, 225), False, 'from subprocess import check_call\n'), ((564, 586), 'os.environ.get', 'os.environ.get', (['"""PASS"""'], {}), "('PASS')\n", (578, 586), False, 'import os\n'), ((2767, 2778), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (2776, 2778), False, 'from pathlib import Path\n'), ((298, 308), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (302, 308), False, 'from pathlib import Path\n')]
|
# chemspi Local DB Class
import pandas as pd
from chemspipy import ChemSpider
class ChemspiLocalDB:
compound_name_column_label = 'Name'
def __init__(self, filename):
self.database = pd.read_excel(filename)
self.db_names = self.database.loc[:,self.compound_name_column_label].copy().str.lower().sort_values()
def find_compound_ids_by_name_mass(self, compound_name, compound_mass, equal_mass_tolerance_percent = 0.001):
TOLERATED_ERROR = compound_mass * equal_mass_tolerance_percent
ids = []
compound_name = compound_name.lower()
search_name_idx = self.db_names.searchsorted(compound_name)
while search_name_idx < self.db_names.shape[0]:
db_row = self.db_names.index[search_name_idx]
db_entry = self.database.iloc[db_row]
#print("Candidate: ",db_entry.Name.lower(), " ", db_entry[config.CA_MOL_WEIGHT_COL])
if db_entry.Name.lower() != compound_name:
break
else:
# check mass
if compound_mass > 0 and abs(db_entry[config.CA_MOL_WEIGHT_COL] - compound_mass) < TOLERATED_ERROR:
ids.append(db_entry.CSID)
search_name_idx += 1
return ids
# end class
|
[
"pandas.read_excel"
] |
[((198, 221), 'pandas.read_excel', 'pd.read_excel', (['filename'], {}), '(filename)\n', (211, 221), True, 'import pandas as pd\n')]
|
import torch
from torch.distributions import RelaxedOneHotCategorical, Categorical, kl_divergence, register_kl
@register_kl(RelaxedOneHotCategorical, RelaxedOneHotCategorical)
def kl_relaxed_one_hot_categorical(p, q):
p = Categorical(probs=p.probs)
q = Categorical(probs=q.probs)
return kl_divergence(p, q)
|
[
"torch.distributions.register_kl",
"torch.distributions.Categorical",
"torch.distributions.kl_divergence"
] |
[((113, 176), 'torch.distributions.register_kl', 'register_kl', (['RelaxedOneHotCategorical', 'RelaxedOneHotCategorical'], {}), '(RelaxedOneHotCategorical, RelaxedOneHotCategorical)\n', (124, 176), False, 'from torch.distributions import RelaxedOneHotCategorical, Categorical, kl_divergence, register_kl\n'), ((225, 251), 'torch.distributions.Categorical', 'Categorical', ([], {'probs': 'p.probs'}), '(probs=p.probs)\n', (236, 251), False, 'from torch.distributions import RelaxedOneHotCategorical, Categorical, kl_divergence, register_kl\n'), ((258, 284), 'torch.distributions.Categorical', 'Categorical', ([], {'probs': 'q.probs'}), '(probs=q.probs)\n', (269, 284), False, 'from torch.distributions import RelaxedOneHotCategorical, Categorical, kl_divergence, register_kl\n'), ((294, 313), 'torch.distributions.kl_divergence', 'kl_divergence', (['p', 'q'], {}), '(p, q)\n', (307, 313), False, 'from torch.distributions import RelaxedOneHotCategorical, Categorical, kl_divergence, register_kl\n')]
|
import sys
import xml.etree.ElementTree as ET
import re
import io
import os
import copy
import datetime
import zlib
import argparse
#import pdfkit
from shutil import copyfile
from mako.template import Template
import pkgutil
import polypacket
import subprocess
import yaml
sizeDict = {
"uint8" : 1,
"int8" : 1,
"char" : 1,
"string" : 1,
"uint16" : 2,
"int16" : 2,
"uint32" : 4,
"int32" : 4,
"int64" : 8,
"uint64" : 8,
"int" : 4,
"float": 4,
"double": 8,
}
cNameDict = {
"uint8" : "uint8_t",
"int8" : "int8_t",
"char" : "char",
"string" : "char",
"uint16" : "uint16_t",
"int16" : "int16_t",
"uint32" : "uint32_t",
"int32" : "int32_t",
"int64" : "int64_t",
"uint64" : "uint64_t",
"int" : "int",
"float" : "float",
"double" : "double",
"enum" : "uint8_t",
"flag" : "uint8_t"
}
formatDict = {
"hex" : "FORMAT_HEX",
"dec" : "FORMAT_DEC",
"default" : "FORMAT_DEFAULT",
"ascii" : "FORMAT_ASCII",
"none" : "FORMAT_NONE"
}
pyFormatDict = {
"uint8" : "B",
"int8" : "b",
"char" : "c",
"string" : "s",
"uint16" : "H",
"int16" : "h",
"uint32" : "L",
"int32" : "l",
"int64" : "q",
"uint64" : "Q",
"int" : "l",
"float": "f",
"double": "d",
}
def crc(fileName):
prev = 0
for eachLine in open(fileName,"rb"):
prev = zlib.crc32(eachLine, prev)
return prev,"%X"%(prev & 0xFFFFFFFF)
class simulator:
def __init__(self,name, simItem):
self.init =""
self.handlers = {}
self.name = name
if 'init' in simItem:
self.init = simItem['init']
if 'handlers' in simItem:
for handler in simItem['handlers']:
name = list(handler.keys())[0]
code = list(handler.values())[0]
self.handlers[name] = code
class fieldVal:
def __init__(self, name):
self.name = name.upper()
self.desc = ""
self.val = None
class fieldDesc:
def __init__(self, name, strType):
self.vals = []
self.valDict = {}
self.arrayLen = 1
self.isEnum = False
self.isMask = False
self.valsFormat = "0x%0.2X"
self.valIndex = 0
self.format = 'FORMAT_DEFAULT'
if strType in ['flag','flags','mask','bits']:
self.format = 'FORMAT_HEX'
self.isMask = True
strType = 'uint8_t'
if strType in ['enum','enums']:
self.format = 'FORMAT_HEX'
self.isEnum = True
strType = 'uint8_t'
m = re.search('\*([0-9]*)', strType)
if(m):
if(m.group(1) != ''):
self.arrayLen = int(m.group(1))
strType = strType[0:m.start()]
strType = strType.lower().replace('_t','')
self.setType(strType, self.arrayLen)
self.id = 0
self.name = name
self.globalName = "PP_FIELD_"+self.name.upper()
self.isVarLen = False
self.isRequired = False
self.desc = ""
self.memberName = "m"+ self.name.capitalize()
def camel(self):
return self.name[:1].capitalize() + self.name[1:]
def setType(self, type, len):
if not (type in cNameDict):
print( "INVALID DATA TYPE!: " + type)
self.arrayLen = len
self.type = type
self.size = sizeDict[self.type] * self.arrayLen
self.objSize = sizeDict[self.type]
self.pyFormat = pyFormatDict[self.type]
self.cType = cNameDict[self.type]
self.cppType = self.cType
self.isString = False
self.isArray = False
if(self.arrayLen > 1):
self.isArray = True
if(self.type == 'string'):
self.cppType = "string"
self.isString = True
self.isArray = True
if(self.arrayLen == 1):
self.arrayLen = 32 #if no arraylen is specified default 32
else:
if(self.isArray):
self.cppType = self.cppType +"*"
def addVal(self, val):
self.valDict[val.name] = len(self.vals) -1
if self.isMask:
val.val = 1 << self.valIndex
self.valIndex+=1
strType = 'uint8'
if len(self.vals) > 8:
self.valsFormat = "0x%0.4X"
strType = 'uint16'
if len(self.vals) > 16:
self.valsFormat = "0x%0.8X"
strType = 'uint32'
if len(self.vals) > 32:
print( "Error maximum flags per field is 32")
self.setType(strType,1)
elif self.isEnum:
val.val = self.valIndex
self.valIndex+=1
self.valDict[val.name] = val.val
self.vals.append(val)
def setPrefix(self, prefix):
self.globalName = prefix.upper()+"_FIELD_"+self.name.upper()
def getFieldDeclaration(self):
output = io.StringIO()
output.write("{0} field_{1}".format(self.cType, self.name))
if(self.arrayLen > 1):
output.write("["+str(self.arrayLen)+"]")
return output.getvalue()
def getParamType(self):
if self.isArray:
return self.cType +"*"
else:
return self.cType;
def getDeclaration(self):
if self.isArray:
return self.cType +" "+self.name+"["+ str(self.arrayLen)+"]"
else:
return self.cType + " " + self.name;
def getFormat(self):
if self.isString:
return "%s"
else:
return "%i"
class packetDesc:
    """Describes one packet (or struct) of a protocol.

    Fields are registered in order and get sequential ids.  ``packetId``,
    request/response wiring and the required-field bookkeeping are filled
    in later by the owning protocolDesc (addPacket/addStruct) and by
    postProcess().
    """

    def __init__(self, name, protocol):
        self.name = name
        self.globalName = "PP_PACKET_" + self.name.upper()
        self.className = name.capitalize() + "Packet"
        self.desc = ""
        self.fields = []
        # BUGFIX: attribute was misspelled 'sruct'; keep the old name as an
        # alias in case external code still reads it.
        self.struct = False
        self.sruct = False
        self.fieldCount = 0
        self.respondsTo = {}   # packet name -> 0: packets this one answers
        self.requests = {}     # packet name -> 0: expected response packets
        self.standard = False  # True for the built-in Ping/Ack packets
        self.structName = name.lower() + '_packet_t'
        self.hasResponse = False
        self.protocol = protocol
        self.requiredFields = []
        self.requiredFieldCount = 0

    def camel(self):
        """Packet name with only its first letter capitalized."""
        return self.name[:1].capitalize() + self.name[1:]

    def setPrefix(self, prefix):
        """Recompute the global macro name using the protocol *prefix*."""
        self.globalName = prefix.upper() + "_PACKET_" + self.name.upper()

    def addField(self, field):
        """Append *field*, assigning it the next sequential field id."""
        field.id = self.fieldCount
        self.fields.append(field)
        self.fieldCount += 1

    def addYAMLField(self, pfieldItem):
        """Add a field from a YAML entry (a bare name or ``{name: attrs}``)."""
        if type(pfieldItem) is dict:
            pfname = list(pfieldItem.keys())[0]
            pfield = list(pfieldItem.values())[0]
        else:
            pfname = pfieldItem
            pfield = {}
        if not (pfname in self.protocol.fieldIdx):
            print('ERROR Field not declared: ' + pfname)
        # Copy the shared field descriptor so per-packet overrides stay local.
        idx = self.protocol.fieldIdx[pfname]
        fieldCopy = copy.copy(self.protocol.fields[idx])
        if 'req' in pfield:
            fieldCopy.isRequired = pfield['req']
        if 'desc' in pfield:
            fieldCopy.desc = pfield['desc']
        # Reuse addField so the id bookkeeping lives in one place.
        self.addField(fieldCopy)

    def postProcess(self):
        """Resolve the response packet and collect the required fields."""
        if len(self.requests) > 0:
            self.hasResponse = True
            self.response = self.protocol.getPacket(next(iter(self.requests.keys())))
        for field in self.fields:
            if field.isRequired:
                self.requiredFields.append(field)
                self.requiredFieldCount += 1

    def tableSize(self):
        """Column count of the doc layout table: each field spans min(size, 4)."""
        total = 0
        for field in self.fields:
            total += min(field.size, 4)
        return total

    def getDocMd(self):
        """Render this packet's documentation section as Markdown text."""
        output = io.StringIO()
        idHex = "%0.2X" % self.packetId
        output.write('### ' + self.name + '\n')
        output.write(self.desc + '\n\n')
        output.write('* Packet ID: *[' + idHex + ']*\n')
        # Packets this one requests a response from.
        if len(self.requests) > 0:
            output.write('* *Requests: ')
            output.write(', '.join(self.requests))
            output.write('*\n\n')
        # Packets this one is sent in response to.
        if self.name == 'Ack':
            output.write('* *Responds To: Any Packet without a defined response*\n\n')
        else:
            if len(self.respondsTo) > 0:
                output.write('* *Responds To: ')
                output.write(', '.join(self.respondsTo))
                output.write('*\n')
            output.write('\n')
        rowBytes = io.StringIO()
        rowBorder = io.StringIO()
        rowFields = io.StringIO()
        rowTypes = io.StringIO()
        if len(self.fields) > 0:
            rowBytes.write('|***Byte***|')
            rowBorder.write('|---|')
            rowFields.write('|***Field***')
            rowTypes.write('|***Type***')
            count = 0
            for pfield in self.fields:
                # Byte-offset header row: wide fields are elided with dots.
                if pfield.size > 4:
                    rowBytes.write(str(count) + '| . . . . . . . |' + str(count + pfield.size - 1))
                    count += pfield.size
                else:
                    for x in range(pfield.size):
                        rowBytes.write(str(count) + '|')
                        count += 1
                # Fields wider than 4 bytes span at most 4 table columns.
                span = min(pfield.size, 4)
                for x in range(span):
                    rowBorder.write('---|')
                rowFields.write('<td colspan=\'' + str(span) + '\'>')
                if pfield.isRequired:
                    # Required fields are shown in bold.
                    rowFields.write('***' + pfield.name + '***')
                else:
                    rowFields.write(pfield.name)
                rowTypes.write('<td colspan=\'' + str(span) + '\'>')
                rowTypes.write(pfield.cType)
                if pfield.isArray:
                    if pfield.isVarLen:
                        rowTypes.write('[0-' + str(pfield.size) + ' ]')
                    else:
                        rowTypes.write('[' + str(pfield.size) + ']')
            # Combine rows into the layout table.
            output.write(rowBytes.getvalue() + "\n")
            output.write(rowBorder.getvalue() + "\n")
            output.write(rowFields.getvalue() + "\n")
            output.write(rowTypes.getvalue() + "\n")
            output.write('\n\n')
            output.write('Fields:\n')
            # Per-field descriptions with flag/enum value listings.
            for pfield in self.fields:
                output.write('>***' + pfield.name + '*** : ' + pfield.desc + '<br/>\n')
                if pfield.isMask:
                    for idx, val in enumerate(pfield.vals):
                        strVal = pfield.valsFormat % (1 << idx)
                        output.write('>> **{0}** : {1} - {2}<br/>\n'.format(strVal, val.name, val.desc))
                    output.write('>\n')
                if pfield.isEnum:
                    for idx, val in enumerate(pfield.vals):
                        strVal = pfield.valsFormat % (idx)
                        output.write('>> **{0}** : {1} - {2}<br/>\n'.format(strVal, val.name, val.desc))
                    output.write('>\n')
        else:
            output.write('>This Packet type does not contain any data fields\n\n')
        output.write('\n------\n')
        return output.getvalue()
class protocolDesc:
    """Top-level container for a protocol: shared fields, packets and structs.

    Packets and structs share a single id space; ``structsAndPackets``
    holds them in registration order, indexed by those ids.
    """

    def __init__(self, name):
        self.name = name
        self.fileName = name + "Service"
        self.cppFileName = name + "Service"
        self.desc = ""
        self.hash = ""
        self.fields = []
        self.fieldIdx = {}       # field name -> field id
        self.fieldId = 0
        self.fieldGroups = {}    # group name -> list of field names
        self.packets = []
        self.packetIdx = {}      # packet name -> shared id
        self.packetId = 0
        self.structs = []
        self.structsAndPackets = []
        self.structIdx = {}      # struct name -> shared id
        self.structId = 0
        self.prefix = "pp"
        self.snippets = False
        self.genUtility = False
        self.xmlName = ""
        self.utilName = ""
        self.sims = {}
        self.defaultResponse = ""

    def service(self):
        """Global service macro name, e.g. ``PP_SERVICE``."""
        return "{0}_SERVICE".format(self.prefix.upper())

    def descFromId(self, typeId):
        """Packet descriptor for a shared type id (structs occupy low ids)."""
        return self.packets[typeId - len(self.structs)]

    def fieldDescFromId(self, typeId):
        """Field descriptor for a field id."""
        return self.fields[typeId]

    def camelPrefix(self):
        """Prefix with its first letter capitalized."""
        return self.prefix[:1].capitalize() + self.prefix[1:]

    def addField(self, field):
        """Register *field* in the shared pool with the next field id."""
        field.id = self.fieldId
        field.protocol = self
        self.fields.append(field)
        self.fieldIdx[field.name] = self.fieldId
        self.fieldId += 1

    def addGroup(self, name, fields):
        """Remember a named group of field names for later expansion."""
        self.fieldGroups[name] = fields

    def addPacket(self, packet):
        """Register *packet*, assigning the next shared id and the prefix."""
        packet.packetId = self.packetId
        packet.protocol = self
        packet.setPrefix(self.prefix)
        self.packets.append(packet)
        self.structsAndPackets.append(packet)
        self.packetIdx[packet.name] = self.packetId
        self.packetId += 1

    def addStruct(self, struct):
        """Register *struct*; structs share the packet id space."""
        struct.packetId = self.packetId
        struct.protocol = self
        struct.struct = True
        struct.globalName = self.prefix.upper() + "_STRUCT_" + struct.name.upper()
        self.structs.append(struct)
        self.structsAndPackets.append(struct)
        self.structIdx[struct.name] = self.packetId
        self.packetId += 1

    def getPacket(self, name):
        """Look up a packet by name; returns None when unknown."""
        if name in self.packetIdx:
            return self.structsAndPackets[self.packetIdx[name]]
def addStandardPackets(protocol):
    """Add the built-in Ping/Ack packets and the shared 'icd' field to *protocol*.

    Every protocol carries an 'icd' CRC field so endpoints can verify they
    speak the same protocol version; Ping requests an Ack to test
    connectivity, and Ack answers any packet without an explicit response.
    """
    ping = packetDesc("Ping", protocol)
    ack = packetDesc("Ack", protocol)
    icd = fieldDesc("icd", "uint32")
    icd.isRequired = True
    icd.format = 'FORMAT_HEX'
    icd.setPrefix(protocol.prefix)
    icd.desc = "CRC Hash of protocol description. This is used to verify endpoints are using the same protocol"
    ping.desc = "This message requests an Ack from a remote device to test connectivity"
    ping.response = ack
    ping.hasResponse = True
    ping.requests['Ack'] =0
    ack.desc ="Acknowledges any packet that does not have an explicit response"
    # Mark both as standard so generators can treat them specially.
    ping.standard = True
    ack.standard = True
    protocol.addField(icd)
    ping.addField(icd)
    ack.addField(icd)
    protocol.addPacket(ping)
    protocol.addPacket(ack)
def parseXML(xmlfile):
    """Parse a legacy XML protocol description into a protocolDesc.

    Reads the protocol attributes, the shared field pool, then the packet
    and struct definitions, and finally wires up the request/response
    links and post-processes every packet.
    """
    # create element tree object
    tree = ET.parse(xmlfile)
    root = tree.getroot()
    protocol = protocolDesc(root.attrib['name'])
    protocol.xmlName = os.path.basename(xmlfile)
    if 'desc' in root.attrib:
        protocol.desc = root.attrib['desc']
    if 'prefix' in root.attrib:
        protocol.prefix = root.attrib['prefix']
    addStandardPackets(protocol)
    # Shared field pool.
    for field in root.findall('./Fields/Field'):
        name = field.attrib['name']
        strType = field.attrib['type']
        newField = fieldDesc(name, strType)
        newField.setPrefix(protocol.prefix)
        if 'format' in field.attrib:
            format = field.attrib['format'].lower()
            if not format in formatDict:
                print("INVALID FORMAT :" + format)
            newField.format = formatDict[format]
        if 'desc' in field.attrib:
            newField.desc = field.attrib['desc']
        # BUGFIX: duplicate detection used to test membership in the list of
        # field *objects* (protocol.fields), which never matches a name.
        if name in protocol.fieldIdx:
            print('ERROR Duplicate Field Name!: ' + name)
        # Enum/flag values, if any.
        for val in field.findall('./Val'):
            name = val.attrib['name']
            newVal = fieldVal(name)
            if 'desc' in val.attrib:
                newVal.desc = val.attrib['desc']
            newField.addVal(newVal)
        protocol.addField(newField)
    # Packet definitions.
    for packet in root.findall('./Packets/Packet'):
        name = packet.attrib['name']
        desc = ""
        newPacket = packetDesc(name, protocol)
        newPacket.setPrefix(protocol.prefix)
        if name in protocol.packetIdx:
            print('ERROR Duplicate Packet Name!: ' + name)
        if 'desc' in packet.attrib:
            desc = packet.attrib['desc']
        if 'response' in packet.attrib:
            newPacket.requests[packet.attrib['response']] = 0
        # Fields declared for this packet (copies of the shared pool entries).
        for pfield in packet:
            pfname = pfield.attrib['name']
            if not (pfname in protocol.fieldIdx):
                print('ERROR Field not declared: ' + pfname)
            idx = protocol.fieldIdx[pfname]
            fieldCopy = copy.copy(protocol.fields[idx])
            if 'req' in pfield.attrib:
                strReq = pfield.attrib['req']
                if strReq.lower() == "true":
                    fieldCopy.isRequired = True
            if 'desc' in pfield.attrib:
                fieldCopy.desc = pfield.attrib['desc']
            newPacket.addField(fieldCopy)
        newPacket.desc = desc
        protocol.addPacket(newPacket)
    # Struct definitions.
    for struct in root.findall('./Structs/Struct'):
        name = struct.attrib['name']
        desc = ""
        newStruct = packetDesc(name, protocol)
        if name in protocol.structIdx:
            print('ERROR Duplicate Struct Name!: ' + name)
        # BUGFIX: this used to read 'packet.attrib' (the stale loop variable
        # from the Packets loop above), so struct descriptions were wrong and
        # a struct-only file raised NameError.
        if 'desc' in struct.attrib:
            desc = struct.attrib['desc']
        for pfield in struct:
            pfname = pfield.attrib['name']
            if not (pfname in protocol.fieldIdx):
                print('ERROR Field not declared: ' + pfname)
            idx = protocol.fieldIdx[pfname]
            fieldCopy = copy.copy(protocol.fields[idx])
            if 'desc' in pfield.attrib:
                fieldCopy.desc = pfield.attrib['desc']
            newStruct.addField(fieldCopy)
        newStruct.desc = desc
        protocol.addStruct(newStruct)
    # Mark which packets each packet responds to, then finalize.
    for packet in protocol.packets:
        for request in packet.requests:
            idx = protocol.packetIdx[request]
            protocol.packets[idx].respondsTo[packet.name] = 0
    for packet in protocol.packets:
        packet.postProcess()
    return protocol
def parseYAMLField(protocol, fieldItem):
    """Parse one YAML field entry and register the resulting field on *protocol*.

    *fieldItem* is a one-key mapping ``{name: attributes}``; recognised
    attribute keys are ``type`` (required), ``format``, ``req``/``required``,
    ``desc`` and ``vals``.  Returns the newly created field descriptor.
    """
    name = list(fieldItem.keys())[0]
    field = list(fieldItem.values())[0]
    # YAML spells array lengths with parentheses, e.g. uint8(32) -> uint8[32].
    strType = field['type'].replace("(","[").replace(")","]");
    newField = fieldDesc(name, strType)
    newField.setPrefix(protocol.prefix)
    if('format' in field):
        format = field['format'].lower()
        if not format in formatDict:
            print( "INVALID FORMAT :" + format)
        newField.format = formatDict[format]
    # 'req' and 'required' are accepted as synonyms ('required' wins if both given).
    if 'req' in field:
        newField.isRequired = field['req']
    if 'required' in field:
        newField.isRequired = field['required']
    if('desc' in field):
        newField.desc = field['desc']
    # NOTE(review): this tests membership in the list of field *objects*, so
    # a duplicate name is presumably never detected; protocol.fieldIdx looks
    # like the intended container -- confirm.
    if(name in protocol.fields):
        print( 'ERROR Duplicate Field Name!: ' + name)
    #get vals if any
    if "vals" in field:
        for valItem in field['vals']:
            # A val entry is either a bare name or {name: {val, desc}}.
            if type(valItem) is dict:
                name = list(valItem.keys())[0]
                val = list(valItem.values())[0]
            else:
                name = valItem
                val = {}
            newVal = fieldVal(name)
            if('val' in val):
                newVal.val = val['val']
            if('desc' in val):
                newVal.desc = val['desc']
            newField.addVal(newVal)
    protocol.addField(newField)
    return newField
def parseYAML(yamlFile):
    """Parse a YAML protocol description file into a protocolDesc.

    Loads the document, registers fields (including named field groups),
    structs, packets and experimental simulators, then wires up the
    request/response links and post-processes every packet.
    """
    # BUGFIX: use a context manager so the file handle is closed promptly
    # (the file object was previously opened and never closed).
    with open(yamlFile) as data:
        objProtocol = yaml.load(data, Loader=yaml.FullLoader)
    protocol = protocolDesc(objProtocol['name'])
    if "prefix" in objProtocol:
        protocol.prefix = objProtocol['prefix']
    if "desc" in objProtocol:
        protocol.desc = objProtocol['desc']
    if "defaultResponse" in objProtocol:
        protocol.defaultResponse = objProtocol['defaultResponse']
    addStandardPackets(protocol)
    protocol.xmlName = os.path.basename(yamlFile)
    # Fields: every real field has a 'type'; an entry without one is a
    # named group whose value is a list of field entries.
    for fieldItem in objProtocol['fields']:
        if not 'type' in list(fieldItem.values())[0]:
            groupName = list(fieldItem.keys())[0]
            fieldGroupItems = list(fieldItem.values())[0]
            groupFields = []
            for fieldGroupItem in fieldGroupItems:
                newField = parseYAMLField(protocol, fieldGroupItem)
                groupFields.append(newField.name)
            protocol.addGroup(groupName, groupFields)
        else:
            parseYAMLField(protocol, fieldItem)
    if 'structs' in objProtocol:
        for structItem in objProtocol['structs']:
            name = list(structItem.keys())[0]
            struct = list(structItem.values())[0]
            desc = ""
            newStruct = packetDesc(name, protocol)
            if name in protocol.structIdx:
                print('ERROR Duplicate Struct Name!: ' + name)
            if 'desc' in struct:
                desc = struct['desc']
            # Fields declared for this struct; group names expand in place.
            if "fields" in struct:
                for pfieldItem in struct['fields']:
                    if type(pfieldItem) is dict:
                        pfname = list(pfieldItem.keys())[0]
                    else:
                        pfname = pfieldItem
                    if pfname in protocol.fieldGroups:
                        for pfFieldGroupItem in protocol.fieldGroups[pfname]:
                            newStruct.addYAMLField(pfFieldGroupItem)
                    else:
                        newStruct.addYAMLField(pfieldItem)
            newStruct.desc = desc
            protocol.addStruct(newStruct)
    if 'packets' in objProtocol:
        for packetItem in objProtocol['packets']:
            name = list(packetItem.keys())[0]
            packet = list(packetItem.values())[0]
            desc = ""
            newPacket = packetDesc(name, protocol)
            newPacket.setPrefix(protocol.prefix)
            if name in protocol.packetIdx:
                print('ERROR Duplicate Packet Name!: ' + name)
            if 'desc' in packet:
                desc = packet['desc']
            # Response wiring: an explicit 'response' wins; the literal
            # string "none" suppresses the protocol-wide default response.
            if 'response' in packet:
                if packet['response'] != "none":
                    newPacket.requests[packet['response']] = 0
            else:
                if not protocol.defaultResponse == "" and not protocol.defaultResponse == newPacket.name:
                    newPacket.requests[protocol.defaultResponse] = 0
            # Fields declared for this packet; group names expand in place.
            if "fields" in packet:
                for pfieldItem in packet['fields']:
                    if type(pfieldItem) is dict:
                        pfname = list(pfieldItem.keys())[0]
                    else:
                        pfname = pfieldItem
                    if pfname in protocol.fieldGroups:
                        for pfFieldGroupItem in protocol.fieldGroups[pfname]:
                            newPacket.addYAMLField(pfFieldGroupItem)
                    else:
                        newPacket.addYAMLField(pfieldItem)
            newPacket.desc = desc
            protocol.addPacket(newPacket)
    if 'sims' in objProtocol:  # experimental
        for simItem in objProtocol['sims']:
            name = list(simItem.keys())[0]
            sim = list(simItem.values())[0]
            protocol.sims[name] = simulator(name, sim)
    # Mark which packets each packet responds to, then finalize.
    for packet in protocol.packets:
        for request in packet.requests.keys():
            protocol.getPacket(request).respondsTo[packet.name] = 0
    for packet in protocol.packets:
        packet.postProcess()
    return protocol
def buildProtocol(file):
    """Parse a protocol description file, dispatching on its extension.

    Accepts ``.yml``/``.yaml`` (preferred) or legacy ``.xml``; returns the
    resulting protocolDesc, or 0 for an unsupported file type.
    """
    extension = os.path.splitext(file)[1]
    if extension == ".xml":
        # BUGFIX: message said "depreciated".
        print(" XML files are deprecated. Please convert to YAML for future use")
        return parseXML(file)
    elif extension in (".yml", ".yaml"):
        # Generalized: both common YAML extensions are accepted now.
        return parseYAML(file)
    else:
        print(" Files Type: " + extension + " Not supported. Please use YAML")
        return 0
|
[
"xml.etree.ElementTree.parse",
"yaml.load",
"io.StringIO",
"os.path.basename",
"copy.copy",
"os.path.splitext",
"re.search",
"zlib.crc32"
] |
[((15727, 15744), 'xml.etree.ElementTree.parse', 'ET.parse', (['xmlfile'], {}), '(xmlfile)\n', (15735, 15744), True, 'import xml.etree.ElementTree as ET\n'), ((15910, 15935), 'os.path.basename', 'os.path.basename', (['xmlfile'], {}), '(xmlfile)\n', (15926, 15935), False, 'import os\n'), ((21255, 21294), 'yaml.load', 'yaml.load', (['data'], {'Loader': 'yaml.FullLoader'}), '(data, Loader=yaml.FullLoader)\n', (21264, 21294), False, 'import yaml\n'), ((21683, 21709), 'os.path.basename', 'os.path.basename', (['yamlFile'], {}), '(yamlFile)\n', (21699, 21709), False, 'import os\n'), ((1553, 1579), 'zlib.crc32', 'zlib.crc32', (['eachLine', 'prev'], {}), '(eachLine, prev)\n', (1563, 1579), False, 'import zlib\n'), ((2843, 2876), 're.search', 're.search', (['"""\\\\*([0-9]*)"""', 'strType'], {}), "('\\\\*([0-9]*)', strType)\n", (2852, 2876), False, 'import re\n'), ((5283, 5296), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (5294, 5296), False, 'import io\n'), ((7359, 7395), 'copy.copy', 'copy.copy', (['self.protocol.fields[idx]'], {}), '(self.protocol.fields[idx])\n', (7368, 7395), False, 'import copy\n'), ((8283, 8296), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (8294, 8296), False, 'import io\n'), ((9550, 9563), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (9561, 9563), False, 'import io\n'), ((9585, 9598), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (9596, 9598), False, 'import io\n'), ((9620, 9633), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (9631, 9633), False, 'import io\n'), ((9654, 9667), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (9665, 9667), False, 'import io\n'), ((25854, 25876), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (25870, 25876), False, 'import os\n'), ((18043, 18074), 'copy.copy', 'copy.copy', (['protocol.fields[idx]'], {}), '(protocol.fields[idx])\n', (18052, 18074), False, 'import copy\n'), ((19235, 19266), 'copy.copy', 'copy.copy', (['protocol.fields[idx]'], {}), '(protocol.fields[idx])\n', (19244, 
19266), False, 'import copy\n')]
|
"""Testing for Showalter Index only. While MetPy handles all five parameters,
the Showalter Index was contributed to MetPy by the GeoCAT team because of the
skewt_params function. Additionally, a discrepancy between NCL and MetPy
calculations of CAPE has been identified. After validating the CAPE value by
hand using the method outlined in Hobbs 2006, it was determined that the MetPy
calculation was closer to the CAPE value than the NCL calculation. To overcome
any issues with validating the dataset, it was decided that skewt_params would
only test against the Showalter Index for validation and not against all five
parameters.
Citation:
<NAME>., and <NAME>, 2006:
Atmospheric Science: An Introductory Survey. 2nd ed. Academic Press,
pg 345
"""
import sys
import metpy.calc as mpcalc
import numpy as np
import xarray as xr
import numpy.testing as nt
from metpy.units import units
import geocat.datafiles as gdf
import pandas as pd
# Import from directory structure if coverage test, or from installed
# packages otherwise
if "--cov" in str(sys.argv):
from src.geocat.comp import get_skewt_vars, showalter_index
else:
from geocat.comp import get_skewt_vars, showalter_index
# Sounding data: whitespace-delimited, no header.  Presumably column 1 is
# pressure, 5 temperature and 9 dew point -- verify against the dataset docs.
ds = pd.read_csv(gdf.get('ascii_files/sounding.testdata'),
                 delimiter='\\s+',
                 header=None)
# get ground truth from ncl run netcdf file
try:
    out = xr.open_dataset(
        "skewt_params_output.nc"
    )  # Generated by running ncl_tests/skewt_params_test.ncl
except Exception:
    # BUGFIX: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; fall back to the path used when run from repo root.
    out = xr.open_dataset("test/skewt_params_output.nc")
# Extract the data from ds
p = ds[1].values * units.hPa  # Pressure [mb/hPa]
# NOTE(review): the +2 offset looks like a deliberate adjustment of the raw
# temperatures -- confirm against the NCL reference script.
tc = (ds[5].values + 2) * units.degC  # Temperature [C]
tdc = ds[9].values * units.degC  # Dew pt temp [C]
pro = mpcalc.parcel_profile(p, tc[0], tdc[0]).to('degC')
# Extract Showalter Index from NCL out file and convert to int
Shox = np.round(out['Shox'])  # Use np.round to avoid rounding issues
NCL_shox = int(Shox[0])  # Convert to int
def test_shox_vals():
    """Showalter index from showalter_index matches the NCL reference value."""
    # Compute the index for the sounding and take its scalar magnitude.
    computed = showalter_index(p, tc, tdc)[0].magnitude
    # Round to the nearest integer the same way the reference was produced.
    rounded = np.round(computed).astype(int)
    nt.assert_equal(rounded, NCL_shox)
def test_get_skewt_vars():
    """get_skewt_vars reads, orders and formats the five parameters correctly.

    The numeric values below are the pre-validated MetPy results (plus the
    Showalter index tested separately); this test only checks that
    get_skewt_vars assembles them into the expected string.
    """
    want = 'Plcl= 927 Tlcl[C]= 24 Shox= 3 Pwat[cm]= 5 Cape[J]= 2958'
    got = get_skewt_vars(p, tc, tdc, pro)
    nt.assert_equal(got, want)
|
[
"metpy.calc.parcel_profile",
"xarray.open_dataset",
"geocat.datafiles.get",
"geocat.comp.showalter_index",
"geocat.comp.get_skewt_vars",
"numpy.testing.assert_equal",
"numpy.round"
] |
[((1864, 1885), 'numpy.round', 'np.round', (["out['Shox']"], {}), "(out['Shox'])\n", (1872, 1885), True, 'import numpy as np\n'), ((1207, 1247), 'geocat.datafiles.get', 'gdf.get', (['"""ascii_files/sounding.testdata"""'], {}), "('ascii_files/sounding.testdata')\n", (1214, 1247), True, 'import geocat.datafiles as gdf\n'), ((1373, 1414), 'xarray.open_dataset', 'xr.open_dataset', (['"""skewt_params_output.nc"""'], {}), "('skewt_params_output.nc')\n", (1388, 1414), True, 'import xarray as xr\n'), ((2027, 2054), 'geocat.comp.showalter_index', 'showalter_index', (['p', 'tc', 'tdc'], {}), '(p, tc, tdc)\n', (2042, 2054), False, 'from geocat.comp import get_skewt_vars, showalter_index\n'), ((2221, 2252), 'numpy.testing.assert_equal', 'nt.assert_equal', (['vals', 'NCL_shox'], {}), '(vals, NCL_shox)\n', (2236, 2252), True, 'import numpy.testing as nt\n'), ((2690, 2721), 'geocat.comp.get_skewt_vars', 'get_skewt_vars', (['p', 'tc', 'tdc', 'pro'], {}), '(p, tc, tdc, pro)\n', (2704, 2721), False, 'from geocat.comp import get_skewt_vars, showalter_index\n'), ((2726, 2759), 'numpy.testing.assert_equal', 'nt.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (2741, 2759), True, 'import numpy.testing as nt\n'), ((1503, 1549), 'xarray.open_dataset', 'xr.open_dataset', (['"""test/skewt_params_output.nc"""'], {}), "('test/skewt_params_output.nc')\n", (1518, 1549), True, 'import xarray as xr\n'), ((1742, 1781), 'metpy.calc.parcel_profile', 'mpcalc.parcel_profile', (['p', 'tc[0]', 'tdc[0]'], {}), '(p, tc[0], tdc[0])\n', (1763, 1781), True, 'import metpy.calc as mpcalc\n'), ((2143, 2157), 'numpy.round', 'np.round', (['shox'], {}), '(shox)\n', (2151, 2157), True, 'import numpy as np\n')]
|
"""
Module for dealing with combined features
"""
from __future__ import absolute_import
import pandas as pd
from . import correlation_convertion
from . import wavelet_classification
def load(segment_files, **kwargs):
    """Load multiple feature files and concatenate them column-wise.

    The feature loader is chosen from each file name: paths containing
    'wavelet' use wavelet_classification.load_csv, paths containing 'corr'
    use correlation_convertion.load_and_pivot.

    :param segment_files: iterable of feature file paths
    :param kwargs: extra options; NOTE(review): these are printed but never
        forwarded to the underlying loaders -- confirm whether they should be.
    :return: a single pandas DataFrame with all features side by side
    :raises NotImplementedError: for a path matching neither loader
    """
    print("Loading files with kwargs: ", kwargs)
    dataframes = []
    for segment_file in segment_files:
        if 'wavelet' in segment_file:
            dataframes.append(wavelet_classification.load_csv(segment_file))
        elif 'corr' in segment_file:
            dataframes.append(correlation_convertion.load_and_pivot(segment_file))
        else:
            raise NotImplementedError("Don't know which feature loading function to use for {}.".format(segment_file))
    return pd.concat(dataframes, axis=1)
|
[
"pandas.concat"
] |
[((1017, 1046), 'pandas.concat', 'pd.concat', (['dataframes'], {'axis': '(1)'}), '(dataframes, axis=1)\n', (1026, 1046), True, 'import pandas as pd\n')]
|
import argparse
import os
import shutil
import utils_bg
def reset_dir(path):
    """Remove *path* (and its contents) if it exists, then recreate it empty."""
    if os.path.isdir(path):
        shutil.rmtree(path)
    os.mkdir(path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--fg_type",
        "-fg_type",
        default="frame",
        choices=["frame", "logo"],
        type=str,
        help="frame or logo",
    )
    parser.add_argument(
        "--fg_pool",
        "-fg_pool",
        default="/ldap_home/kailin.chen/product_segmentation/datasets/synthetic/logo_pool_sp/val/",
        type=str,
        help="frame pool or logo pool dir.",
    )
    parser.add_argument(
        "--save_bg_dir",
        "-save_bg_dir",
        default="/ldap_home/kailin.chen/product_segmentation/datasets/synthetic/bg_pool/bg_logo_text/val/",
        type=str,
        help="new image without alpha channel save dir.",
    )
    parser.add_argument(
        "--save_fg_dir",
        "-save_fg_dir",
        default="/ldap_home/kailin.chen/product_segmentation/datasets/synthetic/logo_pool_sp/val_aug_text/",
        type=str,
        help="new image with alpha channel save dir.",
    )
    args = parser.parse_args()
    # Start from empty output directories so stale images never mix with the
    # new synthetic batch (the clear-and-recreate logic was duplicated inline).
    reset_dir(args.save_bg_dir)
    reset_dir(args.save_fg_dir)
    syner = utils_bg.Synthesizer(
        fg_pool=args.fg_pool, save_bg_dir=args.save_bg_dir, savedir=args.save_fg_dir
    )
    if args.fg_type == "frame":
        syner.gen_frame()  # for frame
    elif args.fg_type == "logo":
        syner.gen_logo_text()  # for logo and text
|
[
"os.mkdir",
"argparse.ArgumentParser",
"os.path.isdir",
"shutil.rmtree",
"utils_bg.Synthesizer"
] |
[((97, 122), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (120, 122), False, 'import argparse\n'), ((1104, 1135), 'os.path.isdir', 'os.path.isdir', (['args.save_bg_dir'], {}), '(args.save_bg_dir)\n', (1117, 1135), False, 'import os\n'), ((1206, 1232), 'os.mkdir', 'os.mkdir', (['args.save_bg_dir'], {}), '(args.save_bg_dir)\n', (1214, 1232), False, 'import os\n'), ((1240, 1271), 'os.path.isdir', 'os.path.isdir', (['args.save_fg_dir'], {}), '(args.save_fg_dir)\n', (1253, 1271), False, 'import os\n'), ((1317, 1343), 'os.mkdir', 'os.mkdir', (['args.save_fg_dir'], {}), '(args.save_fg_dir)\n', (1325, 1343), False, 'import os\n'), ((1357, 1459), 'utils_bg.Synthesizer', 'utils_bg.Synthesizer', ([], {'fg_pool': 'args.fg_pool', 'save_bg_dir': 'args.save_bg_dir', 'savedir': 'args.save_fg_dir'}), '(fg_pool=args.fg_pool, save_bg_dir=args.save_bg_dir,\n savedir=args.save_fg_dir)\n', (1377, 1459), False, 'import utils_bg\n'), ((1170, 1201), 'shutil.rmtree', 'shutil.rmtree', (['args.save_bg_dir'], {}), '(args.save_bg_dir)\n', (1183, 1201), False, 'import shutil\n'), ((1281, 1312), 'shutil.rmtree', 'shutil.rmtree', (['args.save_fg_dir'], {}), '(args.save_fg_dir)\n', (1294, 1312), False, 'import shutil\n')]
|
import calendar
import matplotlib.pyplot as plt
calendar.setfirstweekday(6)  # Sunday is 1st day in US
# Weekday column headers, Sunday-first to match setfirstweekday above.
w_days = 'Sun Mon Tue Wed Thu Fri Sat'.split()
# Month names indexed by (month number - 1).
m_names = 'January February March April May June July August September October November December'.split()
class MplCalendar(object):
    """A month calendar rendered as a matplotlib grid of day cells.

    Events added with add_event are drawn as text inside the day's cell.
    """

    def __init__(self, year, month):
        self.year = year
        self.month = month
        # monthcalendar yields one list of day numbers per week (0 = padding).
        self.cal = calendar.monthcalendar(year, month)
        # Parallel structure holding the event strings for each day cell.
        self.events = [[[] for _ in week] for week in self.cal]

    def _monthday_to_index(self, day):
        """Map a day-of-month to its (week_row, weekday_col) position."""
        for row, week in enumerate(self.cal):
            if day in week:
                return row, week.index(day)
        raise ValueError("There aren't {} days in the month".format(day))

    def add_event(self, day, event_str):
        """Attach *event_str* to the cell for *day*."""
        row, col = self._monthday_to_index(day)
        self.events[row][col].append(event_str)

    def show(self):
        """Draw the calendar: one subplot per cell, weekday titles on top."""
        fig, axs = plt.subplots(len(self.cal), 7, sharex=True, sharey=True)
        for row, ax_row in enumerate(axs):
            for col, ax in enumerate(ax_row):
                ax.set_xticks([])
                ax.set_yticks([])
                day_num = self.cal[row][col]
                if day_num != 0:
                    # Day number in the top-left corner of the cell.
                    ax.text(.02, .98, str(day_num),
                            verticalalignment='top',
                            horizontalalignment='left')
                    # Event lines just below the day number.
                    ax.text(.03, .85, "\n".join(self.events[row][col]),
                            verticalalignment='top',
                            horizontalalignment='left',
                            fontsize=9)
        # Weekday names become titles of the first row only.
        for col, day_name in enumerate(w_days):
            axs[0][col].set_title(day_name)
        # Pack the cells into a tight grid under a month/year banner.
        fig.subplots_adjust(hspace=0)
        fig.subplots_adjust(wspace=0)
        fig.suptitle(m_names[self.month - 1] + ' ' + str(self.year),
                     fontsize=20, fontweight='bold')
        plt.show()
# Demo: render a couple of months.
feb = MplCalendar(2017, 2) # February 2017
feb.show()
march = MplCalendar(1999, 7) # NOTE(review): named 'march' but this is July 1999
march.show()
|
[
"calendar.monthcalendar",
"calendar.setfirstweekday",
"matplotlib.pyplot.show"
] |
[((49, 76), 'calendar.setfirstweekday', 'calendar.setfirstweekday', (['(6)'], {}), '(6)\n', (73, 76), False, 'import calendar\n'), ((392, 427), 'calendar.monthcalendar', 'calendar.monthcalendar', (['year', 'month'], {}), '(year, month)\n', (414, 427), False, 'import calendar\n'), ((2385, 2395), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2393, 2395), True, 'import matplotlib.pyplot as plt\n')]
|
# INCOMPLETE
# uncomment the evaluate script
# # read the log.txt and extract the rewards per episode
# script to run the models for eval for 1000 episodes
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Per-model evaluation returns, no header.  Presumably col 0 = model label,
# 1 = mean return, 2 = symmetric error, 3 = low bound, 4 = high bound --
# confirm against the script that wrote the file.
df = pd.read_csv('returns_MiniGrid-DistShift2-v0', header=None)
df = df.sort_values(by=0, ignore_index=True)
# plt.figure(figsize=(10,10))
plt.clf()
plt.grid()
# Thick black markers: mean with the column-2 error bar.
plt.errorbar(np.arange(len(df)), df[1], df[2], fmt='ok', lw=5)
# Thin grey bars: asymmetric range built from the low/high columns.
plt.errorbar(np.arange(len(df)), df[1], [df[1] - df[3], df[4] - df[1]],
             fmt='.k', ecolor='gray', lw=2)
plt.xticks(np.arange(len(df)), df[0], rotation=0)
plt.title('DistShift2 Eval returns per episode (for 1000 episodes)')
plt.ylabel('Returns per episode')
plt.xlabel('Models with different SHAP regularizer coeff')
plt.savefig('plot_eval.png')
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.clf",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig"
] |
[((233, 291), 'pandas.read_csv', 'pd.read_csv', (['"""returns_MiniGrid-DistShift2-v0"""'], {'header': 'None'}), "('returns_MiniGrid-DistShift2-v0', header=None)\n", (244, 291), True, 'import pandas as pd\n'), ((368, 377), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (375, 377), True, 'import matplotlib.pyplot as plt\n'), ((378, 388), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (386, 388), True, 'import matplotlib.pyplot as plt\n'), ((618, 686), 'matplotlib.pyplot.title', 'plt.title', (['"""DistShift2 Eval returns per episode (for 1000 episodes)"""'], {}), "('DistShift2 Eval returns per episode (for 1000 episodes)')\n", (627, 686), True, 'import matplotlib.pyplot as plt\n'), ((687, 720), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Returns per episode"""'], {}), "('Returns per episode')\n", (697, 720), True, 'import matplotlib.pyplot as plt\n'), ((721, 779), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Models with different SHAP regularizer coeff"""'], {}), "('Models with different SHAP regularizer coeff')\n", (731, 779), True, 'import matplotlib.pyplot as plt\n'), ((780, 808), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_eval.png"""'], {}), "('plot_eval.png')\n", (791, 808), True, 'import matplotlib.pyplot as plt\n')]
|
"""
Collection of constants tuples
"""
__author__ = '<NAME>'
from types import MappingProxyType
# dicts:
# Event-service job type codes -> names; MappingProxyType keeps it read-only.
EVENT_SERVICE_JOB_TYPES = MappingProxyType({
    1: 'eventservice',
    2: 'esmerge',
    3: 'clone',
    4: 'jumbo',
    5: 'cojumbo',
})
# lists
# All job states, in rough lifecycle order.
JOB_STATES = (
    'pending',
    'defined',
    'waiting',
    'assigned',
    'throttled',
    'activated',
    'sent',
    'starting',
    'running',
    'holding',
    'transferring',
    'merging',
    'finished',
    'failed',
    'cancelled',
    'closed'
)
# Job states as presented on site views (no 'pending'; merging listed
# before transferring).
JOB_STATES_SITE = (
    'defined',
    'waiting',
    'assigned',
    'throttled',
    'activated',
    'sent',
    'starting',
    'running',
    'holding',
    'merging',
    'transferring',
    'finished',
    'failed',
    'cancelled',
    'closed'
)
# Job states treated as terminal.
JOB_STATES_FINAL = (
    'finished',
    'failed',
    'cancelled',
    'closed',
    'merging'
)
# Resource capability labels (single/multi core, normal/high memory).
RESOURCE_CAPABILITIES = (
    'SCORE',
    'MCORE',
    'SCORE_HIMEM',
    'MCORE_HIMEM'
)
# States of an event in event-service processing.
EVENT_STATES = (
    'ready',
    'sent',
    'running',
    'finished',
    'cancelled',
    'discarded',
    'done',
    'failed',
    'fatal',
    'merged',
    'corrupted',
)
# Task states, in rough lifecycle order.
TASK_STATES = (
    'registered',
    'defined',
    'assigning',
    'ready',
    'pending',
    'scouting',
    'scouted',
    'running',
    'prepared',
    'done',
    'failed',
    'finished',
    'aborting',
    'aborted',
    'finishing',
    'topreprocess',
    'preprocessing',
    'tobroken',
    'broken',
    'toretry',
    'toincexec',
    'rerefine'
)
# Task states treated as terminal.
TASK_STATES_FINAL = (
    'broken',
    'aborted',
    'done',
    'finished',
    'failed'
)
# Job attributes shown in the error summary view.
JOB_FIELDS_ERROR_VIEW = (
    'cloud',
    'computingelement',
    'computingsite',
    'eventservice',
    'jeditaskid',
    'jobstatus',
    'processingtype',
    'prodsourcelabel',
    'produsername',
    'specialhandling',
    'taskid',
    'transformation',
    'reqid',
    'workinggroup',
)
# Error-source components: column names for the code/diag pair plus a
# human-readable title.
JOB_ERROR_CATEGORIES = (
    {'name': 'brokerage', 'error': 'brokerageerrorcode', 'diag': 'brokerageerrordiag', 'title': 'Brokerage error'},
    {'name': 'ddm', 'error': 'ddmerrorcode', 'diag': 'ddmerrordiag', 'title': 'DDM error'},
    {'name': 'exe', 'error': 'exeerrorcode', 'diag': 'exeerrordiag', 'title': 'Executable error'},
    {'name': 'jobdispatcher', 'error': 'jobdispatchererrorcode', 'diag': 'jobdispatchererrordiag',
     'title': 'Dispatcher error'},
    {'name': 'pilot', 'error': 'piloterrorcode', 'diag': 'piloterrordiag', 'title': 'Pilot error'},
    {'name': 'sup', 'error': 'superrorcode', 'diag': 'superrordiag', 'title': 'Sup error'},
    {'name': 'taskbuffer', 'error': 'taskbuffererrorcode', 'diag': 'taskbuffererrordiag', 'title': 'Task buffer error'},
    # transformation exposes only an exit code, no diagnostic column.
    {'name': 'transformation', 'error': 'transexitcode', 'diag': None, 'title': 'Trf exit code'},
)
# Default job attributes used for standard listings.
JOB_FIELDS_STANDARD = (
    'processingtype',
    'computingsite',
    'jobstatus',
    'prodsourcelabel',
    'produsername',
    'jeditaskid',
    'workinggroup',
    'transformation',
    'cloud',
    'homepackage',
    'inputfileproject',
    'inputfiletype',
    'attemptnr',
    'specialhandling',
    'priorityrange',
    'reqid',
    'minramcount',
    'eventservice',
    'jobsubstatus',
    'nucleus',
    'gshare',
    'resourcetype'
)
# Default site attributes used for standard listings.
SITE_FIELDS_STANDARD = (
    'region',
    'gocname',
    'nickname',
    'status',
    'tier',
    'comment_field',
    'cloud',
    'allowdirectaccess',
    'allowfax',
    'copytool',
    'faxredirector',
    'retry',
    'timefloor'
)
# Default task attributes used for standard listings.
TASK_FIELDS_STANDARD = (
    'workqueue_id',
    'tasktype',
    'superstatus',
    'status',
    'corecount',
    'taskpriority',
    'currentpriority',
    'username',
    'transuses',
    'transpath',
    'workinggroup',
    'processingtype',
    'cloud',
    'campaign',
    'project',
    'stream',
    'tag',
    'reqid',
    'ramcount',
    'nucleus',
    'eventservice',
    'gshare',
    'container_name',
    'attemptnr',
    'site'
)
|
[
"types.MappingProxyType"
] |
[((134, 237), 'types.MappingProxyType', 'MappingProxyType', (["{(1): 'eventservice', (2): 'esmerge', (3): 'clone', (4): 'jumbo', (5):\n 'cojumbo'}"], {}), "({(1): 'eventservice', (2): 'esmerge', (3): 'clone', (4):\n 'jumbo', (5): 'cojumbo'})\n", (150, 237), False, 'from types import MappingProxyType\n')]
|
import grblas
import numba
import numpy as np
from typing import Union, Tuple
from .container import Flat, Pivot
from .schema import SchemaMismatchError
from .oputils import jitted_op
class SizeMismatchError(Exception):
    """Raised when two containers' sizes/dimensions do not line up for alignment."""
    pass
# Sentinel to indicate the fill values come from the object to which we are aligning
# (i.e. callers pass afill=b or bfill=a, which align()/align_flats() translate into
# this marker meaning "copy the missing entries from the other operand").
_fill_like = object()
def align(a: Union[Flat, Pivot], b: Union[Flat, Pivot], op=None, afill=None, bfill=None):
    """
    Align two containers of either kind.

    Dispatches to align_pivots when both a and b are Pivots; otherwise any
    Pivot operand is flattened and the pair is handled by align_flats.
    afill/bfill keep their align_flats/align_pivots join semantics; passing
    the opposite operand as a fill value means "fill from its values".
    """
    if a.schema is not b.schema:
        raise SchemaMismatchError("Objects have different schemas")
    # Translate "fill from the other operand" into the internal sentinel
    if afill is not None and afill is b:
        afill = _fill_like
    if bfill is not None and bfill is a:
        bfill = _fill_like
    if type(a) is Pivot and type(b) is Pivot:
        return align_pivots(a, b, op=op, afill=afill, bfill=bfill)
    # Mixed (or Flat/Flat) case: work entirely in Flat space
    lhs = a.flatten() if type(a) is Pivot else a
    rhs = b.flatten() if type(b) is Pivot else b
    return align_flats(lhs, rhs, op=op, afill=afill, bfill=bfill)
def align_flats(a: Flat, b: Flat, op=None, afill=None, bfill=None):
    """
    Align two Flats into Pivots with matching left and top dimensions
    (already-aligned inputs come back as Flats).

    If op is provided a single combined Pivot is returned, otherwise a
    2-tuple of Pivots. The join kind follows the fills:
      afill=None, bfill=None -> inner join
      afill!=None, bfill=None -> left join
      afill=None, bfill!=None -> right join
      afill!=None, bfill!=None -> outer join
    :param a: Flat
    :param b: Flat
    :param op: grblas.BinaryOp (default None)
    :param afill: scalar or Flat (default None)
    :param bfill: scalar or Flat (default None)
    :return: Pivot or (Pivot, Pivot)
    """
    if a.schema is not b.schema:
        raise SchemaMismatchError("Objects have different schemas")
    if afill is not None and afill is b:
        afill = _fill_like
    if bfill is not None and bfill is a:
        bfill = _fill_like
    # Classify the relationship between the two dimension sets
    diff = a.dims ^ b.dims
    if not diff:
        # Identical dims: no reshaping required
        return _already_aligned_flats(a, b, op, afill, bfill)
    if a.dims - b.dims == diff:
        # b's dims are a strict subset of a's
        return _align_subset(a.pivot(top=diff), b, op, afill, bfill)
    if b.dims - a.dims == diff:
        # a's dims are a strict subset of b's; swap roles and mark reversed
        return _align_subset(b.pivot(top=diff), a, op, bfill, afill, reversed=True)
    shared = a.dims & b.dims
    if shared:
        # Partially disjoint: pivot both on the shared dims
        return _align_partial_disjoint(a.pivot(left=shared), b.pivot(left=shared), op, afill, bfill)
    # Fully disjoint dimension sets
    return _align_fully_disjoint(a, b, op)
def align_pivots(a: Pivot, b: Pivot, op=None, afill=None, bfill=None):
    """
    Align two Pivots into Pivots with matching left and top dimensions.

    If op is provided a single combined Pivot is returned, otherwise a
    2-tuple of Pivots. The join kind follows the fills:
      afill=None, bfill=None -> inner join
      afill!=None, bfill=None -> left join
      afill=None, bfill!=None -> right join
      afill!=None, bfill!=None -> outer join
    :param a: Pivot
    :param b: Pivot
    :param op: grblas.BinaryOp (default None)
    :param afill: scalar or Flat (default None)
    :param bfill: scalar or Flat (default None)
    :return: Pivot or (Pivot, Pivot)
    """
    if a.schema is not b.schema:
        raise SchemaMismatchError("Objects have different schemas")
    if afill is not None and afill is b:
        afill = _fill_like
    if bfill is not None and bfill is a:
        bfill = _fill_like
    # Compare the full (left | top) dimension sets of both operands
    dims_a = a.left | a.top
    dims_b = b.left | b.top
    diff = dims_a ^ dims_b
    if not diff:
        # Same dims overall; re-pivot b so its left matches a's
        return _already_aligned_pivots(a, b.pivot(left=a.left), op, afill, bfill)
    if dims_a - dims_b == diff:
        # b's dims are a strict subset of a's
        return _align_subset(a.pivot(top=diff), b.flatten(), op, afill, bfill)
    if dims_b - dims_a == diff:
        # a's dims are a strict subset of b's; swap roles and mark reversed
        return _align_subset(b.pivot(top=diff), a.flatten(), op, bfill, afill, reversed=True)
    shared = dims_a & dims_b
    if shared:
        # Partially disjoint: pivot both on the shared dims
        return _align_partial_disjoint(a.pivot(left=shared), b.pivot(left=shared), op, afill, bfill)
    # Fully disjoint: fall back to the Flat cross-join
    return _align_fully_disjoint(a.flatten(), b.flatten(), op)
def _already_aligned_flats(a: Flat, b: Flat, op=None, afill=None, bfill=None) -> Union[Flat, Tuple[Flat, Flat]]:
    """
    Combine two Flats that already share identical dimensions.

    a.dims must equal b.dims

    Fill semantics (applied before combining):
      - _fill_like: copy the other operand's values into this one's holes
      - any other non-None value: scalar written wherever only the other
        operand has an entry
      - None: leave holes as-is (inner-join behaviour)
    Returns a single Flat if op is given, otherwise a (Flat, Flat) pair.
    """
    assert a.dims == b.dims, f"Mismatching dimensions {a.dims ^ b.dims}"
    # Create a2 and b2 as expanded, filled vectors
    a2, b2 = a.vector, b.vector
    if afill is _fill_like:
        # Fill a's structural holes with b's values (complemented mask)
        a2 = a2.dup()
        a2(~a2.S) << b2
    elif afill is not None:
        # Scalar fill: write afill at b's positions, then restore a's own values on top
        a2 = grblas.Vector.new(a2.dtype, size=a2.size)
        a2(b2.S) << afill
        a2(a.vector.S) << a.vector
    if bfill is _fill_like:
        # Fill b's holes from a2 (note: the possibly already-filled a2, not a)
        b2 = b2.dup()
        b2(~b2.S) << a2
    elif bfill is not None:
        # Scalar fill: write bfill at a2's positions, then restore b's own values on top
        b2 = grblas.Vector.new(b2.dtype, size=b2.size)
        b2(a2.S) << bfill
        b2(b.vector.S) << b.vector
    # Handle op
    if op is None:
        return Flat(a2, a.schema, a.dims), Flat(b2, b.schema, b.dims)
    else:
        # Element-wise combine over the intersection of the (filled) structures
        result = a2.ewise_mult(b2, op=op)
        return Flat(result.new(), a.schema, a.dims)
def _already_aligned_pivots(a: Pivot, b: Pivot, op=None, afill=None, bfill=None) -> Union[Pivot, Tuple[Pivot, Pivot]]:
    """
    Combine two Pivots that already share identical left and top dimensions.

    a.left must equal b.left
    a.top must equal b.top

    Fill semantics match _already_aligned_flats, applied to the matrices.
    Returns a single Pivot if op is given, otherwise a (Pivot, Pivot) pair.
    """
    assert a.left == b.left, f"Mismatching left dimensions {a.left ^ b.left}"
    assert a.top == b.top, f"Mismatching top dimensions {a.top ^ b.top}"
    # Create a2 and b2 as expanded, filled matrices
    a2, b2 = a.matrix, b.matrix
    if afill is _fill_like:
        # Fill a's structural holes with b's values (complemented mask)
        a2 = a2.dup()
        a2(~a2.S) << b2
    elif afill is not None:
        # Scalar fill: write afill at b's positions, then restore a's own values on top
        a2 = grblas.Matrix.new(a2.dtype, nrows=a2.nrows, ncols=a2.ncols)
        a2(b2.S) << afill
        a2(a.matrix.S) << a.matrix
    if bfill is _fill_like:
        # Fill b's holes from a2 (the possibly already-filled version)
        b2 = b2.dup()
        b2(~b2.S) << a2
    elif bfill is not None:
        # Scalar fill: write bfill at a2's positions, then restore b's own values on top
        b2 = grblas.Matrix.new(b2.dtype, nrows=b2.nrows, ncols=b2.ncols)
        b2(a2.S) << bfill
        b2(b.matrix.S) << b.matrix
    # Handle op
    if op is None:
        return Pivot(a2, a.schema, a.left, a.top), Pivot(b2, b.schema, b.left, b.top)
    else:
        # Element-wise combine over the intersection of the (filled) structures
        result = a2.ewise_mult(b2, op=op)
        return Pivot(result.new(), a.schema, a.left, a.top)
def _align_subset(x: Pivot, sub: Flat, op=None, afill=None, bfill=None, reversed=False) -> Union[Pivot, Tuple[Pivot, Pivot]]:
    """
    Align a Flat whose dims are a subset of a Pivot's dims.

    x must have mismatched dims on top
    sub must have dims exactly matching x.left

    sub's values are broadcast across x's top dimensions via a diagonal
    matrix multiply. `reversed` only swaps the order of the returned pair
    (and the operand order given to op) so callers can keep (a, b) order.
    Returns a single Pivot if op is given, otherwise a (Pivot, Pivot) pair.
    """
    x2 = x.matrix
    size = sub.vector.size
    if x2.nrows != size:
        raise SizeMismatchError(f"nrows {x2.nrows} != size {size}")
    # Convert sub's values into the diagonal of a matrix
    index, vals = sub.vector.to_values()
    diag = grblas.Matrix.from_values(index, index, vals, nrows=size, ncols=size)
    # Multiply the diagonal matrix by the shape of x (any_first will only take values from diag)
    # This performs a broadcast of sub's values to the corresponding locations in x
    y2 = diag.mxm(x2, grblas.semiring.any_first).new()
    # mxm is an intersection operation, so mismatched codes are missing in m_broadcast
    if op is None or afill is not None:
        # Check if sub contained more rows than are present in m_broadcast
        v_x = y2.reduce_rows(grblas.monoid.any).new()
        if v_x.nvals < sub.vector.nvals:
            # Find mismatched codes and add them in with the NULL
            v_x(~v_x.S, replace=True)[:] << sub.vector
            # Update y2 with values lost from mxm
            y2[:, 0] << v_x  # Column 0 is the code for all_dims == NULL
        if afill is not None:
            # Fill corresponding elements of x2 if afill
            if afill is not _fill_like:
                # Scalar fill: overwrite the recovered values with the scalar
                v_x(v_x.S) << afill
            x2 = x2.dup()
            x2(v_x.S)[:, 0] << v_x
    if bfill is _fill_like:
        # Fill y2's holes with x2's values
        y2(~y2.S) << x2
    elif bfill is not None:
        # Scalar fill: write bfill at x2's positions, then restore y2's own values on top
        ybackup = y2
        y2 = grblas.Matrix.new(y2.dtype, nrows=y2.nrows, ncols=y2.ncols)
        y2(x2.S) << bfill
        y2(ybackup.S) << ybackup
    # Handle op
    if op is None:
        x = Pivot(x2, x.schema, x.left, x.top)
        y = Pivot(y2, x.schema, x.left, x.top)
        return (y, x) if reversed else (x, y)
    else:
        # Operand order honours `reversed` so non-commutative ops stay correct
        result = y2.ewise_mult(x2, op=op) if reversed else x2.ewise_mult(y2, op=op)
        return Pivot(result.new(), x.schema, x.left, x.top)
def _align_fully_disjoint(x: Flat, y: Flat, op=None) -> Union[Pivot, Tuple[Pivot, Pivot]]:
    """
    Cross-join two Flats with completely unrelated dimensions.

    x.dims must have no overlap with y.dims
    Returns a single combined Pivot if op is given, otherwise a pair of
    Pivots shaped (x.dims left, y.dims top).
    """
    def _as_column(flat):
        # One-column matrix holding the Flat's vector
        col = grblas.Matrix.new(flat.vector.dtype, flat.vector.size, 1)
        col[:, 0] << flat.vector
        return col

    x_col = _as_column(x)
    y_col = _as_column(y)
    # Perform the cross-joins. Values from only a single input are used per calculation
    x_expr = x_col.mxm(y_col.T, grblas.semiring.any_first)
    y_expr = x_col.mxm(y_col.T, grblas.semiring.any_second)
    if op is None:
        return (
            Pivot(x_expr.new(), x.schema, left=x.dims, top=y.dims),
            Pivot(y_expr.new(), x.schema, left=x.dims, top=y.dims)
        )
    combined = x_expr.new()
    combined(accum=op) << y_expr
    return Pivot(combined, x.schema, left=x.dims, top=y.dims)
def _align_partial_disjoint(x: Pivot, y: Pivot, op=None, afill=None, bfill=None) -> Union[Pivot, Tuple[Pivot, Pivot]]:
    """
    Align two Pivots that share their left dims but have disjoint top dims.

    x.left must match y.left
    x.top must have no overlap with y.top

    Performs a per-row cross join of the two matrices via numba kernels,
    building the results as COO arrays. Output sizes are computed up front
    from per-row nonzero counts (x1/y1) so the arrays can be pre-allocated.
    Returns a single Pivot if op is given, otherwise a (Pivot, Pivot) pair.
    """
    assert x.left == y.left
    matched_dims = x.left
    mismatched_dims = x.top | y.top
    top_mask = x.schema.build_bitmask(mismatched_dims)
    # Compute the size and offsets of the cross join computation
    # x1/y1 hold per-row nonzero counts; combo holds per-row output counts (product)
    x1 = x.matrix.apply(grblas.unary.one).new().reduce_rows(grblas.monoid.plus['INT64']).new()
    y1 = y.matrix.apply(grblas.unary.one).new().reduce_rows(grblas.monoid.plus['INT64']).new()
    combo = x1.ewise_add(y1, grblas.monoid.times).new()
    # Mask back into x1 and y1 to contain only what applies to each (unless filling to match)
    if op is None:
        xmask = x1.S if afill is None else None
        ymask = y1.S if bfill is None else None
        x1(mask=xmask) << combo
        y1(mask=ymask) << combo
        # Total output entry counts for the two result arrays
        x1_size = int(x1.reduce().value)
        y1_size = int(y1.reduce().value)
    else:  # op is provided, will only have a single return object
        # Trim x1 to final size, then compute result_size
        if afill is None and bfill is None:  # intersecting values only
            x1 << x1.ewise_mult(y1, grblas.monoid.times)
        elif afill is None:  # size same as x
            x1(x1.S, replace=True) << combo
        elif bfill is None:  # size same as y
            x1(y1.S, replace=True) << combo
        else:
            x1 = combo
        result_size = int(x1.reduce().value)
    # combo_offset is unused; only the row indices drive the kernels
    combo_idx, combo_offset = combo.to_values()
    # Extract input arrays in hypercsr format
    xs = x.matrix.ss.export(format='hypercsr', sort=True)
    xs_rows = xs['rows']
    xs_indptr = xs['indptr']
    xs_col_indices = xs['col_indices']
    xs_values = xs['values']
    ys = y.matrix.ss.export(format='hypercsr', sort=True)
    ys_rows = ys['rows']
    ys_indptr = ys['indptr']
    ys_col_indices = ys['col_indices']
    ys_values = ys['values']
    if op is None:
        # Build output data structures (pre-sized COO arrays filled by the kernel)
        r1_rows = np.zeros((x1_size,), dtype=np.uint64)
        r1_cols = np.zeros((x1_size,), dtype=np.uint64)
        r1_vals = np.zeros((x1_size,), dtype=xs['values'].dtype)
        r2_rows = np.zeros((y1_size,), dtype=np.uint64)
        r2_cols = np.zeros((y1_size,), dtype=np.uint64)
        r2_vals = np.zeros((y1_size,), dtype=ys['values'].dtype)
        _align_partial_disjoint_numba(
            combo_idx,
            xs_rows, xs_indptr, xs_col_indices, xs_values,
            ys_rows, ys_indptr, ys_col_indices, ys_values,
            r1_rows, r1_cols, r1_vals,
            r2_rows, r2_cols, r2_vals,
            afill is not None, bfill is not None,
            afill if afill is not None and afill is not _fill_like else None,
            bfill if bfill is not None and bfill is not _fill_like else None
        )
        return (
            Pivot(grblas.Matrix.from_values(r1_rows, r1_cols, r1_vals, nrows=x.matrix.nrows, ncols=top_mask + 1),
                  x.schema, matched_dims, mismatched_dims),
            Pivot(grblas.Matrix.from_values(r2_rows, r2_cols, r2_vals, nrows=x.matrix.nrows, ncols=top_mask + 1),
                  x.schema, matched_dims, mismatched_dims)
        )
    else:
        # Determine the output dtype of op for the unified input dtype
        unified_input_dtype = grblas.dtypes.unify(
            grblas.dtypes.lookup_dtype(xs['values'].dtype),
            grblas.dtypes.lookup_dtype(ys['values'].dtype)
        )
        output_dtype_str = op.types[grblas.dtypes.lookup_dtype(unified_input_dtype).name]
        # Wrap op in a numba-callable form for use inside the kernel
        op = jitted_op(op)
        # Build output data structures
        r_rows = np.zeros((result_size,), dtype=np.uint64)
        r_cols = np.zeros((result_size,), dtype=np.uint64)
        r_vals = np.zeros((result_size,), dtype=grblas.dtypes.lookup_dtype(output_dtype_str).np_type)
        _align_partial_disjoint_numba_op(
            op, combo_idx,
            xs_rows, xs_indptr, xs_col_indices, xs_values,
            ys_rows, ys_indptr, ys_col_indices, ys_values,
            r_rows, r_cols, r_vals,
            afill is not None, bfill is not None,
            afill if afill is not None and afill is not _fill_like else None,
            bfill if bfill is not None and bfill is not _fill_like else None
        )
        return (
            Pivot(grblas.Matrix.from_values(r_rows, r_cols, r_vals), x.schema, matched_dims, mismatched_dims)
        )
@numba.njit
def _align_partial_disjoint_numba(
    combo_idx,
    xs_rows, xs_indptr, xs_col_indices, xs_values,
    ys_rows, ys_indptr, ys_col_indices, ys_values,
    r1_rows, r1_cols, r1_vals,
    r2_rows, r2_cols, r2_vals,
    fill_x, fill_y,  # boolean
    x_fillval, y_fillval,  # scalar or None
):
    """
    Per-row cross-join kernel for the partially-disjoint case (no op).

    Inputs xs_*/ys_* are the hypercsr export arrays of the two matrices;
    r1_*/r2_* are pre-sized output COO arrays written in place. The output
    column index is the sum of the two input column codes (disjoint top
    dims occupy disjoint bit ranges of the code, so addition combines them).
    When a row exists in only one input, fill_x/fill_y control whether a
    corresponding entry is emitted for the other output; a fill value of
    None means "copy the present operand's value" (the _fill_like case).
    """
    # xi/yi are the current index of xs/ys, not necessarily in sync with combo_idx due to mismatched codes
    xi = 0
    yi = 0
    xoffset = 0
    yoffset = 0
    for row in combo_idx:
        # Find xrow and yrow, if available
        xrow, yrow = -1, -1
        if xi < len(xs_rows) and xs_rows[xi] == row:
            xrow = xi
            xi += 1
        if yi < len(ys_rows) and ys_rows[yi] == row:
            yrow = yi
            yi += 1
        # Iterate over x and y indices for this row
        if xrow >= 0 and yrow >= 0:
            # Row present in both inputs: emit the full cross product
            for xj in range(xs_indptr[xrow], xs_indptr[xrow + 1]):
                for yj in range(ys_indptr[yrow], ys_indptr[yrow + 1]):
                    r1_rows[xoffset] = row
                    r2_rows[yoffset] = row
                    col_idx = xs_col_indices[xj] + ys_col_indices[yj]
                    r1_cols[xoffset] = col_idx
                    r2_cols[yoffset] = col_idx
                    r1_vals[xoffset] = xs_values[xj]
                    r2_vals[yoffset] = ys_values[yj]
                    xoffset += 1
                    yoffset += 1
        elif xrow >= 0:
            # Row only in x: copy x entries; optionally synthesize y entries
            for xj in range(xs_indptr[xrow], xs_indptr[xrow + 1]):
                r1_rows[xoffset] = row
                r1_cols[xoffset] = xs_col_indices[xj]
                r1_vals[xoffset] = xs_values[xj]
                xoffset += 1
                if fill_y:
                    r2_rows[yoffset] = row
                    r2_cols[yoffset] = xs_col_indices[xj]
                    if y_fillval is None:
                        # _fill_like: mirror x's value into y's output
                        r2_vals[yoffset] = xs_values[xj]
                    else:
                        r2_vals[yoffset] = y_fillval
                    yoffset += 1
        elif yrow >= 0:
            # Row only in y: copy y entries; optionally synthesize x entries
            for yj in range(ys_indptr[yrow], ys_indptr[yrow + 1]):
                r2_rows[yoffset] = row
                r2_cols[yoffset] = ys_col_indices[yj]
                r2_vals[yoffset] = ys_values[yj]
                yoffset += 1
                if fill_x:
                    r1_rows[xoffset] = row
                    r1_cols[xoffset] = ys_col_indices[yj]
                    if x_fillval is None:
                        # _fill_like: mirror y's value into x's output
                        r1_vals[xoffset] = ys_values[yj]
                    else:
                        r1_vals[xoffset] = x_fillval
                    xoffset += 1
        else:
            raise Exception("Unhandled row")
@numba.njit
def _align_partial_disjoint_numba_op(
    op, combo_idx,
    xs_rows, xs_indptr, xs_col_indices, xs_values,
    ys_rows, ys_indptr, ys_col_indices, ys_values,
    r_rows, r_cols, r_vals,
    fill_x, fill_y,  # boolean
    x_fillval, y_fillval,  # scalar or None
):
    """
    Per-row cross-join kernel for the partially-disjoint case, combining
    the two inputs with `op` (a jitted binary function) on the fly.

    Same traversal as _align_partial_disjoint_numba, but emits a single
    pre-sized COO output. Rows present in only one input are skipped unless
    the corresponding fill flag is set; then the missing operand is either
    the present operand's own value (fill value None, the _fill_like case)
    or the given scalar fill value.
    """
    # xi/yi are the current index of xs/ys, not necessarily in sync with combo_idx due to mismatched codes
    xi = 0
    yi = 0
    offset = 0
    for row in combo_idx:
        # Find xrow and yrow, if available
        xrow, yrow = -1, -1
        if xi < len(xs_rows) and xs_rows[xi] == row:
            xrow = xi
            xi += 1
        if yi < len(ys_rows) and ys_rows[yi] == row:
            yrow = yi
            yi += 1
        # Iterate over x and y indices for this row
        if xrow >= 0 and yrow >= 0:
            # Row present in both inputs: combine the full cross product
            for xj in range(xs_indptr[xrow], xs_indptr[xrow + 1]):
                for yj in range(ys_indptr[yrow], ys_indptr[yrow + 1]):
                    r_rows[offset] = row
                    col_idx = xs_col_indices[xj] + ys_col_indices[yj]
                    r_cols[offset] = col_idx
                    # Could do the computation here between r1 and r2 rather than keeping them separate
                    r_vals[offset] = op(xs_values[xj], ys_values[yj])
                    offset += 1
        elif xrow >= 0:
            # Row only in x: emit only when y-side filling is requested
            if not fill_y:
                continue
            for xj in range(xs_indptr[xrow], xs_indptr[xrow + 1]):
                r_rows[offset] = row
                r_cols[offset] = xs_col_indices[xj]
                other_val = xs_values[xj] if y_fillval is None else y_fillval
                r_vals[offset] = op(xs_values[xj], other_val)
                offset += 1
        elif yrow >= 0:
            # Row only in y: emit only when x-side filling is requested
            if not fill_x:
                continue
            for yj in range(ys_indptr[yrow], ys_indptr[yrow + 1]):
                r_rows[offset] = row
                r_cols[offset] = ys_col_indices[yj]
                other_val = ys_values[yj] if x_fillval is None else x_fillval
                r_vals[offset] = op(ys_values[yj], other_val)
                offset += 1
        else:
            raise Exception("Unhandled row")
|
[
"grblas.Vector.new",
"numpy.zeros",
"grblas.Matrix.new",
"grblas.dtypes.lookup_dtype",
"grblas.Matrix.from_values"
] |
[((7765, 7834), 'grblas.Matrix.from_values', 'grblas.Matrix.from_values', (['index', 'index', 'vals'], {'nrows': 'size', 'ncols': 'size'}), '(index, index, vals, nrows=size, ncols=size)\n', (7790, 7834), False, 'import grblas\n'), ((9584, 9635), 'grblas.Matrix.new', 'grblas.Matrix.new', (['x.vector.dtype', 'x.vector.size', '(1)'], {}), '(x.vector.dtype, x.vector.size, 1)\n', (9601, 9635), False, 'import grblas\n'), ((9670, 9721), 'grblas.Matrix.new', 'grblas.Matrix.new', (['y.vector.dtype', 'y.vector.size', '(1)'], {}), '(y.vector.dtype, y.vector.size, 1)\n', (9687, 9721), False, 'import grblas\n'), ((12307, 12344), 'numpy.zeros', 'np.zeros', (['(x1_size,)'], {'dtype': 'np.uint64'}), '((x1_size,), dtype=np.uint64)\n', (12315, 12344), True, 'import numpy as np\n'), ((12363, 12400), 'numpy.zeros', 'np.zeros', (['(x1_size,)'], {'dtype': 'np.uint64'}), '((x1_size,), dtype=np.uint64)\n', (12371, 12400), True, 'import numpy as np\n'), ((12419, 12465), 'numpy.zeros', 'np.zeros', (['(x1_size,)'], {'dtype': "xs['values'].dtype"}), "((x1_size,), dtype=xs['values'].dtype)\n", (12427, 12465), True, 'import numpy as np\n'), ((12484, 12521), 'numpy.zeros', 'np.zeros', (['(y1_size,)'], {'dtype': 'np.uint64'}), '((y1_size,), dtype=np.uint64)\n', (12492, 12521), True, 'import numpy as np\n'), ((12540, 12577), 'numpy.zeros', 'np.zeros', (['(y1_size,)'], {'dtype': 'np.uint64'}), '((y1_size,), dtype=np.uint64)\n', (12548, 12577), True, 'import numpy as np\n'), ((12596, 12642), 'numpy.zeros', 'np.zeros', (['(y1_size,)'], {'dtype': "ys['values'].dtype"}), "((y1_size,), dtype=ys['values'].dtype)\n", (12604, 12642), True, 'import numpy as np\n'), ((13858, 13899), 'numpy.zeros', 'np.zeros', (['(result_size,)'], {'dtype': 'np.uint64'}), '((result_size,), dtype=np.uint64)\n', (13866, 13899), True, 'import numpy as np\n'), ((13917, 13958), 'numpy.zeros', 'np.zeros', (['(result_size,)'], {'dtype': 'np.uint64'}), '((result_size,), dtype=np.uint64)\n', (13925, 13958), True, 'import numpy as 
np\n'), ((5621, 5662), 'grblas.Vector.new', 'grblas.Vector.new', (['a2.dtype'], {'size': 'a2.size'}), '(a2.dtype, size=a2.size)\n', (5638, 5662), False, 'import grblas\n'), ((5840, 5881), 'grblas.Vector.new', 'grblas.Vector.new', (['b2.dtype'], {'size': 'b2.size'}), '(b2.dtype, size=b2.size)\n', (5857, 5881), False, 'import grblas\n'), ((6696, 6755), 'grblas.Matrix.new', 'grblas.Matrix.new', (['a2.dtype'], {'nrows': 'a2.nrows', 'ncols': 'a2.ncols'}), '(a2.dtype, nrows=a2.nrows, ncols=a2.ncols)\n', (6713, 6755), False, 'import grblas\n'), ((6933, 6992), 'grblas.Matrix.new', 'grblas.Matrix.new', (['b2.dtype'], {'nrows': 'b2.nrows', 'ncols': 'b2.ncols'}), '(b2.dtype, nrows=b2.nrows, ncols=b2.ncols)\n', (6950, 6992), False, 'import grblas\n'), ((8974, 9033), 'grblas.Matrix.new', 'grblas.Matrix.new', (['y2.dtype'], {'nrows': 'y2.nrows', 'ncols': 'y2.ncols'}), '(y2.dtype, nrows=y2.nrows, ncols=y2.ncols)\n', (8991, 9033), False, 'import grblas\n'), ((13566, 13612), 'grblas.dtypes.lookup_dtype', 'grblas.dtypes.lookup_dtype', (["xs['values'].dtype"], {}), "(xs['values'].dtype)\n", (13592, 13612), False, 'import grblas\n'), ((13626, 13672), 'grblas.dtypes.lookup_dtype', 'grblas.dtypes.lookup_dtype', (["ys['values'].dtype"], {}), "(ys['values'].dtype)\n", (13652, 13672), False, 'import grblas\n'), ((14536, 14585), 'grblas.Matrix.from_values', 'grblas.Matrix.from_values', (['r_rows', 'r_cols', 'r_vals'], {}), '(r_rows, r_cols, r_vals)\n', (14561, 14585), False, 'import grblas\n'), ((13153, 13251), 'grblas.Matrix.from_values', 'grblas.Matrix.from_values', (['r1_rows', 'r1_cols', 'r1_vals'], {'nrows': 'x.matrix.nrows', 'ncols': '(top_mask + 1)'}), '(r1_rows, r1_cols, r1_vals, nrows=x.matrix.nrows,\n ncols=top_mask + 1)\n', (13178, 13251), False, 'import grblas\n'), ((13327, 13425), 'grblas.Matrix.from_values', 'grblas.Matrix.from_values', (['r2_rows', 'r2_cols', 'r2_vals'], {'nrows': 'x.matrix.nrows', 'ncols': '(top_mask + 1)'}), '(r2_rows, r2_cols, r2_vals, 
nrows=x.matrix.nrows,\n ncols=top_mask + 1)\n', (13352, 13425), False, 'import grblas\n'), ((13719, 13766), 'grblas.dtypes.lookup_dtype', 'grblas.dtypes.lookup_dtype', (['unified_input_dtype'], {}), '(unified_input_dtype)\n', (13745, 13766), False, 'import grblas\n'), ((14007, 14051), 'grblas.dtypes.lookup_dtype', 'grblas.dtypes.lookup_dtype', (['output_dtype_str'], {}), '(output_dtype_str)\n', (14033, 14051), False, 'import grblas\n')]
|
"""
Definice mapování URL na jednotlivá view.
"""
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
from django.urls import include, path, re_path
from django.views.generic import TemplateView
from django.views.generic.base import RedirectView
from rest_framework.schemas import get_schema_view
urlpatterns = [
    # API routing
    path("api/v1/", include("api.urls")),
    # favicon for older browsers
    path("favicon.ico", RedirectView.as_view(url=staticfiles_storage.url("admin/favicon.ico"))),
    # dynamically generated OpenAPI schema
    path(
        "api/open-api/",
        get_schema_view(
            title="ÚPadmin API",
            description="Dokumentace *REST API* pro aplikaci **ÚPadmin**. **[Přejít do aplikace](/)**",
            version="1.0.0",
            urlconf="api.urls",
        ),
        name="openapi-schema",
    ),
    # Swagger UI documentation for the API
    path("api/docs/", TemplateView.as_view(template_name="swagger-ui.html"), name="swagger-ui"),
    # default page (serves the React application)
    re_path(r"^", TemplateView.as_view(template_name="react-autogenerate.html")),
]
# enable the django-debug-toolbar pages
if settings.DEBUG or settings.MANUAL_PRODUCTION:
    import debug_toolbar
    urlpatterns.insert(0, re_path(r"^__debug__/", include(debug_toolbar.urls)))
|
[
"django.views.generic.TemplateView.as_view",
"rest_framework.schemas.get_schema_view",
"django.contrib.staticfiles.storage.staticfiles_storage.url",
"django.urls.include"
] |
[((401, 420), 'django.urls.include', 'include', (['"""api.urls"""'], {}), "('api.urls')\n", (408, 420), False, 'from django.urls import include, path, re_path\n'), ((631, 806), 'rest_framework.schemas.get_schema_view', 'get_schema_view', ([], {'title': '"""ÚPadmin API"""', 'description': '"""Dokumentace *REST API* pro aplikaci **ÚPadmin**. **[Přejít do aplikace](/)**"""', 'version': '"""1.0.0"""', 'urlconf': '"""api.urls"""'}), "(title='ÚPadmin API', description=\n 'Dokumentace *REST API* pro aplikaci **ÚPadmin**. **[Přejít do aplikace](/)**'\n , version='1.0.0', urlconf='api.urls')\n", (646, 806), False, 'from rest_framework.schemas import get_schema_view\n'), ((950, 1003), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""swagger-ui.html"""'}), "(template_name='swagger-ui.html')\n", (970, 1003), False, 'from django.views.generic import TemplateView\n'), ((1092, 1153), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""react-autogenerate.html"""'}), "(template_name='react-autogenerate.html')\n", (1112, 1153), False, 'from django.views.generic import TemplateView\n'), ((1324, 1351), 'django.urls.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (1331, 1351), False, 'from django.urls import include, path, re_path\n'), ((509, 553), 'django.contrib.staticfiles.storage.staticfiles_storage.url', 'staticfiles_storage.url', (['"""admin/favicon.ico"""'], {}), "('admin/favicon.ico')\n", (532, 553), False, 'from django.contrib.staticfiles.storage import staticfiles_storage\n')]
|
import torch
import librosa
import numpy as np
import mlflow.pytorch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from librosa.feature import mfcc
def predict(model, x, device="cuda"):
    """Classify every sample of an audio signal with a chunk-level model.

    Parameters
    ----------
    model : torch.nn.Module
        Trained classifier returning per-chunk class probabilities.
    x : numpy.ndarray
        1-D mono audio signal (assumed to be sampled at 22050 Hz — TODO confirm).
    device : str, optional
        Device the model's inputs must live on (default ``"cuda"``,
        preserving the previous hard-coded behaviour).

    Returns
    -------
    torch.Tensor
        1-D ``int64`` tensor with one predicted class id per input sample.
    """
    n_mfcc = 40
    sample_rate = 22050
    # Keyword arguments are required by librosa >= 0.10 (y/sr became
    # keyword-only); this call also works on older librosa releases.
    mel_coefficients = mfcc(y=x, sr=sample_rate, n_mfcc=n_mfcc)
    time_frames = mel_coefficients.shape[-1]
    chunks = int(np.ceil(time_frames / 160))
    # Pad the time axis so it is an exact multiple of 160 frames
    pad_size = (chunks * 160) - time_frames
    mfcc_pad = np.pad(mel_coefficients, ((0, 0), (0, pad_size)), mode="wrap")
    # NOTE(review): this splits into 160 equal pieces (each `chunks` frames
    # wide), not into `chunks` windows of 160 frames — confirm against the
    # model's expected input shape.
    mfcc_split = np.split(mfcc_pad, 160, axis=1)
    mfcc_batch = np.stack(mfcc_split)
    probabilities = model(torch.FloatTensor(mfcc_batch).to(device))
    classifications = torch.argmax(probabilities, dim=-1)
    sequence = torch.reshape(classifications, (1, -1))
    # Stretch the chunk-level labels back out to one label per audio sample
    original_length = x.size
    predictions = F.interpolate(sequence.unsqueeze(0).to(torch.float), original_length, mode='linear')
    predictions = torch.squeeze(predictions).to(torch.int64)
    return predictions
def main():
    """Load the trained model, classify a sample clip, and plot the result."""
    model_path = "mlruns/0/ec90ba732dec470492d3d6e7089e644b/artifacts/model"
    model = mlflow.pytorch.load_model(model_path)
    audio, _ = librosa.load("data/reddit/1vtdusf08us21-DASH_240.wav")
    labels = predict(model, audio)
    labels = labels.cpu().detach().numpy()
    fig, ax = plt.subplots()
    ax.plot(audio)
    # Shade the waveform by predicted class (1 = green, 2 = red)
    frames = np.arange(len(labels))
    xaxis_transform = ax.get_xaxis_transform()
    ax.fill_between(frames, 0, 1, color="green", where=labels == 1, alpha=0.3, transform=xaxis_transform)
    ax.fill_between(frames, 0, 1, color="red", where=labels == 2, alpha=0.3, transform=xaxis_transform)
    plt.show()


if __name__ == "__main__":
    main()
|
[
"numpy.pad",
"numpy.stack",
"matplotlib.pyplot.show",
"numpy.ceil",
"torch.argmax",
"torch.FloatTensor",
"matplotlib.pyplot.subplots",
"numpy.split",
"torch.squeeze",
"librosa.load",
"torch.reshape",
"librosa.feature.mfcc"
] |
[((257, 292), 'librosa.feature.mfcc', 'mfcc', (['x', 'sample_rate'], {'n_mfcc': 'n_mfcc'}), '(x, sample_rate, n_mfcc=n_mfcc)\n', (261, 292), False, 'from librosa.feature import mfcc\n'), ((443, 505), 'numpy.pad', 'np.pad', (['mel_coefficients', '((0, 0), (0, pad_size))'], {'mode': '"""wrap"""'}), "(mel_coefficients, ((0, 0), (0, pad_size)), mode='wrap')\n", (449, 505), True, 'import numpy as np\n'), ((523, 554), 'numpy.split', 'np.split', (['mfcc_pad', '(160)'], {'axis': '(1)'}), '(mfcc_pad, 160, axis=1)\n', (531, 554), True, 'import numpy as np\n'), ((573, 593), 'numpy.stack', 'np.stack', (['mfcc_split'], {}), '(mfcc_split)\n', (581, 593), True, 'import numpy as np\n'), ((684, 719), 'torch.argmax', 'torch.argmax', (['probabilities'], {'dim': '(-1)'}), '(probabilities, dim=-1)\n', (696, 719), False, 'import torch\n'), ((735, 774), 'torch.reshape', 'torch.reshape', (['classifications', '(1, -1)'], {}), '(classifications, (1, -1))\n', (748, 774), False, 'import torch\n'), ((1150, 1204), 'librosa.load', 'librosa.load', (['"""data/reddit/1vtdusf08us21-DASH_240.wav"""'], {}), "('data/reddit/1vtdusf08us21-DASH_240.wav')\n", (1162, 1204), False, 'import librosa\n'), ((1312, 1326), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1324, 1326), True, 'import matplotlib.pyplot as plt\n'), ((1628, 1638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1636, 1638), True, 'import matplotlib.pyplot as plt\n'), ((355, 381), 'numpy.ceil', 'np.ceil', (['(time_frames / 160)'], {}), '(time_frames / 160)\n', (362, 381), True, 'import numpy as np\n'), ((926, 952), 'torch.squeeze', 'torch.squeeze', (['predictions'], {}), '(predictions)\n', (939, 952), False, 'import torch\n'), ((620, 649), 'torch.FloatTensor', 'torch.FloatTensor', (['mfcc_batch'], {}), '(mfcc_batch)\n', (637, 649), False, 'import torch\n')]
|
#!/usr/bin/env python
import os
import pprint
import xml.dom.minidom
# Valid id ranges for entities parsed from the XML definitions.
_MIN_PROJECT_ID = 0
_MAX_PROJECT_ID = 255
_MIN_FEATURE_ID = 0
_MAX_FEATURE_ID = 255
_MIN_CLASS_ID = 0
_MAX_CLASS_ID = 255
_MIN_CMD_ID = 0
_MAX_CMD_ID = 65535
# NOTE(review): presumably the name of the "generic" feature — verify against callers
_FTR_GEN = 'generic'
#===============================================================================
#===============================================================================
class ArParserError(Exception):
    """Base error raised for failures while parsing the command definitions."""
    pass
#===============================================================================
#===============================================================================
class ArCmdListType(object):
    """List type of a command: plain command, list item, or map item."""
    NONE = 0
    LIST = 1
    MAP = 2
    TO_STRING = {NONE: "NONE", LIST: "LIST_ITEM", MAP: "MAP_ITEM"}
    # Reverse lookup, derived from TO_STRING
    FROM_STRING = {text: code for code, text in TO_STRING.items()}
#===============================================================================
#===============================================================================
class ArCmdBufferType(object):
    """Buffer type of a command: acknowledged, unacknowledged, or high priority."""
    NON_ACK = 0
    ACK = 1
    HIGH_PRIO = 2
    TO_STRING = {NON_ACK: "NON_ACK", ACK: "ACK", HIGH_PRIO: "HIGH_PRIO"}
    # Reverse lookup, derived from TO_STRING
    FROM_STRING = {text: code for code, text in TO_STRING.items()}
#===============================================================================
#===============================================================================
class ArCmdTimeoutPolicy(object):
    """What to do when a command times out: drop it (POP) or resend it (RETRY)."""
    POP = 0
    RETRY = 1
    TO_STRING = {POP: "POP", RETRY: "RETRY"}
    # Reverse lookup, derived from TO_STRING
    FROM_STRING = {text: code for code, text in TO_STRING.items()}
#===============================================================================
#===============================================================================
class ArCmdContent(object):
    """Content category of a command: state update or one-shot notification."""
    UPDATE = 0
    NOTIFICATION = 1
    TO_STRING = {UPDATE: "UPDATE", NOTIFICATION: "NOTIFICATION"}
    # Reverse lookup, derived from TO_STRING
    FROM_STRING = {text: code for code, text in TO_STRING.items()}
#===============================================================================
#===============================================================================
class ArCmdDeprecation(object):
    """Conversion between booleans and the XML 'true'/'false' strings."""
    TO_STRING = {True: "true", False: "false"}
    # Reverse lookup, derived from TO_STRING
    FROM_STRING = {text: flag for flag, text in TO_STRING.items()}
#===============================================================================
#===============================================================================
class ArArgType(object):
    """Wire types an argument can carry in the protocol."""
    I8 = 0
    U8 = 1
    I16 = 2
    U16 = 3
    I32 = 4
    U32 = 5
    I64 = 6
    U64 = 7
    FLOAT = 8
    DOUBLE = 9
    STRING = 10
    ENUM = 11
    BITFIELD = 12
    MULTISETTING = 13
    TO_STRING = {I8: "i8", U8: "u8", I16: "i16", U16: "u16",
            I32: "i32", U32: "u32", I64: "i64", U64: "u64",
            FLOAT: "float", DOUBLE: "double", STRING: "string",
            ENUM: "enum", BITFIELD: "bitfield", MULTISETTING: "multisetting"}
    # Reverse lookup, derived from TO_STRING
    FROM_STRING = {text: code for code, text in TO_STRING.items()}
#===============================================================================
#===============================================================================
class ArParserCtx(object):
    """Parsing context accumulating all projects and features found so far."""
    def __init__(self):
        self.projects = []
        self.projectsById = {}
        self.projectsByName = {}
        self.features = []
        self.featuresById = {}
        self.featuresByName = {}

    def walk_classes(self):
        """Yield (project, class) pairs for every class of every project."""
        for prj in self.projects:
            for cls in prj.classes:
                yield (prj, cls)

    def walk_cmds(self):
        """Yield (project, class, command) triples for every command."""
        for prj in self.projects:
            for cls in prj.classes:
                for cmd in cls.cmds:
                    yield (prj, cls, cmd)

    def walk_msgs(self):
        """Yield (feature, message) pairs for every message of every feature."""
        for ftr in self.features:
            for msg in ftr.getMsgs():
                yield (ftr, msg)
#===============================================================================
#===============================================================================
class ArProject(object):
    """A project definition: a named id grouping a set of command classes."""
    def __init__(self, name, projectId, doc):
        self.name, self.projectId, self.doc = name, projectId, doc
        # Classes are registered later by the parser
        self.classes = []
        self.classesById = {}
        self.classesByName = {}

    def __repr__(self):
        body = "name='%s', projectId=%d, doc='%s', classes=%s" % (
            self.name, self.projectId, repr(self.doc),
            pprint.pformat(self.classes))
        return "{" + body + "}"
#===============================================================================
#===============================================================================
class ArFeature(object):
    """A feature definition: a named id holding commands, events and enums.

    Also used as the target when converting a legacy ArProject (see
    from_project), in which case the classes* attributes are populated and
    the per-feature id/name maps stay empty.
    """
    def __init__(self, name, featureId, doc):
        self.name = name
        self.featureId = featureId
        self.doc = doc
        self.enums = []
        self.enumsByName = {}
        self.multisets = []
        self.multisetsByName = {}
        self.cmds = []
        self.cmdsById = {} #only for real feature, empty for project
        self.cmdsByName = {} #only for real feature, empty for project
        self.evts = []
        self.evtsById = {}
        self.evtsByName = {}
        self.classes = None #only for project conversion
        self.classesById = {} #only for project conversion
        self.classesByName = {} #only for project conversion
    def getMsgs (self):
        """Return all messages (commands followed by events) as one list."""
        return self.cmds + self.evts
    def getMsgsById (self):
        """Return a merged id->message dict of commands and events."""
        #only for feature
        return dict(self.cmdsById, **self.evtsById)
    def getMsgsByName (self):
        """Return a merged name->message dict of commands and events."""
        #only for feature
        return dict(self.cmdsByName, **self.evtsByName)
    def __repr__(self):
        return ("{name='%s', featureId=%d, doc='%s', enums='%s', "
                "multisets='%s', cmds='%s', evts='%s'}" % (
                self.name,
                self.featureId,
                repr(self.doc),
                pprint.pformat(self.enums),
                pprint.pformat(self.multisets),
                pprint.pformat(self.cmds),
                pprint.pformat(self.evts)))
    @staticmethod
    def from_project(prj):
        """Convert a legacy ArProject into an equivalent ArFeature.

        Classes whose name contains "event" or "state" become events
        (ArEvt), everything else becomes a command (ArCmd); per-argument
        enums are promoted to feature-level ArEnum objects.
        NOTE(review): ArEvt/ArCmd/ArEnum/ArEnumValue are defined elsewhere
        in this file (outside this excerpt).
        """
        ftrObj = ArFeature (prj.name, prj.projectId, prj.doc)
        ftrObj.classes = prj.classes
        ftrObj.classesById = prj.classesById
        ftrObj.classesByName = prj.classesByName
        for cl in prj.classes:
            for cmd in cl.cmds:
                msgId = cmd.cmdId
                msgName = cmd.name
                # Heuristic: "event"/"state" classes hold events, others commands
                if "event" in cl.name.lower() or "state" in cl.name.lower():
                    msgObj = ArEvt(msgName, msgId, cmd.doc, cmd.listType,
                            cmd.bufferType, cmd.timeoutPolicy, cmd.content,
                            cmd.isDeprecated, ftrObj)
                else:
                    msgObj = ArCmd(msgName, msgId, cmd.doc, cmd.listType,
                            cmd.bufferType, cmd.timeoutPolicy, cmd.content,
                            cmd.isDeprecated, ftrObj)
                if cmd.listType == ArCmdListType.MAP:
                    # Map items use their first argument as the map key
                    msgObj.mapKey = cmd.args[0]
                msgObj.cls = cl
                msgObj.args = cmd.args
                msgObj.argsByName = cmd.argsByName
                # Create enums
                for arg in msgObj.args:
                    if len(arg.enums) > 0:
                        # Synthesize a unique enum name: Class_Command_Argument
                        enumName = cl.name + '_' +\
                                cmd.name[0].upper()+cmd.name[1:]+'_' +\
                                arg.name[0].upper()+arg.name[1:]
                        enumObj = ArEnum(enumName, arg.doc)
                        enumObj.msg = msgObj
                        for val in arg.enums:
                            eValObj = ArEnumValue(val.name, val.value, val.doc)
                            enumObj.values.append(eValObj)
                            enumObj.valuesByName[val.name] = eValObj
                        ftrObj.enums.append(enumObj)
                        ftrObj.enumsByName[enumName] = enumObj
                        # Retype the argument to the new enum; its doc moved to the enum
                        arg.argType = enumObj
                        arg.doc = ''
                if isinstance(msgObj, ArCmd):
                    ftrObj.cmds.append(msgObj)
                else:
                    ftrObj.evts.append(msgObj)
        return ftrObj
#===============================================================================
#===============================================================================
class ArClass(object):
    """A parsed <class> within a project: a named group of commands."""
    def __init__(self, name, classId, doc):
        self.name = name
        self.classId = classId
        self.doc = doc
        # Commands indexed three ways: ordered list, by id, by name.
        self.cmds = []
        self.cmdsById = {}
        self.cmdsByName = {}

    def __repr__(self):
        return "{name='%s', classId=%d, doc='%s', cmds=%s}" % (
            self.name, self.classId, repr(self.doc),
            pprint.pformat(self.cmds))
#===============================================================================
#===============================================================================
class ArMsg(object):
    """Base class of a feature message (command or event)."""
    def __init__(self, name, cmdId, doc, listType, bufferType, timeoutPolicy,
            content, isDeprecated, ftr):
        # Identity and documentation.
        self.name = name
        self.cmdId = cmdId
        self.doc = doc
        # Transport / semantics attributes.
        self.listType = listType
        self.bufferType = bufferType
        self.timeoutPolicy = timeoutPolicy
        self.content = content
        self.isDeprecated = isDeprecated
        # For MAP-type messages: the argument acting as key (set later).
        self.mapKey = None
        # Arguments, ordered and by name.
        self.args = []
        self.argsByName = {}
        self.cls = None #only for project conversion
        # Owning feature (None for project commands until conversion).
        self.ftr = ftr

    def __repr__(self):
        return ("{name='%s', cmdId=%d, doc='%s', listType='%s', "
                "bufferType='%s', timeoutPolicy='%s', content='%s', "
                "args=%s isDeprecated=%r}" % (
                self.name, self.cmdId, repr(self.doc),
                ArCmdListType.TO_STRING[self.listType],
                ArCmdBufferType.TO_STRING[self.bufferType],
                ArCmdTimeoutPolicy.TO_STRING[self.timeoutPolicy],
                ArCmdContent.TO_STRING[self.content],
                pprint.pformat(self.args),
                self.isDeprecated))
#===============================================================================
#===============================================================================
class ArCmd(ArMsg):
    """A command message; all behavior lives in the ArMsg base class."""
    def __init__(self, name, cmdId, doc, listType, bufferType, timeoutPolicy,
            content, isDeprecated, ftr):
        # Delegate construction to the common message base class.
        super(ArCmd, self).__init__(name, cmdId, doc, listType, bufferType,
                timeoutPolicy, content, isDeprecated, ftr)
#===============================================================================
#===============================================================================
class ArEvt(ArMsg):
    """An event message; all behavior lives in the ArMsg base class."""
    def __init__(self, name, cmdId, doc, listType, bufferType, timeoutPolicy,
            content, isDeprecated, ftr):
        # Delegate construction to the common message base class.
        super(ArEvt, self).__init__(name, cmdId, doc, listType, bufferType,
                timeoutPolicy, content, isDeprecated, ftr)
#===============================================================================
#===============================================================================
class ArComment(object):
    """Structured documentation attached to a message (<comment> node)."""
    def __init__(self, title, desc, support, triggered, result):
        self.title = title
        self.desc = desc
        self.support = support
        # May be None when the source node lacks the attribute.
        self.triggered = triggered
        self.result = result

    def __repr__(self):
        return ("{title='%s', desc=%s, support='%s', triggered='%s', "
                "result='%s'}" % (self.title, self.desc, self.support,
                self.triggered, self.result))
#===============================================================================
#===============================================================================
class ArArg(object):
    """A single message argument: name, resolved type and documentation."""
    def __init__(self, name, argType, doc):
        self.name = name
        self.argType = argType
        self.doc = doc
        # Inline enum values declared directly on the argument.
        self.enums = []
        self.enumsByName = {}

    def __repr__(self):
        # Plain types are strings; resolved types are objects.
        if isinstance(self.argType, str):
            rendered = ArArgType.TO_STRING[self.argType]
        else:
            rendered = pprint.pformat(self.argType)
        return "{name='%s', argType='%s', doc='%s', enums=%s}" % (
            self.name, rendered, repr(self.doc), pprint.pformat(self.enums))
#===============================================================================
#===============================================================================
class ArMultiSetting(object):
    """A named bundle of settings; links are resolved into messages later."""
    def __init__(self, name, doc):
        self.name = name
        self.doc = doc
        # Raw 'link' strings from the xml; resolved into msgs afterwards.
        self.links = []
        self.msgs = []

    def __repr__(self):
        return "{name='%s', doc='%s', msgs=%s}" % (
            self.name, repr(self.doc), pprint.pformat(self.msgs))
#===============================================================================
#===============================================================================
class ArEnumValue(object):
    """A single named value of an enum definition.

    Instances are ordered by their numeric value so that max()/sorted()
    work (e.g. ArEnum.getMaxBitfieldVal relies on max over values).
    """
    def __init__(self, name, value, doc):
        self.name = name
        self.doc = doc
        self.value = value
    def __cmp__(self, other):
        # Python 2 only; Python 3 ignores __cmp__ and has no cmp builtin.
        return cmp(self.value, other.value)
    def __lt__(self, other):
        # Bug fix: without a rich comparison, ordering (max/sorted) raised
        # TypeError on Python 3.
        return self.value < other.value
    def __repr__(self):
        return ("{name='%s', value=%d, doc='%s'}" % (
            self.name,
            self.value,
            repr(self.doc)))
#===============================================================================
#===============================================================================
class ArEnum(object):
    """A feature-level enum definition with named values."""
    def __init__(self, name, doc):
        self.name = name
        self.doc = doc
        # Values, ordered and by name.
        self.values = []
        self.valuesByName = {}
        # Set when some bitfield argument references this enum.
        self.usedLikeBitfield = False
        self.msg = None #only for project conversion

    def getMaxBitfieldVal(self):
        """Return 2**max_value: the bit weight of the largest enum value."""
        return 2 ** max(self.values).value

    def __repr__(self):
        return "{name='%s', doc='%s', values='%s'}" % (
            self.name, repr(self.doc), pprint.pformat(self.values))
#===============================================================================
#===============================================================================
class ArBitfield(object):
    """An enum used as a bitfield, paired with its unsigned storage type."""
    # Largest bit weight representable by each unsigned storage type.
    TYPE_TO_LENGTH = {
        ArArgType.U8: 2 ** 7,
        ArArgType.U16: 2 ** 15,
        ArArgType.U32: 2 ** 31,
        ArArgType.U64: 2 ** 63,
    }

    def __init__(self, enum, btfType):
        self.enum = enum
        self.btfType = btfType

    def __repr__(self):
        return "{enum='%s', type='%s'}" % (
            pprint.pformat(self.enum), pprint.pformat(self.btfType))
#===============================================================================
#===============================================================================
def _get_node_content(node):
try:
content = node.childNodes[0].nodeValue.strip()
lines = [l.strip() for l in content.split('\n')]
return '\n'.join(lines)
except:
return ''
#===============================================================================
#===============================================================================
def _parse_project_node(filePath, projectNode, projectObj):
    """Parse every <class> child of a <project> node into projectObj.

    Raises ArParserError on duplicate class id/name or out-of-range id;
    *filePath* is only used to label error messages.
    """
    for classNode in projectNode.getElementsByTagName("class"):
        className = classNode.getAttribute("name")
        classId = int(classNode.getAttribute("id"))
        classDoc = _get_node_content(classNode).strip()
        # Check class id/name
        if classId in projectObj.classesById:
            raise ArParserError("%s: Duplicate class id %d" % (
                    filePath, classId))
        if className in projectObj.classesByName:
            raise ArParserError("%s: Duplicate class name '%s'" % (
                    filePath, className))
        if classId < _MIN_CLASS_ID or classId > _MAX_CLASS_ID:
            raise ArParserError("%s: Invalid class id %d" % (
                    filePath, classId))
        # Create class object and index it three ways.
        classObj = ArClass(className, classId, classDoc)
        projectObj.classes.append(classObj)
        projectObj.classesById[classId] = classObj
        projectObj.classesByName[className] = classObj
        # Parse class node
        _parse_class_node(filePath, classNode, classObj)
#===============================================================================
#===============================================================================
def _parse_feature_node(ctx, filePath, featureNode, featureObj):
    """Parse a <feature> node: its enums, multisettings and messages."""
    for enumsNode in featureNode.getElementsByTagName("enums"):
        for enumNode in enumsNode.getElementsByTagName("enum"):
            enumName = enumNode.getAttribute("name")
            enumDoc = _get_node_content(enumNode).strip()
            # Check enum name
            if enumName in featureObj.enumsByName:
                raise ArParserError("%s: Duplicate enum name '%s'" % (
                        filePath, enumName))
            # Create enum object
            enumObj = ArEnum(enumName, enumDoc)
            featureObj.enums.append(enumObj)
            featureObj.enumsByName[enumName] = enumObj
            # Parse enum node
            _parse_enum_node(filePath, enumNode, enumObj)
    # Enums are parsed first: multisettings and message arguments may
    # reference them by name.
    _parse_feature_node_multisets(ctx, filePath, featureNode, featureObj)
    _parse_feature_node_msgs(ctx, filePath, featureNode, featureObj)
#===============================================================================
#===============================================================================
def _parse_feature_node_multisets(ctx, filePath, featureNode, featureObj):
    """Parse <multisetting> children of every <multisettings> node.

    Raises ArParserError on duplicate multisetting names.
    """
    for multisetsNode in featureNode.getElementsByTagName("multisettings"):
        for multisetNode in multisetsNode.getElementsByTagName("multisetting"):
            multisetName = multisetNode.getAttribute("name")
            multisetDoc = _get_node_content(multisetNode).strip()
            # Check multiset name
            if multisetName in featureObj.multisetsByName:
                raise ArParserError("%s: Duplicate multiset name '%s'" % (
                        filePath, multisetName))
            # Create multiset object
            multisetObj = ArMultiSetting(multisetName, multisetDoc)
            featureObj.multisets.append(multisetObj)
            featureObj.multisetsByName[multisetName] = multisetObj
            # Parse multiset node (collects raw link strings only;
            # resolution happens in finalize_ftrs).
            _parse_multiset_node(filePath, multisetNode, multisetObj)
#===============================================================================
#===============================================================================
def _parse_multiset_node(filePath, multisetNode, multisetObj):
for memberNode in multisetNode.getElementsByTagName("member"):
multisetObj.links.append(memberNode.getAttribute("link"))
#===============================================================================
#===============================================================================
def _parse_feature_node_msgs(ctx, filePath, featureNode, featureObj):
    """Parse <cmd>/<evt> children of each <msgs> node into featureObj.

    Validates id range, duplicate names/ids and the optional attributes
    (type, buffer, timeout, content, deprecated), then creates ArCmd or
    ArEvt objects and indexes them on the feature.
    """
    for msgsNode in featureNode.getElementsByTagName("msgs"):
        # Computed once per <msgs> node: reused below to tell commands
        # from events (the original recomputed it for every message).
        cmdNodes = msgsNode.getElementsByTagName("cmd")
        for msgNode in cmdNodes + msgsNode.getElementsByTagName("evt"):
            msgName = msgNode.getAttribute("name")
            msgId = int(msgNode.getAttribute("id"))
            msgDoc = _get_node_content(msgNode).strip()
            if msgId < _MIN_CMD_ID or msgId > _MAX_CMD_ID:
                raise ArParserError("%s: Invalid msg id %d" % (
                        filePath, msgId))
            # Check msg name
            if msgName in featureObj.getMsgsByName():
                raise ArParserError("%s: Duplicate message name '%s'" % (
                        filePath, msgName))
            # Check msg id
            if msgId in featureObj.getMsgsById():
                raise ArParserError("%s: Duplicate message id '%s'" % (
                        filePath, msgName))
            # Get type; a MAP type may carry its key as 'MAP:keyName'.
            msgType = ArCmdListType.NONE
            mapKey = None
            if msgNode.hasAttribute("type"):
                attr, _, mapKey = msgNode.getAttribute("type").partition(':')
                if attr not in ArCmdListType.FROM_STRING:
                    raise ArParserError("%s: Invalid list type '%s'" % (
                            filePath, attr))
                msgType = ArCmdListType.FROM_STRING[attr]
            # Get buffer type
            msgBufferType = ArCmdBufferType.ACK
            if msgNode.hasAttribute("buffer"):
                attr = msgNode.getAttribute("buffer")
                if attr not in ArCmdBufferType.FROM_STRING:
                    raise ArParserError("%s: Invalid buffer type '%s'" % (
                            filePath, attr))
                msgBufferType = ArCmdBufferType.FROM_STRING[attr]
            # Get timeout policy
            msgTimeoutPolicy = ArCmdTimeoutPolicy.POP
            if msgNode.hasAttribute("timeout"):
                attr = msgNode.getAttribute("timeout")
                if attr not in ArCmdTimeoutPolicy.FROM_STRING:
                    raise ArParserError("%s: Invalid timout policy '%s'" % (
                            filePath, attr))
                msgTimeoutPolicy = ArCmdTimeoutPolicy.FROM_STRING[attr]
            # Get Content
            msgContent = ArCmdContent.UPDATE
            if msgNode.hasAttribute("content"):
                attr = msgNode.getAttribute("content")
                if attr not in ArCmdContent.FROM_STRING:
                    raise ArParserError("%s: Invalid notification '%s'" % (
                            filePath, attr))
                msgContent = ArCmdContent.FROM_STRING[attr]
            # Get if the message is deprecated.
            # Bug fix: the original tested 'attr not in FROM_STRING' and then
            # subscripted FROM_STRING[attr] -- any recognized value was
            # ignored and any unrecognized one raised KeyError. The intended
            # logic (matching every other attribute above) is to apply
            # recognized values.
            mgsIsDeprecated = False
            if msgNode.hasAttribute("deprecated"):
                attr = msgNode.getAttribute("deprecated")
                if attr in ArCmdDeprecation.FROM_STRING:
                    mgsIsDeprecated = ArCmdDeprecation.FROM_STRING[attr]
            # Create msg object
            if msgNode in cmdNodes:
                #is command
                msgObj = ArCmd (msgName, msgId, msgDoc,
                        msgType, msgBufferType, msgTimeoutPolicy, msgContent,
                        mgsIsDeprecated, featureObj)
            else:
                #is event
                msgObj = ArEvt(msgName, msgId, msgDoc,
                        msgType, msgBufferType, msgTimeoutPolicy, msgContent,
                        mgsIsDeprecated, featureObj)
            # Parse msg node (fills doc and args).
            _parse_msg_node(ctx, filePath, featureObj, msgNode, msgObj)
            # Find map key -- after args are parsed so it can be resolved.
            if mapKey :
                if mapKey not in msgObj.argsByName:
                    raise ArParserError("%s: Invalid Map Key '%s'" % (
                            filePath, mapKey))
                msgObj.mapKey = msgObj.argsByName[mapKey]
            if isinstance(msgObj, ArCmd):
                featureObj.cmds.append(msgObj)
                featureObj.cmdsById[msgId] = msgObj
                featureObj.cmdsByName[msgName] = msgObj
            else:
                featureObj.evts.append(msgObj)
                featureObj.evtsById[msgId] = msgObj
                featureObj.evtsByName[msgName] = msgObj
#===============================================================================
#===============================================================================
def _parse_class_node(filePath, classNode, classObj):
    """Parse every <cmd> child of a project <class> node into classObj.

    Validates id range, optional attributes and duplicate names before
    creating ArCmd objects (their feature link is None at this point;
    it is set during project-to-feature conversion).
    """
    for cmdNode in classNode.getElementsByTagName("cmd"):
        cmdName = cmdNode.getAttribute("name")
        cmdId = int(cmdNode.getAttribute("id"))
        cmdDoc = _get_cmt_node(cmdNode)
        if cmdId < _MIN_CMD_ID or cmdId > _MAX_CMD_ID:
            raise ArParserError("%s: Invalid cmd id %d" % (
                    filePath, cmdId))
        # Get list type
        cmdListType = ArCmdListType.NONE
        if cmdNode.hasAttribute("type"):
            attr = cmdNode.getAttribute("type")
            if attr not in ArCmdListType.FROM_STRING:
                raise ArParserError("%s: Invalid list type '%s'" % (
                        filePath, attr))
            cmdListType = ArCmdListType.FROM_STRING[attr]
        # Get buffer type
        cmdBufferType = ArCmdBufferType.ACK
        if cmdNode.hasAttribute("buffer"):
            attr = cmdNode.getAttribute("buffer")
            if attr not in ArCmdBufferType.FROM_STRING:
                raise ArParserError("%s: Invalid buffer type '%s'" % (
                        filePath, attr))
            cmdBufferType = ArCmdBufferType.FROM_STRING[attr]
        # Get timeout policy
        cmdTimeoutPolicy = ArCmdTimeoutPolicy.POP
        if cmdNode.hasAttribute("timeout"):
            attr = cmdNode.getAttribute("timeout")
            if attr not in ArCmdTimeoutPolicy.FROM_STRING:
                raise ArParserError("%s: Invalid timout policy '%s'" % (
                        filePath, attr))
            cmdTimeoutPolicy = ArCmdTimeoutPolicy.FROM_STRING[attr]
        # Check cmd name
        if cmdName in classObj.cmdsByName:
            raise ArParserError("%s: Duplicate cmd name '%s'" % (
                    filePath, cmdName))
        # Get cmd Content
        cmdContent = ArCmdContent.UPDATE
        if cmdNode.hasAttribute("content"):
            attr = cmdNode.getAttribute("content")
            if attr not in ArCmdContent.FROM_STRING:
                raise ArParserError("%s: Invalid notification '%s'" % (
                        filePath, attr))
            cmdContent = ArCmdContent.FROM_STRING[attr]
        # Get if the message is deprecated
        mgsIsDeprecated = False
        if cmdNode.hasAttribute("deprecated"):
            attr = cmdNode.getAttribute("deprecated")
            if attr == "true":
                mgsIsDeprecated = True
        # Create cmd object (feature link is None for project commands)
        cmdObj = ArCmd(cmdName, cmdId, cmdDoc, cmdListType, cmdBufferType,
                cmdTimeoutPolicy, cmdContent, mgsIsDeprecated, None)
        cmdObj.cls = classObj
        classObj.cmds.append(cmdObj)
        classObj.cmdsById[cmdId] = cmdObj
        classObj.cmdsByName[cmdName] = cmdObj
        # Parse cmd node
        _parse_prj_cmd_node(filePath, cmdNode, cmdObj)
#===============================================================================
#===============================================================================
def _parse_prj_cmd_node(filePath, cmdNode, cmdObj):
    """Parse the <arg> children of a project command node into cmdObj."""
    for node in cmdNode.getElementsByTagName("arg"):
        name = node.getAttribute("name")
        doc = _get_node_content(node).strip()
        # Resolve the argument type; project args only use plain types.
        typeAttr = node.getAttribute("type")
        if typeAttr not in ArArgType.FROM_STRING:
            raise ArParserError("%s: Invalid arg type '%s'" % (
                    filePath, typeAttr))
        # Argument names must be unique within one command.
        if name in cmdObj.argsByName:
            raise ArParserError("%s: Duplicate arg name '%s'" % (
                    filePath, name))
        obj = ArArg(name, ArArgType.FROM_STRING[typeAttr], doc)
        cmdObj.args.append(obj)
        cmdObj.argsByName[name] = obj
        # Inline enum values of the argument, if any.
        _parse_arg_node(filePath, node, obj)
def _fmt_cmt_node(raw_cmt):
one_line = ' '.join(raw_cmt.split())
lines = [l.strip() for l in one_line.split(r'\n')]
res = '\n'.join(lines)
return res
def _get_cmt_node(msgNode):
    """Build an ArComment for a node.

    Prefers a structured <comment> child (title/support/desc attributes
    plus optional triggered/result); falls back to the node's legacy
    plain-text content, whose first line becomes the title.
    """
    if msgNode.getElementsByTagName("comment"):
        commentNode = msgNode.getElementsByTagName("comment")[0]
        cmtTitle = commentNode.getAttribute("title")
        cmtSupport = commentNode.getAttribute("support")
        cmtDesc = _fmt_cmt_node(commentNode.getAttribute("desc"))
        if commentNode.hasAttribute("triggered"):
            cmtTriggered = _fmt_cmt_node(commentNode.getAttribute("triggered"))
        else:
            cmtTriggered = None
        if commentNode.hasAttribute("result"):
            cmtResult = _fmt_cmt_node(commentNode.getAttribute("result"))
        else:
            cmtResult = None
        # Create comment object
        return ArComment(cmtTitle, cmtDesc, cmtSupport,
                cmtTriggered, cmtResult)
    else:
        # Legacy format: raw text content; first line used as the title.
        oldComment = _get_node_content(msgNode)
        return ArComment(oldComment.splitlines()[0], oldComment, None,
                None, None)
#===============================================================================
#===============================================================================
def _parse_msg_node(ctx, filePath, ftr, msgNode, msgObj):
    """Fill msgObj.doc from the node's comment, then parse its arguments.

    The comment extraction here was a verbatim copy of _get_cmt_node()
    (structured <comment> child with fallback to legacy plain text);
    call that helper instead of duplicating the logic.
    """
    msgObj.doc = _get_cmt_node(msgNode)
    _parse_msg_node_args(ctx, filePath, ftr, msgNode, msgObj)
#===============================================================================
#===============================================================================
def _parse_msg_node_args(ctx, filePath, ftr, msgNode, msgObj):
    """Parse the <arg> children of a message node into msgObj.

    Type attributes have the shape base[:extra1[:extra2]]: a plain type,
    'enum:Name', 'bitfield:LenType:EnumName' or 'multisetting:Name'.
    Enum and multisetting names are looked up on the owning feature first,
    then on the shared generic feature (_FTR_GEN).
    """
    for argNode in msgNode.getElementsByTagName("arg"):
        argName = argNode.getAttribute("name")
        argDoc = _get_node_content(argNode).strip()
        # Get type attrs: base[:extra1[:extra2]]
        attr1, _, flw = argNode.getAttribute("type").partition(':')
        attr2, _, attr3 = flw.partition(':')
        # Check arg type
        if attr1 not in ArArgType.FROM_STRING:
            raise ArParserError("%s: Invalid arg type '%s'" % (
                    filePath, attr1))
        if ArArgType.FROM_STRING[attr1] == ArArgType.ENUM:
            # Find Enum (feature-local first, then the generic feature)
            if attr2 not in ftr.enumsByName and \
                    (_FTR_GEN not in ctx.featuresByName or \
                    attr2 not in ctx.featuresByName[_FTR_GEN].enumsByName):
                raise ArParserError("%s: Invalid enum arg type '%s'" % (
                        filePath, attr2))
            if attr2 in ftr.enumsByName:
                argType = ftr.enumsByName[attr2]
            else:
                argType = ctx.featuresByName[_FTR_GEN].enumsByName[attr2]
        elif ArArgType.FROM_STRING[attr1] == ArArgType.BITFIELD:
            # Find Enum
            if attr3 not in ftr.enumsByName and \
                    (_FTR_GEN not in ctx.featuresByName or \
                    attr3 not in ctx.featuresByName[_FTR_GEN].enumsByName):
                raise ArParserError("%s: Invalid bitfield enum arg type '%s'"
                        % (filePath, attr3))
            # Check bitfield length type.
            # Bug fix: the original condition was
            #   attr2 not in FROM_STRING and FROM_STRING[attr2] in TYPE_TO_LENGTH
            # whose second operand raised KeyError exactly when the first was
            # true, so the ArParserError was unreachable and non-integer
            # length types slipped through to a KeyError further down.
            if attr2 not in ArArgType.FROM_STRING or \
                    ArArgType.FROM_STRING[attr2] not in ArBitfield.TYPE_TO_LENGTH:
                raise ArParserError("%s: Invalid bitfield enum arg length '%s'"
                        % (filePath, attr2))
            if attr3 in ftr.enumsByName:
                btfEnum = ftr.enumsByName[attr3]
            else:
                btfEnum = ctx.featuresByName[_FTR_GEN].enumsByName[attr3]
            btfType = ArArgType.FROM_STRING[attr2]
            # Check Compatibility between Enum max value and bitfield length
            if ArBitfield.TYPE_TO_LENGTH[btfType] < btfEnum.getMaxBitfieldVal():
                raise ArParserError("%s: Too Small bitfield length '%s.%s'"
                        % (filePath, msgObj.name, argName))
            argType = ArBitfield(btfEnum, btfType)
            btfEnum.usedLikeBitfield = True
        elif ArArgType.FROM_STRING[attr1] == ArArgType.MULTISETTING:
            # Find multi setting
            if attr2 not in ftr.multisetsByName and \
                    (_FTR_GEN not in ctx.featuresByName or \
                    attr2 not in ctx.featuresByName[_FTR_GEN].multisetsByName):
                raise ArParserError("%s: Invalid multisetting arg type '%s'"
                        % (filePath, attr2))
            if attr2 in ftr.multisetsByName:
                argType = ftr.multisetsByName[attr2]
            else:
                argType = ctx.featuresByName[_FTR_GEN].multisetsByName[attr2]
        else:
            argType = ArArgType.FROM_STRING[attr1]
        # Check arg name
        if argName in msgObj.argsByName:
            raise ArParserError("%s: Duplicate arg name '%s'" % (
                    filePath, argName))
        # Create arg object
        argObj = ArArg(argName, argType, argDoc)
        msgObj.args.append(argObj)
        msgObj.argsByName[argName] = argObj
        # Parse arg node
        _parse_arg_node(filePath, argNode, argObj)
#===============================================================================
#===============================================================================
def _parse_arg_node(filePath, argNode, argObj):
    """Parse inline <enum> children of an argument node.

    Values are implicit: numbered 0, 1, 2, ... in document order.
    """
    for value, enumNode in enumerate(argNode.getElementsByTagName("enum")):
        name = enumNode.getAttribute("name")
        doc = _get_node_content(enumNode).strip()
        # Enum names must be unique within one argument.
        if name in argObj.enumsByName:
            raise ArParserError("%s: Duplicate enum name '%s'" % (
                    filePath, name))
        obj = ArEnumValue(name, value, doc)
        argObj.enums.append(obj)
        argObj.enumsByName[name] = obj
#===============================================================================
#===============================================================================
def _parse_enum_node(filePath, enumNode, enumObj):
    """Parse the <value> children of a feature <enum> node.

    Values without an explicit 'val' attribute are auto-numbered,
    continuing from the previous value + 1 (starting at 0).
    """
    nextValue = 0
    for eValNode in enumNode.getElementsByTagName("value"):
        eValName = eValNode.getAttribute("name")
        eValDoc = _get_node_content(eValNode).strip()
        if eValNode.hasAttribute("val"):
            eValVal = int(eValNode.getAttribute("val"))
        else:
            eValVal = nextValue
        # Continue auto-numbering from the value just used. (The original
        # also did 'nextValue += 1' in the else branch; that was dead code,
        # always overwritten by this assignment.)
        nextValue = eValVal + 1
        # Check enum value name
        if eValName in enumObj.valuesByName:
            raise ArParserError("%s: Duplicate enum value name '%s'" % (
                    filePath, eValName))
        # Create enum value object
        eValObj = ArEnumValue(eValName, eValVal, eValDoc)
        enumObj.values.append(eValObj)
        enumObj.valuesByName[eValName] = eValObj
#===============================================================================
#===============================================================================
def parse_prj_xml(ctx, filePath):
    """Parse a <project> xml file and register it in *ctx*.

    The project is also converted to an ArFeature and added to the
    feature indexes, so downstream code can treat projects uniformly.
    Raises ArParserError on load failure, wrong root element, duplicate
    project id/name or out-of-range id.
    """
    # Parse project xml file
    try:
        xmlDom = xml.dom.minidom.parse(filePath)
    except Exception as ex:
        raise ArParserError("Error while loading '%s': %s" % (
                filePath, str(ex)))
    # Get project node
    projectNode = xmlDom.documentElement
    if projectNode.tagName != "project":
        raise ArParserError("%s: Bad root element: '%s'" % (
                filePath, projectNode.tagName))
    projectName = projectNode.getAttribute("name")
    projectId = int(projectNode.getAttribute("id"))
    projectDoc = _get_node_content(projectNode).strip()
    # Check project id/name
    if projectId in ctx.projectsById:
        raise ArParserError("%s: Duplicate project id %d" % (
                filePath, projectId))
    if projectId < _MIN_PROJECT_ID or projectId > _MAX_PROJECT_ID:
        raise ArParserError("%s: Invalid project id %d" % (
                filePath, projectId))
    if projectName in ctx.projectsByName:
        raise ArParserError("%s: Duplicate project name '%s'" % (
                filePath, projectName))
    # Create project object
    projectObj = ArProject(projectName, projectId, projectDoc)
    ctx.projects.append(projectObj)
    ctx.projectsById[projectId] = projectObj
    ctx.projectsByName[projectName] = projectObj
    # Parse project node
    _parse_project_node(filePath, projectNode, projectObj)
    # Convert project to feature object
    featureObj = ArFeature.from_project(projectObj)
    ctx.features.append(featureObj)
    ctx.featuresById[featureObj.featureId] = featureObj
    ctx.featuresByName[featureObj.name] = featureObj
#===============================================================================
#===============================================================================
def parse_ftr_xml(ctx, filePath):
    """Parse a <feature> xml file and register it in *ctx*.

    Raises ArParserError on load failure, wrong root element, duplicate
    feature id/name or out-of-range id.
    """
    # Parse feature xml file
    try:
        xmlDom = xml.dom.minidom.parse(filePath)
    except Exception as ex:
        raise ArParserError("Error while loading '%s': %s" % (
                filePath, str(ex)))
    # Get feature node
    featureNode = xmlDom.documentElement
    if featureNode.tagName != "feature":
        raise ArParserError("%s: Bad root element: '%s'" % (
                filePath, featureNode.tagName))
    featureName = featureNode.getAttribute("name")
    featureId = int(featureNode.getAttribute("id"))
    featureDoc = _get_node_content(featureNode).strip()
    # Check feature id/name
    if featureId in ctx.featuresById:
        raise ArParserError("%s: Duplicate feature id %d" % (
                filePath, featureId))
    if featureId < _MIN_FEATURE_ID or featureId > _MAX_FEATURE_ID:
        raise ArParserError("%s: Invalid feature id %d" % (
                filePath, featureId))
    if featureName in ctx.featuresByName:
        raise ArParserError("%s: Duplicate feature name '%s'" % (
                filePath, featureName))
    # Create feature object
    featureObj = ArFeature(featureName, featureId, featureDoc)
    ctx.features.append(featureObj)
    ctx.featuresById[featureId] = featureObj
    ctx.featuresByName[featureName] = featureObj
    # Parse feature node
    _parse_feature_node(ctx, filePath, featureNode, featureObj)
#===============================================================================
#===============================================================================
def _link_to_msg(ctx, link):
parts = link.split(".")
if len(parts) < 2:
return None
if not parts[0] in ctx.featuresByName:
return None
ftr = ctx.featuresByName[parts[0]]
if len(parts) == 2:
return ftr.getMsgsByName[parts[1]]
# Project part
clsName = parts[1]
cmdName = parts[2]
for cmd in ftr.cmds + ftr.evts:
if cmd.name == cmdName and cmd.cls and cmd.cls.name == clsName:
return cmd
return None
#===============================================================================
#===============================================================================
def finalize_ftrs(ctx):
    """Resolve every multisetting link into its message object.

    Must run after all files are parsed, since links may cross features.
    Raises ArParserError when a link cannot be resolved.
    """
    # Finalize features
    for ftr in ctx.features:
        # Finalize multi settings
        for multiset in ftr.multisets:
            for link in multiset.links:
                msg = _link_to_msg(ctx, link)
                if not msg:
                    # Bug fix: the original formatted 'filePath', which does
                    # not exist in this scope (NameError on the error path);
                    # report the feature and multisetting instead.
                    raise ArParserError(
                            "%s.%s: Bad multisetting link '%s'" % (
                            ftr.name, multiset.name, link))
                multiset.msgs.append(msg)
#===============================================================================
#===============================================================================
def parse_xml(ctx, filePath):
    """Parse an xml file, dispatching on its root element.

    Files whose root is neither <feature> nor <project> are silently
    ignored.
    """
    # Parse xml file
    try:
        xmlDom = xml.dom.minidom.parse(filePath)
    except Exception as ex:
        raise ArParserError("Error while loading '%s': %s" % (
                filePath, str(ex)))
    # Get feature node
    node = xmlDom.documentElement
    if node.tagName == "feature":
        parse_ftr_xml(ctx, filePath)
    elif node.tagName == "project":
        parse_prj_xml(ctx, filePath)
#===============================================================================
#===============================================================================
def main():
    """Parse every xml file in the 'xml' directory next to this script.

    generic.xml is parsed first so later files can resolve names against
    the shared generic feature; multisetting links are resolved at the
    end once every feature is known.
    """
    ctx = ArParserCtx()
    # Directory of this script; definitions live in its ./xml subdir.
    # (The original unpacked os.path.split and discarded the filename.)
    path = os.path.dirname(os.path.realpath(__file__))
    path = os.path.join(path, "xml")
    # first load generic.xml
    parse_xml(ctx, os.path.join(path, "generic.xml"))
    for f in sorted(os.listdir(path)):
        if not f.endswith(".xml") or f == "generic.xml":
            continue
        parse_xml(ctx, os.path.join(path, f))
    # Finalize MultiSettings
    finalize_ftrs(ctx)
#===============================================================================
#===============================================================================
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
|
[
"os.path.realpath",
"pprint.pformat",
"os.path.join",
"os.listdir"
] |
[((40814, 40839), 'os.path.join', 'os.path.join', (['path', '"""xml"""'], {}), "(path, 'xml')\n", (40826, 40839), False, 'import os\n'), ((40775, 40801), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (40791, 40801), False, 'import os\n'), ((40889, 40922), 'os.path.join', 'os.path.join', (['path', '"""generic.xml"""'], {}), "(path, 'generic.xml')\n", (40901, 40922), False, 'import os\n'), ((40944, 40960), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (40954, 40960), False, 'import os\n'), ((12376, 12404), 'pprint.pformat', 'pprint.pformat', (['self.argType'], {}), '(self.argType)\n', (12390, 12404), False, 'import pprint\n'), ((41064, 41085), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (41076, 41085), False, 'import os\n'), ((4626, 4654), 'pprint.pformat', 'pprint.pformat', (['self.classes'], {}), '(self.classes)\n', (4640, 4654), False, 'import pprint\n'), ((6051, 6077), 'pprint.pformat', 'pprint.pformat', (['self.enums'], {}), '(self.enums)\n', (6065, 6077), False, 'import pprint\n'), ((6095, 6125), 'pprint.pformat', 'pprint.pformat', (['self.multisets'], {}), '(self.multisets)\n', (6109, 6125), False, 'import pprint\n'), ((6143, 6168), 'pprint.pformat', 'pprint.pformat', (['self.cmds'], {}), '(self.cmds)\n', (6157, 6168), False, 'import pprint\n'), ((6186, 6211), 'pprint.pformat', 'pprint.pformat', (['self.evts'], {}), '(self.evts)\n', (6200, 6211), False, 'import pprint\n'), ((8948, 8973), 'pprint.pformat', 'pprint.pformat', (['self.cmds'], {}), '(self.cmds)\n', (8962, 8973), False, 'import pprint\n'), ((10241, 10266), 'pprint.pformat', 'pprint.pformat', (['self.args'], {}), '(self.args)\n', (10255, 10266), False, 'import pprint\n'), ((12577, 12603), 'pprint.pformat', 'pprint.pformat', (['self.enums'], {}), '(self.enums)\n', (12591, 12603), False, 'import pprint\n'), ((13082, 13107), 'pprint.pformat', 'pprint.pformat', (['self.msgs'], {}), '(self.msgs)\n', (13096, 13107), False, 'import pprint\n'), 
((14308, 14335), 'pprint.pformat', 'pprint.pformat', (['self.values'], {}), '(self.values)\n', (14322, 14335), False, 'import pprint\n'), ((14813, 14838), 'pprint.pformat', 'pprint.pformat', (['self.enum'], {}), '(self.enum)\n', (14827, 14838), False, 'import pprint\n'), ((14856, 14884), 'pprint.pformat', 'pprint.pformat', (['self.btfType'], {}), '(self.btfType)\n', (14870, 14884), False, 'import pprint\n')]
|
# Copyright (c) 2015 <NAME>
# See the file LICENSE for copying permission.
import re
from . import common
from ...systems import service
class InitctlListStdoutLog(common.Log):
    """Parse `initctl list` output into a service.Services collection.

    Expected line shapes:
        tty5 start/running, process 856
        passwd stop/waiting
        network-interface-security (networking) start/running
    """
    # name, optional "(instance)", goal/state, optional ", process <pid>".
    # Bug fix: the originals were non-raw strings, so '\S'/'\d' were
    # invalid string escapes (SyntaxWarning, and an error in future
    # Python); raw strings make them regex escapes as intended.
    line_re = re.compile(r'^(\S+) (?:(\S+) )?(\S+)/(\S+)(?:, process (\d+))?$')
    # Indented job-process lines, e.g. "\tpre-stop process 928".
    ignored_re = re.compile(r'^\t\S+ \S+ \d+$')

    def parse(self):
        """Read the log file at self.path and build self.data."""
        self.logger.debug('parsing')
        self.data = service.Services()
        self.name = 'services'
        with open(self.path, 'r') as f:
            for line in f:
                self.parse_line(line)

    def parse_line(self, line):
        """Parse one line; ignored lines are skipped, others must match."""
        if InitctlListStdoutLog.ignored_re.match(line):
            self.logger.debug('ignoring: %s', line)
            return
        matches = InitctlListStdoutLog.line_re.match(line)
        # NOTE(review): assert is stripped under -O; raising a parse error
        # exception may be preferable here.
        assert matches
        (name, instance, wanted, state, pid) = matches.groups()
        if instance is not None:
            name = name + ' ' + instance
        assert name not in self.data
        self.data[name] = service.Service(name)
        self.data[name].add_upstart(wanted, state, pid)
|
[
"re.compile"
] |
[((317, 386), 're.compile', 're.compile', (['"""^(\\\\S+) (?:(\\\\S+) )?(\\\\S+)/(\\\\S+)(?:, process (\\\\d+))?$"""'], {}), "('^(\\\\S+) (?:(\\\\S+) )?(\\\\S+)/(\\\\S+)(?:, process (\\\\d+))?$')\n", (327, 386), False, 'import re\n'), ((430, 462), 're.compile', 're.compile', (['"""^\t\\\\S+ \\\\S+ \\\\d+$"""'], {}), "('^\\t\\\\S+ \\\\S+ \\\\d+$')\n", (440, 462), False, 'import re\n')]
|
from pathlib import Path
import os
import pandas as pd
import numpy as np
def get_country_geolocation():
    """Build ISO-numeric -> longitude and -> latitude mappings.

    Reads the country-centroids CSV shipped next to this module and
    returns two dicts keyed by the 'iso_n3' column:
    ``(longitude_mapping, latititude_mapping)`` with string values.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    country_mapping = pd.read_csv(
        dir_path + '/data_files/country_centroids_az8.csv', dtype=str)
    # Keep only iso_n3, Longitude and Latitude (positions 48, 66, 67).
    country_mapping = country_mapping.iloc[:, [48, 66, 67]]
    longitude_mapping = {}
    latititude_mapping = {}
    # FIX: single pass over the frame instead of calling iterrows()
    # twice to build the two dictionaries.
    for _, row in country_mapping.iterrows():
        longitude_mapping[row['iso_n3']] = row['Longitude']
        latititude_mapping[row['iso_n3']] = row['Latitude']
    return longitude_mapping, latititude_mapping
def get_country_isocode_mapping():
    """Map country display names to ISO 3166-1 numeric codes (strings).

    Reads the country-codes CSV shipped next to this module and augments
    it with names used by the source data that are missing from the CSV.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    country_mapping = pd.read_csv(
        dir_path + '/data_files/country-codes_csv.csv', dtype=str)
    # Keep official_name_en (position 2) and ISO3166-1-numeric (position 8).
    country_mapping = country_mapping.iloc[1:, [2, 8]]
    mapping = {row['official_name_en']: row['ISO3166-1-numeric']
               for _, row in country_mapping.iterrows()}
    # add missing countries > 1000 students
    mapping['Taiwan'] = '158'
    # BUG FIX: Hong Kong is ISO 3166-1 numeric 344; 364 is Iran's code
    # (the old value made Hong Kong plot on top of Iran).
    mapping['Hong Kong'] = '344'
    mapping['Iran'] = '364'
    mapping['North Korea'] = '408'
    mapping['South Korea'] = '410'
    mapping['Vietnam'] = '704'
    mapping['United Kingdom'] = '826'
    mapping['Venezuela'] = '862'
    mapping['Russia'] = '643'
    mapping['Bolivia'] = '068'
    mapping['Côte d’Ivoire/Ivory Coast'] = '384'
    return mapping
def get_output_filename(path, out_folder):
    """Return the path of a .csv file in *out_folder* named after *path*'s stem."""
    stem = Path(path).stem
    return os.path.join(out_folder, f'{stem}.csv')
def write_csv(df, excel_file, out_folder, index=False):
    """Write *df* as CSV into *out_folder*, named after *excel_file*'s stem."""
    destination = get_output_filename(excel_file, out_folder)
    df.to_csv(destination, index=index)
def clean_new_enrollment(excel_file, out_folder):
    """Clean the 'New Enrollment' spreadsheet and write it out as CSV.

    Flattens the sheet's two-row header into one row, keeps only the
    data rows, and saves the result via write_csv().
    """
    df = pd.read_excel(excel_file)
    # remove empty row
    df = df.drop(6)
    # prepare headers
    headers = []
    for i, column in enumerate(df.columns):
        # Two-level header: row index 1 holds an (optional) group label,
        # row index 2 holds the column label.
        first_header = df[column].iloc[1]
        if i == 0:
            # First column is the academic level, not a year group.
            headers.append('Academic Level')
            continue
        if pd.isna(first_header):
            # No group label above this column: use the label alone.
            headers.append(df[column].iloc[2])
        else:
            # Combine group and column labels into one header.
            headers.append(f'{first_header} {df[column].iloc[2]}')
    df.columns = headers
    # chose data rows
    df = df.iloc[3:8]
    write_csv(df, excel_file, out_folder)
def clean_academic_level(excel_file, out_folder):
    """Clean the 'Academic Level' spreadsheet and write it out as CSV.

    Drops aggregate/sub-category rows and pre-2009/10 columns, flattens
    the two-row (year, measure) header, then transposes so each output
    row is a year and each column an academic level.
    """
    # TODO change hyphen to null
    df = pd.read_excel(excel_file)
    # Drop sub-category and aggregate rows, keeping only the top-level
    # academic-level rows (row positions found by inspection).
    df = df.drop([2,
                  4, 5, 6, 7, 8, 9, 10, 11, 12,
                  14, 15, 16, 17, 18,
                  20, 21, 22,
                  24, 26,
                  28, 29, 30, 31, 32, 33, 34])
    # drop upto column 34 pre 2009/10
    columns_to_drop = [i for i in range(33) if i != 1]
    # drop empty columns, every third column is empty
    empty_columns = [i for i in range(33, 62) if not (i+1) % 3]
    columns_to_drop = list(set(columns_to_drop) | set(empty_columns))
    df = df.drop(df.columns[columns_to_drop], axis=1)
    df = df.reset_index(drop=True)
    headers = []
    for i, column in enumerate(df.columns):
        if i == 0:
            # print(column)
            # academic level column
            headers.append(df[column].iloc[1])
            continue
        first_header = df[column].iloc[0]
        if i % 2 != 0:
            # Odd columns carry the year label; remember it so the
            # following (even) column with a blank top cell can reuse it.
            year = first_header
        if pd.isna(first_header):
            headers.append(f'{year} {df[column].iloc[1]}')
        else:
            headers.append(f'{first_header} {df[column].iloc[1]}')
    df.columns = headers
    df = df.iloc[2:]
    # Transpose: academic levels become columns, years become rows.
    df = df.set_index('Academic Level').transpose()
    df = df.reset_index(level=0)
    df = df.rename(columns={'index': 'Year'})
    # df.index.name = None
    # df.columns = df.iloc[1].values
    print(df)
    # df = df.iloc[2:38]
    write_csv(df, excel_file, out_folder)
def clean_places_of_origin(excel_file, out_folder):
    """Inspect the 'All Places of Origin' spreadsheet (work in progress).

    Currently only promotes row 1 to the header and prints the frame;
    no CSV is written yet.
    """
    df = pd.read_excel(excel_file)
    df.columns = df.loc[1].values
    print(df)
def clean_top25_institution(excel_file, out_folder):
    """Inspect the 'Top 25 Institutions' spreadsheet (work in progress).

    Drops the first 55 columns and prints the remainder; no CSV is
    written yet.
    """
    df = pd.read_excel(excel_file)
    columns_to_drop = [i for i in range(55)]
    df = df.drop(df.columns[columns_to_drop], axis=1)
    print(df)
def clean_top25_institution_csv():
    """Trim the Top-25-Institutions CSV to its first six columns and re-save it."""
    here = os.path.dirname(os.path.realpath(__file__))
    out_folder = here + '/cleaned_data_files'
    csv_file = out_folder + '/Census-Top-25-Institutions.csv'
    trimmed = pd.read_csv(csv_file).iloc[:, :6]
    write_csv(trimmed, csv_file, out_folder)
def clean_all_places_of_origin_csv():
    """Reshape the All-Places-of-Origin CSV into long format and geocode it.

    Melts the year columns into (year, students) rows, attaches ISO
    numeric country codes and centroid longitude/latitude, replaces '-'
    placeholders with NaN, and re-saves the CSV.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    out_folder = dir_path + '/cleaned_data_files'
    csv_file = out_folder + '/Census-All-Places-of-Origin.csv'
    df = pd.read_csv(csv_file)
    # df = df.iloc[:264, 1:12]
    # Wide -> long: one row per (place, year) pair.
    df = pd.melt(df, id_vars=['Place of Origin'],
            var_name='year', value_name='students')
    mapping = get_country_isocode_mapping()
    longitude_mapping, latititude_mapping = get_country_geolocation()
    # Attach the ISO numeric code, then use it to look up the centroid.
    df = df.assign(country_code=df['Place of Origin'].map(mapping))
    df = df.assign(long=df['country_code'].map(
        longitude_mapping))
    df = df.assign(lat=df['country_code'].map(
        latititude_mapping))
    # '-' marks a missing count in the source data.
    df = df.replace('-', np.nan)
    print(df)
    # print(country_mapping)
    write_csv(df, csv_file, out_folder)
if __name__ == "__main__":
    # Ad-hoc driver: each cleaning step is run once by hand and then
    # left commented out; only the currently-needed step stays active.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    out_folder = dir_path + '/cleaned_data_files'
    # clean_new_enrollment('data_files/Census-New-Enrollment-2009-2019.xlsx',
    # out_folder)
    # clean_academic_level('data_files/Census-Academic-Level.xlsx', out_folder)
    # clean_places_of_origin('data_files/Census-All-Places-of-Origin.xlsx',
    # out_folder)
    # clean_top25_institution('data_files/Census-Top-25-Institutions.xlsx',out_folder)
    # clean_top25_institution_csv()
    clean_all_places_of_origin_csv()
    # get_country_geolocation()
|
[
"pandas.read_csv",
"os.path.realpath",
"pandas.read_excel",
"pathlib.Path",
"pandas.melt",
"pandas.isna",
"os.path.join"
] |
[((189, 263), 'pandas.read_csv', 'pd.read_csv', (["(dir_path + '/data_files/country_centroids_az8.csv')"], {'dtype': 'str'}), "(dir_path + '/data_files/country_centroids_az8.csv', dtype=str)\n", (200, 263), True, 'import pandas as pd\n'), ((749, 819), 'pandas.read_csv', 'pd.read_csv', (["(dir_path + '/data_files/country-codes_csv.csv')"], {'dtype': 'str'}), "(dir_path + '/data_files/country-codes_csv.csv', dtype=str)\n", (760, 819), True, 'import pandas as pd\n'), ((1537, 1570), 'os.path.join', 'os.path.join', (['out_folder', 'outfile'], {}), '(out_folder, outfile)\n', (1549, 1570), False, 'import os\n'), ((1784, 1809), 'pandas.read_excel', 'pd.read_excel', (['excel_file'], {}), '(excel_file)\n', (1797, 1809), True, 'import pandas as pd\n'), ((2431, 2456), 'pandas.read_excel', 'pd.read_excel', (['excel_file'], {}), '(excel_file)\n', (2444, 2456), True, 'import pandas as pd\n'), ((3914, 3939), 'pandas.read_excel', 'pd.read_excel', (['excel_file'], {}), '(excel_file)\n', (3927, 3939), True, 'import pandas as pd\n'), ((4052, 4077), 'pandas.read_excel', 'pd.read_excel', (['excel_file'], {}), '(excel_file)\n', (4065, 4077), True, 'import pandas as pd\n'), ((4409, 4430), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (4420, 4430), True, 'import pandas as pd\n'), ((4716, 4737), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (4727, 4737), True, 'import pandas as pd\n'), ((4779, 4864), 'pandas.melt', 'pd.melt', (['df'], {'id_vars': "['Place of Origin']", 'var_name': '"""year"""', 'value_name': '"""students"""'}), "(df, id_vars=['Place of Origin'], var_name='year', value_name='students'\n )\n", (4786, 4864), True, 'import pandas as pd\n'), ((139, 165), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (155, 165), False, 'import os\n'), ((699, 725), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (715, 725), False, 'import os\n'), ((2074, 2095), 'pandas.isna', 'pd.isna', 
(['first_header'], {}), '(first_header)\n', (2081, 2095), True, 'import pandas as pd\n'), ((3363, 3384), 'pandas.isna', 'pd.isna', (['first_header'], {}), '(first_header)\n', (3370, 3384), True, 'import pandas as pd\n'), ((4260, 4286), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4276, 4286), False, 'import os\n'), ((4566, 4592), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4582, 4592), False, 'import os\n'), ((5387, 5413), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5403, 5413), False, 'import os\n'), ((1501, 1511), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1505, 1511), False, 'from pathlib import Path\n')]
|
"""Demo: impute missing values in a saved NumPy array, standard-scale it,
and plot the data after each step."""
import numpy as np
# FIX: sklearn.preprocessing.Imputer was deprecated in 0.20 and removed
# in 0.22; SimpleImputer (mean strategy by default, like Imputer) is the
# drop-in replacement.
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt

data = np.load('sample.npy')

# Plot raw data.
plt.figure(1)
plt.plot(data)

# Impute missing values (column-mean imputation, the old Imputer default).
imputer = SimpleImputer()
data = imputer.fit_transform(data)
plt.figure(2)
plt.plot(data)

# Scale data to zero mean / unit variance per column.
scaler = StandardScaler()
data = scaler.fit_transform(data)
plt.figure(3)
plt.plot(data)

plt.show()
|
[
"numpy.load",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"sklearn.preprocessing.Imputer",
"matplotlib.pyplot.figure"
] |
[((122, 143), 'numpy.load', 'np.load', (['"""sample.npy"""'], {}), "('sample.npy')\n", (129, 143), True, 'import numpy as np\n'), ((162, 175), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (172, 175), True, 'from matplotlib import pyplot as plt\n'), ((176, 190), 'matplotlib.pyplot.plot', 'plt.plot', (['data'], {}), '(data)\n', (184, 190), True, 'from matplotlib import pyplot as plt\n'), ((227, 236), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {}), '()\n', (234, 236), False, 'from sklearn.preprocessing import Imputer, StandardScaler\n'), ((273, 286), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (283, 286), True, 'from matplotlib import pyplot as plt\n'), ((287, 301), 'matplotlib.pyplot.plot', 'plt.plot', (['data'], {}), '(data)\n', (295, 301), True, 'from matplotlib import pyplot as plt\n'), ((326, 342), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (340, 342), False, 'from sklearn.preprocessing import Imputer, StandardScaler\n'), ((378, 391), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (388, 391), True, 'from matplotlib import pyplot as plt\n'), ((392, 406), 'matplotlib.pyplot.plot', 'plt.plot', (['data'], {}), '(data)\n', (400, 406), True, 'from matplotlib import pyplot as plt\n'), ((408, 418), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (416, 418), True, 'from matplotlib import pyplot as plt\n')]
|
# Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typed_python.internals import (
isCompiled,
typeKnownToCompiler,
localVariableTypesKnownToCompiler
)
from typed_python.compiler.type_wrappers.wrapper import Wrapper
import typed_python.compiler.native_ast as native_ast
import typed_python
class IsCompiledWrapper(Wrapper):
    """Compiler wrapper for typed_python.isCompiled().

    In compiled code the call collapses to the constant True.
    """
    is_pod = True
    is_empty = False
    is_pass_by_ref = False

    def __init__(self):
        super().__init__(isCompiled)

    def getNativeLayoutType(self):
        # The wrapper carries no runtime data.
        return native_ast.Type.Void()

    def convert_call(self, context, expr, args, kwargs):
        if args or kwargs:
            context.pushException(TypeError, "isCompiled() accepts no arguments")
            # BUG FIX: stop after raising; the original fell through and
            # also produced a return value for the failed call.
            return None
        return context.constant(True)
class TypeKnownToCompiler(Wrapper):
    """Compiler wrapper for typed_python.typeKnownToCompiler().

    Produces, at compile time, the type the compiler inferred for the
    single argument.
    """
    is_pod = True
    is_empty = False
    is_pass_by_ref = False

    def __init__(self):
        super().__init__(typeKnownToCompiler)

    def getNativeLayoutType(self):
        # The wrapper carries no runtime data.
        return native_ast.Type.Void()

    def convert_call(self, context, expr, args, kwargs):
        if len(args) != 1 or kwargs:
            context.pushException(TypeError, "typeKnownToCompiler() accepts 1 positional argument")
            # BUG FIX: return immediately; falling through would index
            # args[0] and raise IndexError when no arguments were given.
            return None
        return typed_python.compiler.python_object_representation.pythonObjectRepresentation(
            context,
            args[0].expr_type.typeRepresentation
        )
class LocalVariableTypesKnownToCompiler(Wrapper):
    """Compiler wrapper for typed_python.localVariableTypesKnownToCompiler().

    Produces a compile-time dict of the local variable types the
    compiler currently tracks.
    """
    is_pod = True
    is_empty = False
    is_pass_by_ref = False

    def __init__(self):
        super().__init__(localVariableTypesKnownToCompiler)

    def getNativeLayoutType(self):
        # The wrapper carries no runtime data.
        return native_ast.Type.Void()

    def convert_call(self, context, expr, args, kwargs):
        if args or kwargs:
            context.pushException(TypeError, "localVariableTypesKnownToCompiler() accepts no arguments")
            # BUG FIX: stop after raising instead of also returning the
            # constant for the failed call.
            return None
        return context.constant(dict(context.variableStates._types), allowArbitrary=True)
|
[
"typed_python.compiler.native_ast.Type.Void",
"typed_python.compiler.python_object_representation.pythonObjectRepresentation"
] |
[((1081, 1103), 'typed_python.compiler.native_ast.Type.Void', 'native_ast.Type.Void', ([], {}), '()\n', (1101, 1103), True, 'import typed_python.compiler.native_ast as native_ast\n'), ((1536, 1558), 'typed_python.compiler.native_ast.Type.Void', 'native_ast.Type.Void', ([], {}), '()\n', (1556, 1558), True, 'import typed_python.compiler.native_ast as native_ast\n'), ((1770, 1899), 'typed_python.compiler.python_object_representation.pythonObjectRepresentation', 'typed_python.compiler.python_object_representation.pythonObjectRepresentation', (['context', 'args[0].expr_type.typeRepresentation'], {}), '(\n context, args[0].expr_type.typeRepresentation)\n', (1847, 1899), False, 'import typed_python\n'), ((2183, 2205), 'typed_python.compiler.native_ast.Type.Void', 'native_ast.Type.Void', ([], {}), '()\n', (2203, 2205), True, 'import typed_python.compiler.native_ast as native_ast\n')]
|
#!/usr/bin/python3
import sys
import cgi
import cgitb
import json
import cgicommon
def send_error_reply(description):
    """Write a JSON failure response ({success: false, description}) to stdout."""
    json.dump({"success": False, "description": description}, sys.stdout)
# Show tracebacks in the HTTP response while debugging.
cgitb.enable()
# Emit the JSON content-type header and the blank header/body separator.
cgicommon.writeln("Content-Type: application/json; charset=utf-8")
cgicommon.writeln("")
form = cgi.FieldStorage()
tourney_name = form.getfirst("tourney")
request = form.getfirst("request")
# Must run before importing the tourney modules below.
cgicommon.set_module_path()
import countdowntourney
import tourney2json
# Forward every submitted form field to the request handler.
options = dict()
for option_name in form:
    options[option_name] = form.getfirst(option_name)
if tourney_name is None:
    send_error_reply("Bad request: no tourney name specified.")
    sys.exit(0)
if request is None:
    # Information we fetch depends on current mode
    request = "default"
if request not in tourney2json.valid_requests:
    send_error_reply("Bad request: request type \"%s\" is not recognised." % (request))
    sys.exit(0)
try:
    # Open the tourney DB and dispatch to the handler for this request type.
    tourney = countdowntourney.tourney_open(tourney_name, cgicommon.dbdir)
    reply_object = tourney2json.valid_requests[request](tourney, options)
except countdowntourney.TourneyException as e:
    send_error_reply(e.description)
    sys.exit(0)
json.dump(reply_object, sys.stdout, indent=4)
sys.exit(0)
|
[
"json.dump",
"cgitb.enable",
"cgi.FieldStorage",
"cgicommon.writeln",
"countdowntourney.tourney_open",
"cgicommon.set_module_path",
"sys.exit"
] |
[((241, 255), 'cgitb.enable', 'cgitb.enable', ([], {}), '()\n', (253, 255), False, 'import cgitb\n'), ((257, 323), 'cgicommon.writeln', 'cgicommon.writeln', (['"""Content-Type: application/json; charset=utf-8"""'], {}), "('Content-Type: application/json; charset=utf-8')\n", (274, 323), False, 'import cgicommon\n'), ((324, 345), 'cgicommon.writeln', 'cgicommon.writeln', (['""""""'], {}), "('')\n", (341, 345), False, 'import cgicommon\n'), ((354, 372), 'cgi.FieldStorage', 'cgi.FieldStorage', ([], {}), '()\n', (370, 372), False, 'import cgi\n'), ((449, 476), 'cgicommon.set_module_path', 'cgicommon.set_module_path', ([], {}), '()\n', (474, 476), False, 'import cgicommon\n'), ((1229, 1274), 'json.dump', 'json.dump', (['reply_object', 'sys.stdout'], {'indent': '(4)'}), '(reply_object, sys.stdout, indent=4)\n', (1238, 1274), False, 'import json\n'), ((1276, 1287), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1284, 1287), False, 'import sys\n'), ((211, 239), 'json.dump', 'json.dump', (['reply', 'sys.stdout'], {}), '(reply, sys.stdout)\n', (220, 239), False, 'import json\n'), ((713, 724), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (721, 724), False, 'import sys\n'), ((961, 972), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (969, 972), False, 'import sys\n'), ((993, 1053), 'countdowntourney.tourney_open', 'countdowntourney.tourney_open', (['tourney_name', 'cgicommon.dbdir'], {}), '(tourney_name, cgicommon.dbdir)\n', (1022, 1053), False, 'import countdowntourney\n'), ((1216, 1227), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1224, 1227), False, 'import sys\n')]
|
import connexion
import psycopg2
import six
import os
from swagger_server.models.accident import Accident # noqa: E501
from swagger_server.models.api_response import ApiResponse # noqa: E501
from swagger_server import util
def accident_delete(body):  # noqa: E501
    """Delete a record of an accident

     # noqa: E501

    :param body: Created user object
    :type body: dict | bytes

    :rtype: ApiResponse
    """
    if connexion.request.is_json:
        body = Accident.from_dict(connexion.request.get_json())  # noqa: E501
    # Create connection to the DB and cursor.
    # .get() avoids a KeyError when DATABASE_URL is not set at all.
    if os.environ.get("DATABASE_URL"):
        conn = psycopg2.connect(os.environ["DATABASE_URL"])
    else:
        conn = psycopg2.connect(
            "host=localhost dbname=accidents_raw user=postgres password=password")
    cur = conn.cursor()
    # SECURITY FIX: parameterized query instead of string concatenation —
    # the old code interpolated body.st_case directly into the SQL,
    # allowing SQL injection.
    cur.execute("DELETE FROM utilized_accidents WHERE ST_CASE = %s;",
                (body.st_case,))
    # Print result from delete command
    print(cur.statusmessage)
    conn.commit()
    # FIX: release database resources (previously leaked).
    cur.close()
    conn.close()
    response = ApiResponse(code=200, type="Good", message="Successful delete")
    return response
def accident_get(st_case=None, state=None, fatals=None):  # noqa: E501
    """Get accident record(s)

    Get an accident's information by input ST_CASE # noqa: E501

    :param st_case: ST_CASE value of the object(s) to be returned
    :type st_case: List[str]
    :param state: STATE value of the object(s) to be returned
    :type state: List[int]
    :param fatals: FATALS value of the object(s) to be returned
    :type fatals: List[int]

    :rtype: List[Accident]
    """
    # Create connection to the DB and cursor.
    # .get() avoids a KeyError when DATABASE_URL is not set at all.
    if os.environ.get("DATABASE_URL"):
        conn = psycopg2.connect(os.environ["DATABASE_URL"])
    else:
        conn = psycopg2.connect(
            "host=localhost dbname=accidents_raw user=postgres password=password")
    cur = conn.cursor()
    # Build one parameterized "column IN %s" clause per supplied filter.
    # SECURITY FIX: the original concatenated the raw query values into
    # the SQL string (injection risk) via ~80 lines of duplicated
    # branching. psycopg2 adapts a tuple bound to "IN %s" into a
    # parenthesized value list.
    conditions = []
    params = []
    for column, values in (("ST_CASE", st_case),
                           ("STATE", state),
                           ("FATALS", fatals)):
        if values is not None:
            conditions.append("%s IN %%s" % column)
            params.append(tuple(values))
    statement = "SELECT * FROM utilized_accidents"
    if conditions:
        statement += " WHERE " + " AND ".join(conditions)
    statement += ";"
    cur.execute(statement, params or None)
    # Map raw rows onto the generated model: column 0 is STATE, column 1
    # is ST_CASE and the second-to-last column is FATALS.
    accidents = [Accident(state=row[0], st_case=row[1], fatals=row[-2])
                 for row in cur.fetchall()]
    # Release database resources (previously leaked).
    cur.close()
    conn.close()
    return accidents
|
[
"swagger_server.models.api_response.ApiResponse",
"connexion.request.get_json",
"swagger_server.models.accident.Accident",
"psycopg2.connect"
] |
[((1054, 1117), 'swagger_server.models.api_response.ApiResponse', 'ApiResponse', ([], {'code': '(200)', 'type': '"""Good"""', 'message': '"""Successful delete"""'}), "(code=200, type='Good', message='Successful delete')\n", (1065, 1117), False, 'from swagger_server.models.api_response import ApiResponse\n'), ((635, 679), 'psycopg2.connect', 'psycopg2.connect', (["os.environ['DATABASE_URL']"], {}), "(os.environ['DATABASE_URL'])\n", (651, 679), False, 'import psycopg2\n'), ((705, 797), 'psycopg2.connect', 'psycopg2.connect', (['"""host=localhost dbname=accidents_raw user=postgres password=password"""'], {}), "(\n 'host=localhost dbname=accidents_raw user=postgres password=password')\n", (721, 797), False, 'import psycopg2\n'), ((1627, 1671), 'psycopg2.connect', 'psycopg2.connect', (["os.environ['DATABASE_URL']"], {}), "(os.environ['DATABASE_URL'])\n", (1643, 1671), False, 'import psycopg2\n'), ((1697, 1789), 'psycopg2.connect', 'psycopg2.connect', (['"""host=localhost dbname=accidents_raw user=postgres password=password"""'], {}), "(\n 'host=localhost dbname=accidents_raw user=postgres password=password')\n", (1713, 1789), False, 'import psycopg2\n'), ((4751, 4814), 'swagger_server.models.accident.Accident', 'Accident', ([], {'state': 'record[0]', 'st_case': 'record[1]', 'fatals': 'record[-2]'}), '(state=record[0], st_case=record[1], fatals=record[-2])\n', (4759, 4814), False, 'from swagger_server.models.accident import Accident\n'), ((494, 522), 'connexion.request.get_json', 'connexion.request.get_json', ([], {}), '()\n', (520, 522), False, 'import connexion\n')]
|
# Generated by Django 2.1.4 on 2019-02-26 02:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the IFrameEnabledSites model and make OpinionPoll.email
    unique and indexed."""

    dependencies = [
        ('poll', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='IFrameEnabledSites',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(db_index=True, max_length=100, unique=True)),
                ('enabled', models.BooleanField(default=False)),
            ],
        ),
        migrations.AlterField(
            model_name='opinionpoll',
            name='email',
            field=models.EmailField(db_index=True, max_length=100, unique=True),
        ),
    ]
|
[
"django.db.models.CharField",
"django.db.models.AutoField",
"django.db.models.BooleanField",
"django.db.models.EmailField"
] |
[((714, 775), 'django.db.models.EmailField', 'models.EmailField', ([], {'db_index': '(True)', 'max_length': '(100)', 'unique': '(True)'}), '(db_index=True, max_length=100, unique=True)\n', (731, 775), False, 'from django.db import migrations, models\n'), ((328, 421), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (344, 421), False, 'from django.db import migrations, models\n'), ((447, 507), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'max_length': '(100)', 'unique': '(True)'}), '(db_index=True, max_length=100, unique=True)\n', (463, 507), False, 'from django.db import migrations, models\n'), ((538, 572), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (557, 572), False, 'from django.db import migrations, models\n')]
|
import json
import time
class Version:
    """One uploaded version of a file: its parts, properties and transforms."""

    def __init__(self, version: str, remote_path: str):
        self.version = version
        self.remote = remote_path
        self.part_files = []
        self.properties = {}
        self.transforms = []
        # Upload time defaults to creation time; overwritten on deserialisation.
        self.time = time.time()

    def add_part_file(self, remote_path: str):
        """Record the remote path of one part file."""
        self.part_files.append(remote_path)

    def add_property(self, key: str, value: object):
        """Set an arbitrary metadata property."""
        self.properties[key] = value

    def get_property(self, key: str):
        """Return a property value, or None when the key is unknown."""
        return self.properties.get(key)

    def get_uploadtime(self):
        return self.time

    def add_transform(self, name: str):
        """Record the name of a transform applied to this version."""
        self.transforms.append(name)

    def get_identifier(self):
        return self.version

    def get_parts(self):
        return self.part_files

    @staticmethod
    def to_json_object(version):
        """Serialise *version* into a plain JSON-compatible dict."""
        return {
            'version': version.version,
            'remote': version.remote,
            'uploadtime': version.time,
            'parts': version.part_files,
            'properties': version.properties,
            'transforms': version.transforms
        }

    @staticmethod
    def from_json_object(json_object: object):
        """Rebuild a Version from a dict produced by to_json_object()."""
        restored = Version(json_object['version'], json_object['remote'])
        restored.part_files = json_object['parts']
        restored.properties = json_object['properties']
        restored.transforms = json_object['transforms']
        restored.time = json_object['uploadtime']
        return restored
class FileMetadata:
    """Tracks every known Version of a single file, keyed by identifier."""

    def __init__(self):
        self.versions = {}

    def contains_version(self, identifier: str):
        """True when a version with this identifier is known."""
        return identifier in self.versions

    def get_version(self, identifier: str):
        return self.versions[identifier]

    def get_latest_version(self):
        """Return the version with the newest upload time, or None when empty."""
        latest = None
        latest_time = 0
        for candidate in self.versions.values():
            if candidate.get_uploadtime() > latest_time:
                latest_time = candidate.get_uploadtime()
                latest = candidate
        return latest

    def get_version_count(self):
        return len(self.versions)

    def update_version(self, version: Version):
        """Insert or replace a version, keyed by its identifier."""
        self.versions[version.get_identifier()] = version

    @staticmethod
    def to_json(file_metadata):
        """Serialise all versions of *file_metadata* to a JSON string."""
        serialised = {ver.get_identifier(): Version.to_json_object(ver)
                      for ver in file_metadata.versions.values()}
        return json.dumps({
            'versions': serialised
        })

    @staticmethod
    def from_json(json_str: str):
        """Rebuild a FileMetadata from a string produced by to_json()."""
        metadata = FileMetadata()
        payloads = json.loads(json_str)['versions']
        for payload in payloads.values():
            metadata.update_version(Version.from_json_object(payload))
        return metadata
|
[
"json.loads",
"json.dumps",
"time.time"
] |
[((270, 281), 'time.time', 'time.time', ([], {}), '()\n', (279, 281), False, 'import time\n'), ((2529, 2567), 'json.dumps', 'json.dumps', (["{'versions': version_dict}"], {}), "({'versions': version_dict})\n", (2539, 2567), False, 'import json\n'), ((2665, 2685), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (2675, 2685), False, 'import json\n')]
|
import logging
from parse import Parse
if __name__ == '__main__':
    # Log at DEBUG level to a fixed file, tagging every record with the
    # originating file and function name.
    logging.basicConfig(level="DEBUG",
                        filename="/var/log/dp_more.log",
                        format="%(asctime)s[%(levelname)s][%(filename)s.%(funcName)s]%(message)s")
    # Run the full parse in one shot.
    Parse().parse_all_info()
|
[
"parse.Parse",
"logging.basicConfig"
] |
[((71, 218), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '"""DEBUG"""', 'filename': '"""/var/log/dp_more.log"""', 'format': '"""%(asctime)s[%(levelname)s][%(filename)s.%(funcName)s]%(message)s"""'}), "(level='DEBUG', filename='/var/log/dp_more.log', format=\n '%(asctime)s[%(levelname)s][%(filename)s.%(funcName)s]%(message)s')\n", (90, 218), False, 'import logging\n'), ((266, 273), 'parse.Parse', 'Parse', ([], {}), '()\n', (271, 273), False, 'from parse import Parse\n')]
|
from adventofcode.utils import open_input
def main():
    """Solve AoC 2015 day 2 from the bundled input and print both answers."""
    puzzle_input = open_input('adventofcode/_2015/day2/input.txt')
    part_one, part_two = get_answer(puzzle_input)
    print(part_one, part_two)
    return part_one, part_two
def get_answer(data):
    """Compute the wrapping paper (part 1) and ribbon (part 2) totals.

    Each entry of *data* is a package size formatted 'LxWxH'
    (case-insensitive). Returns ``(paper_total, ribbon_total)``.
    """
    paper_total = 0
    ribbon_total = 0
    for package in data:
        length, width, height = (int(dim) for dim in package.lower().split('x'))
        sides = (length * width, width * height, height * length)
        # Part 1: full surface area plus slack equal to the smallest side.
        paper_total += 2 * sum(sides) + min(sides)
        # Part 2: perimeter of the smallest face for the wrap, volume for the bow.
        shortest_two = sorted((length, width, height))[:2]
        ribbon_total += 2 * sum(shortest_two) + length * width * height
    return paper_total, ribbon_total
if __name__ == '__main__':
    # Allow running this solution directly as a script.
    main()
|
[
"adventofcode.utils.open_input"
] |
[((67, 114), 'adventofcode.utils.open_input', 'open_input', (['"""adventofcode/_2015/day2/input.txt"""'], {}), "('adventofcode/_2015/day2/input.txt')\n", (77, 114), False, 'from adventofcode.utils import open_input\n')]
|
from behave import given, when, then, step
from kss.util import command
# MARK: Internal Utilities
def _find_file_match(pattern: str) -> str:
    """Return the single file under dist/ whose name matches *pattern*.

    Raises RuntimeError when zero or more than one file matches.
    """
    found = list(command.process("find dist -name '%s'" % pattern))
    if len(found) == 0:
        raise RuntimeError("Could not find a dist file matching '%s'" % pattern)
    if len(found) > 1:
        raise RuntimeError("Found more than one dist file matching '%s'" % pattern)
    return found[0]
def _has_match(cmd: str, match: str) -> bool:
    """True when any output line of *cmd* contains the substring *match*."""
    return any(line.find(match) != -1 for line in command.process(cmd))
# MARK: Whens
@when(u'we build the installation packages')
def step_impl(context):
    # No action needed here: the packages are presumably built by the
    # surrounding tooling before the scenario runs — TODO confirm.
    pass
# MARK: Thens
@then(u'the source distribution should include the resources')
def step_impl(context):
    # The sdist is a gzipped tarball: list its contents and look for the
    # bundled SPDX license resource file.
    filename = _find_file_match("*.tar.gz")
    target = 'kss/license/resources/spdx-licenses.json'
    cmd = "tar tzf %s" % filename
    assert _has_match(cmd, target), "Should find '%s' in %s" % (target, filename)
@then(u'the binary distribution should include the resources')
def step_impl(context):
    # The wheel is a zip archive: list its contents and look for the
    # bundled SPDX license resource file.
    filename = _find_file_match("*.whl")
    target = 'kss/license/resources/spdx-licenses.json'
    cmd = "unzip -l %s" % filename
    assert _has_match(cmd, target), "Should find '%s' in %s" % (target, filename)
|
[
"kss.util.command.process",
"behave.when",
"behave.then"
] |
[((664, 707), 'behave.when', 'when', (['u"""we build the installation packages"""'], {}), "(u'we build the installation packages')\n", (668, 707), False, 'from behave import given, when, then, step\n'), ((759, 820), 'behave.then', 'then', (['u"""the source distribution should include the resources"""'], {}), "(u'the source distribution should include the resources')\n", (763, 820), False, 'from behave import given, when, then, step\n'), ((1064, 1125), 'behave.then', 'then', (['u"""the binary distribution should include the resources"""'], {}), "(u'the binary distribution should include the resources')\n", (1068, 1125), False, 'from behave import given, when, then, step\n'), ((175, 224), 'kss.util.command.process', 'command.process', (['("find dist -name \'%s\'" % pattern)'], {}), '("find dist -name \'%s\'" % pattern)\n', (190, 224), False, 'from kss.util import command\n'), ((548, 568), 'kss.util.command.process', 'command.process', (['cmd'], {}), '(cmd)\n', (563, 568), False, 'from kss.util import command\n')]
|
from django.conf.urls import url
from . import views
# URL routes for the players app.
urlpatterns = [
    url(r'^join/$', views.join, name="players-join"),
    # The capture group passes the player slot (1-4) to the view.
    url(r'^leave/([1-4])$', views.leave, name="players-leave"),
]
|
[
"django.conf.urls.url"
] |
[((75, 122), 'django.conf.urls.url', 'url', (['"""^join/$"""', 'views.join'], {'name': '"""players-join"""'}), "('^join/$', views.join, name='players-join')\n", (78, 122), False, 'from django.conf.urls import url\n'), ((129, 186), 'django.conf.urls.url', 'url', (['"""^leave/([1-4])$"""', 'views.leave'], {'name': '"""players-leave"""'}), "('^leave/([1-4])$', views.leave, name='players-leave')\n", (132, 186), False, 'from django.conf.urls import url\n')]
|
# Copyright (c) 2021 <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import os
from pathlib import Path
import pytest
from rainbow.util import VID_FILE_EXT, load_nd2_imgs, load_std_imgs, save_video
from tests import IMG_SER_DIR, ND2_PATH
@pytest.fixture
def axs_config():
    # Axis configuration consumed by load_nd2_imgs: which axes to iterate
    # over, which to bundle into a frame, and which axis names outputs.
    # NOTE(review): 'v'/'t'/'y'/'x' presumably follow ND2 conventions
    # (position/time/plane) — confirm against rainbow.util.
    return {'iter_axs': ['v', 't'],
            'bdl_axs': ['y', 'x'],
            'naming_axs': 'v'
            }
def test_load_std_imgs(tmpdir):
    # The fixture directory holds three numbered images; loading with a
    # second argument of 1 should store it as metadata['mpp'].
    imgs = load_std_imgs(IMG_SER_DIR, 1)
    assert len(imgs) == 3
    for i, img in enumerate(imgs, start=1):
        assert str(i) in img.metadata['img_name']
        assert img.metadata['mpp'] == 1
    # Nonexistent and empty directories yield no images.
    assert len(load_std_imgs('')) == 0
    assert len(load_std_imgs(tmpdir)) == 0
def test_load_nd2_imgs(axs_config):
    # Load the sample .nd2 with the fixture axis config and spot-check
    # shapes and metadata across several of the returned bundles.
    imgs = load_nd2_imgs(ND2_PATH, axs_config)
    assert len(imgs) != 0  # TODO specfic
    assert len(imgs[0]) != 0
    assert imgs[3][0].shape == (1024, 1280, 3)
    assert len(imgs[0][0].metadata) != 0
    assert imgs[6][0].metadata['type'] == '.nd2'
    assert imgs[9][0].metadata['path'] == ND2_PATH
    assert imgs[12][0].metadata['img_name'] == 'Image_0.png'
def test_save_video(tmpdir):
assert save_video(IMG_SER_DIR, os.path.join(tmpdir, 'test_video'))
video = os.path.join(tmpdir, next(os.walk(tmpdir))[2][0])
assert Path(video).suffix == VID_FILE_EXT
|
[
"os.walk",
"rainbow.util.load_nd2_imgs",
"pathlib.Path",
"rainbow.util.load_std_imgs",
"os.path.join"
] |
[((487, 516), 'rainbow.util.load_std_imgs', 'load_std_imgs', (['IMG_SER_DIR', '(1)'], {}), '(IMG_SER_DIR, 1)\n', (500, 516), False, 'from rainbow.util import VID_FILE_EXT, load_nd2_imgs, load_std_imgs, save_video\n'), ((809, 844), 'rainbow.util.load_nd2_imgs', 'load_nd2_imgs', (['ND2_PATH', 'axs_config'], {}), '(ND2_PATH, axs_config)\n', (822, 844), False, 'from rainbow.util import VID_FILE_EXT, load_nd2_imgs, load_std_imgs, save_video\n'), ((1231, 1265), 'os.path.join', 'os.path.join', (['tmpdir', '"""test_video"""'], {}), "(tmpdir, 'test_video')\n", (1243, 1265), False, 'import os\n'), ((693, 710), 'rainbow.util.load_std_imgs', 'load_std_imgs', (['""""""'], {}), "('')\n", (706, 710), False, 'from rainbow.util import VID_FILE_EXT, load_nd2_imgs, load_std_imgs, save_video\n'), ((732, 753), 'rainbow.util.load_std_imgs', 'load_std_imgs', (['tmpdir'], {}), '(tmpdir)\n', (745, 753), False, 'from rainbow.util import VID_FILE_EXT, load_nd2_imgs, load_std_imgs, save_video\n'), ((1340, 1351), 'pathlib.Path', 'Path', (['video'], {}), '(video)\n', (1344, 1351), False, 'from pathlib import Path\n'), ((1305, 1320), 'os.walk', 'os.walk', (['tmpdir'], {}), '(tmpdir)\n', (1312, 1320), False, 'import os\n')]
|
from fastapi import APIRouter, Depends, Header, HTTPException, status
from ..database import Models, crud
from app.database.conn import get_db
from sqlalchemy.orm import Session
from sqlalchemy.exc import SQLAlchemyError
from typing import List, Union
from ..util import convert_date, token_verification, create_api_token
router = APIRouter(
prefix="/record",
tags=["Record"],
dependencies=[],
)
# Create default training record.
def initialize_t(user_id, wdate, db):
try:
obj = crud.read_objective(db=db, user_id=user_id)
if obj:
routines = {}
for routine in obj.routines:
routines.update({routine: False})
else:
routines = None
tr = {
"user_id": user_id,
"written": wdate,
"content": {
"train_detail": {"content": None},
"routines": routines,
"success": {"content": None, "image": []},
"failure": {"content": None, "image": []},
},
"feedback": None,
}
crud.create_tr(tr=Models.Training(**tr), db=db)
except SQLAlchemyError as sql:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail={"SQL operation failed.": sql},)
# Create default conditioning record.
def initialize_c(user_id, wdate, db):
cr = {
"user_id": user_id,
"written": wdate,
"content": {
"mind": [],
"physical": [],
"injury": [],
}
}
try:
crud.create_cr(cr=Models.Condition(**cr), db=db)
except SQLAlchemyError as sql:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail={"SQL operation failed.": sql},
)
# Create/overwrites tr/cr record of given date.
@router.post("/write/{user_id}")
async def write(user_id: str, wdate: str, key_type: str, content: Union[str, dict, List[str], List[dict]],
db: Session = Depends(get_db),
token=Header(..., title="API_Token")):
"""
:param db: Connection to database. This field is not needed.\n
:param user_id: str\n
- User_id of the owner of the record.\n
:param wdate: str\n
- Date of the record. Must be something like 'Fri Nov 05 2021'\n
:param key_type: str\n
- Identifier of Updating part of the record. key_type must be one of:\n
"train_detail"\n
"routines"\n
"success"\n
"failure"\n
"feedback"\n
"mind"\n
"physical"\n
"injury"\n
:param content: Writing content\n
content: Updating value.\n
Type of content should be one of [str, dict, list[str], list[dict]]\n
:param token: API_Token you received when you registered in.\n
:return: 200 Ok on Success.\n
"""
if not token_verification(token=token, user_id=user_id):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid Token",
)
try:
d = convert_date(wdate).get("date")
if d is None:
raise ValueError
except ValueError:
raise HTTPException(
status_code=status.HTTP_406_NOT_ACCEPTABLE,
detail="Value Error"
)
try:
tr_record = crud.read_tr(db=db, user_id=user_id, wdate=d, number=1)
cr_record = crud.read_cr(db=db, user_id=user_id, wdate=d, number=1)
# initializing part: create record if there isn't one.
if len(tr_record) == 0:
initialize_t(user_id=user_id, wdate=d, db=db)
tr_record = crud.read_tr(db=db, user_id=user_id, wdate=d, number=1)
if len(cr_record) == 0:
initialize_c(user_id=user_id, wdate=d, db=db)
cr_record = crud.read_cr(db=db, user_id=user_id, wdate=d, number=1)
# updating part
tr = tr_record[0]
cr = cr_record[0]
# training data update
if key_type == "train_detail":
detail = {"content": content}
tr.content["train_detail"] = detail
elif key_type == "routines":
tr.content["routines"] = content.get("content")
elif key_type == "success":
url = tr.content.get("success").get("image")
success = {"content": content, "image": url}
tr.content["success"] = success
elif key_type == "failure":
url = tr.content.get("failure").get("image")
failure = {"content": content, "image": url}
tr.content["failure"] = failure
elif key_type == "feedback":
tr.feedback = content
# conditioning data update
elif key_type == "mind":
cr.content["mind"] = content.get("content")
elif key_type == "physical":
cr.content["physical"] = content.get("content")
elif key_type == "injury":
cr.content["injury"] = content.get("content")
crud.update_tr(db=db, user_id=user_id, content=tr.content, wdate=d, feedback=tr.feedback)
crud.update_cr(db=db, user_id=user_id, content=cr.content, wdate=d)
except SQLAlchemyError as sql:
raise HTTPException(
status_code=status.HTTP_406_NOT_ACCEPTABLE,
detail={"SQL operation failed.": sql},
)
except KeyError:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail={"KeyError": "key probably doesn't exist."}
)
return {"status": status.HTTP_201_CREATED}
# Read tr/cr record of given user_id and given date.
@router.get("/read/{user_id}")
async def read(user_id: str, wdate: str, db: Session = Depends(get_db), token=Header(..., title="API_Token")):
"""
:param user_id: User_id of the owner of the record.\n
:param wdate: Date of the record. Must be something like 'Fri Nov 05 2021'\n
:param db: This field is not required.\n
:param token: API_Token you received when you registered in.\n
:return: User record of given date.
"""
"""
sample = {\n
"wdate": 'Fri Nov 15 2021',\n
"noteContentGroup": {\n
"training": {\n
"train_detail": "노트내용",\n
"routines": {"routine_name1": "done",\n
"routine_name2": "done"},\n
"success": "뭔가 잘한것",\n
"failure": "뭔가 못한것",\n
},\n
"feedback": "피드백 내용",\n
"conditioning": {\n
"mind": ['정신이 번쩍'],\n
"physical": [],\n
"injury": []\n
},\n
}\n
}\n
"""
# verify API_Token
if not token_verification(token=token, user_id=user_id):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid Token",
)
try:
# check if user exists
user = crud.read_user(db=db, user_id=user_id)
if user is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="user not found",
)
except SQLAlchemyError as sql:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail={"SQL operation failed.": sql},
)
except Exception as e:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail={"Unexpected Error": e}
)
else:
try:
d = convert_date(wdate).get("date")
tr = crud.read_tr(user_id=user_id, wdate=d, db=db, number=1)
training = {
"train_detail": {"content": None},
"routines": None,
"success": {"content": None, "image": []},
"failure": {"content": None, "image": []},
}
# set training return message if record doesn't exist.
if len(tr) == 0 or tr is None:
obj = crud.read_objective(db=db, user_id=user_id)
if obj:
routines = obj.routines
if routines:
tr_routine = {}
for routine in routines:
tr_routine.update({routine: False})
training["routines"] = tr_routine
feedback = None
else:
training = tr[0].content
feedback = tr[0].feedback
cr = crud.read_cr(user_id=user_id, wdate=d, db=db, number=1)
# set training return message if record doesn't exist.
if len(cr) == 0 or cr is None:
conditioning = {
"mind": [],
"physical": [],
"injury": [],
}
else:
conditioning = cr[0].content
# put together into one response object
res = {
"date": wdate,
"noteContentGroup": {
"training": training,
"feedback": feedback,
"conditioning": conditioning,
}
}
return res
except (TypeError, ValueError):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Check if user_id has valid value or date is in right format. Example: 'Fri Nov 05 2021'",
)
# API for testing
@router.get("/token_test")
async def test(user_id: str, token=Header(..., title="API_Token")):
if not token_verification(token, user_id):
return {"Verification": "Failure"}
else:
return {"Verification": "Success"}
@router.get("/my_token")
async def get_token(user_id: str):
token = create_api_token(user_id)
return token
|
[
"fastapi.HTTPException",
"fastapi.Header",
"fastapi.Depends",
"fastapi.APIRouter"
] |
[((333, 394), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/record"""', 'tags': "['Record']", 'dependencies': '[]'}), "(prefix='/record', tags=['Record'], dependencies=[])\n", (342, 394), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((2057, 2072), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (2064, 2072), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((2096, 2126), 'fastapi.Header', 'Header', (['...'], {'title': '"""API_Token"""'}), "(..., title='API_Token')\n", (2102, 2126), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((5735, 5750), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (5742, 5750), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((5758, 5788), 'fastapi.Header', 'Header', (['...'], {'title': '"""API_Token"""'}), "(..., title='API_Token')\n", (5764, 5788), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((9591, 9621), 'fastapi.Header', 'Header', (['...'], {'title': '"""API_Token"""'}), "(..., title='API_Token')\n", (9597, 9621), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((2964, 3043), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_401_UNAUTHORIZED', 'detail': '"""Invalid Token"""'}), "(status_code=status.HTTP_401_UNAUTHORIZED, detail='Invalid Token')\n", (2977, 3043), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((6786, 6865), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_401_UNAUTHORIZED', 'detail': '"""Invalid Token"""'}), "(status_code=status.HTTP_401_UNAUTHORIZED, detail='Invalid Token')\n", (6799, 6865), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((1195, 1303), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 
'status.HTTP_500_INTERNAL_SERVER_ERROR', 'detail': "{'SQL operation failed.': sql}"}), "(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail={\n 'SQL operation failed.': sql})\n", (1208, 1303), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((1698, 1806), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_500_INTERNAL_SERVER_ERROR', 'detail': "{'SQL operation failed.': sql}"}), "(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail={\n 'SQL operation failed.': sql})\n", (1711, 1806), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((3220, 3299), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_406_NOT_ACCEPTABLE', 'detail': '"""Value Error"""'}), "(status_code=status.HTTP_406_NOT_ACCEPTABLE, detail='Value Error')\n", (3233, 3299), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((5239, 5340), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_406_NOT_ACCEPTABLE', 'detail': "{'SQL operation failed.': sql}"}), "(status_code=status.HTTP_406_NOT_ACCEPTABLE, detail={\n 'SQL operation failed.': sql})\n", (5252, 5340), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((5406, 5516), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '{\'KeyError\': "key probably doesn\'t exist."}'}), '(status_code=status.HTTP_400_BAD_REQUEST, detail={\'KeyError\':\n "key probably doesn\'t exist."})\n', (5419, 5516), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((7038, 7115), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_404_NOT_FOUND', 'detail': '"""user not found"""'}), "(status_code=status.HTTP_404_NOT_FOUND, detail='user not found')\n", (7051, 7115), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((7212, 
7320), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_500_INTERNAL_SERVER_ERROR', 'detail': "{'SQL operation failed.': sql}"}), "(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail={\n 'SQL operation failed.': sql})\n", (7225, 7320), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((7392, 7483), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': "{'Unexpected Error': e}"}), "(status_code=status.HTTP_400_BAD_REQUEST, detail={\n 'Unexpected Error': e})\n", (7405, 7483), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n'), ((9309, 9471), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""Check if user_id has valid value or date is in right format. Example: \'Fri Nov 05 2021\'"""'}), '(status_code=status.HTTP_400_BAD_REQUEST, detail=\n "Check if user_id has valid value or date is in right format. Example: \'Fri Nov 05 2021\'"\n )\n', (9322, 9471), False, 'from fastapi import APIRouter, Depends, Header, HTTPException, status\n')]
|
# 3rd party modules
import gym
import numpy as np
import subprocess
import os
from gym import spaces
from basilisk_env.simulators import opNavSimulator
class opNavEnv(gym.Env):
"""
OpNav scenario. The spacecraft must decide when to point at the ground (which generates a
reward) versus pointing at the sun (which increases the sim duration).
"""
def __init__(self):
self.__version__ = "0.0.2"
print("Basilisk OpNav Mode Management Sim - Version {}".format(self.__version__))
# General variables defining the environment
self.max_length =int(40) # Specify the maximum number of planning intervals
# Tell the environment that it doesn't have a sim attribute...
self.sim_init = 0
self.simulator = None
self.reward_total = 0
# Set up options, constants for this environment
self.step_duration = 50. # Set step duration equal to 60 minute
self.reward_mult = 1.
low = -1e16
high = 1e16
self.observation_space = spaces.Box(low, high,shape=(4,1))
self.obs = np.zeros([4,])
self.debug_states = np.zeros([12,])
## Action Space description
# 0 - earth pointing (mission objective)
# 1 - sun pointing (power objective)
# 2 - desaturation (required for long-term pointing)
self.action_space = spaces.Discrete(2)
# Store what the agent tried
self.curr_episode = -1
self.action_episode_memory = []
self.curr_step = 0
self.episode_over = False
def _seed(self):
np.random.seed()
return
def step(self, action):
"""
The agent takes a step in the environment.
Parameters
----------
action : int
Returns
-------
ob, reward, episode_over, info : tuple
ob (object) :
an environment-specific object representing your observation of
the environment.
reward (float) :
amount of reward achieved by the previous action. The scale
varies between environments, but the goal is always to increase
your total reward.
episode_over (bool) :
whether it's time to reset the environment again. Most (but not
all) tasks are divided up into well-defined episodes, and done
being True indicates the episode has terminated. (For example,
perhaps the pole tipped too far, or you lost your last life.)
info (dict) :
diagnostic information useful for debugging. It can sometimes
be useful for learning (for example, it might contain the raw
probabilities behind the environment's last state change).
However, official evaluations of your agent are not allowed to
use this for learning.
"""
if self.sim_init == 0:
self.simulator = opNavSimulator.scenario_OpNav(1., 1., self.step_duration)
self.sim_init = 1
if self.curr_step%10 == 0:
print("At step ", self.curr_step, " of ", self.max_length)
if self.curr_step >= self.max_length:
self.episode_over = True
prev_ob = self._get_state()
self._take_action(action)
reward = self._get_reward()
self.reward_total += reward
ob = self._get_state()
if self.sim_over:
self.episode_over = True
print("End of episode")
if self.episode_over:
info = {'episode':{
'r': self.reward_total,
'l': self.curr_step},
'full_states': self.debug_states,
'obs': ob
}
self.simulator.close_gracefully() # Stop spice from blowing up
self.sim_init = 0
else:
info={
'full_states': self.debug_states,
'obs': ob
}
self.curr_step += 1
return ob, reward, self.episode_over, info
def _take_action(self, action):
'''
Interfaces with the simulator to
:param action:
:return:
'''
self.action_episode_memory[self.curr_episode].append(action)
# Let the simulator handle action management:
self.obs, self.debug_states, self.sim_over = self.simulator.run_sim(action)
def _get_reward(self):
"""
Reward is based on time spent with the inertial attitude pointed towards the ground within a given tolerance.
"""
reward = 0
real = np.array([self.debug_states[3],self.debug_states[4], self.debug_states[5]])
nav = np.array([self.debug_states[0],self.debug_states[1], self.debug_states[2]])
nav -= real
nav *= 1./np.linalg.norm(real)
if self.action_episode_memory[self.curr_episode][-1] == 1:
reward = np.linalg.norm(self.reward_mult / (1. + np.linalg.norm(nav)**2.0))
return reward
def reset(self):
"""
Reset the state of the environment and returns an initial observation.
Returns
-------
observation (object): the initial observation of the space.
"""
self.action_episode_memory.append([])
self.episode_over = False
self.curr_step = 0
self.reward_total = 0
self.simulator = opNavSimulator.scenario_OpNav(1., 1., self.step_duration)
self.sim_init=1
return self.simulator.obs
def _render(self, mode='human', close=False):
return
def _get_state(self):
"""Get the observation.
WIP: Work out which error representation to give the algo."""
return self.simulator.obs
|
[
"numpy.random.seed",
"basilisk_env.simulators.opNavSimulator.scenario_OpNav",
"gym.spaces.Discrete",
"numpy.zeros",
"numpy.array",
"gym.spaces.Box",
"numpy.linalg.norm"
] |
[((1050, 1085), 'gym.spaces.Box', 'spaces.Box', (['low', 'high'], {'shape': '(4, 1)'}), '(low, high, shape=(4, 1))\n', (1060, 1085), False, 'from gym import spaces\n'), ((1103, 1116), 'numpy.zeros', 'np.zeros', (['[4]'], {}), '([4])\n', (1111, 1116), True, 'import numpy as np\n'), ((1146, 1160), 'numpy.zeros', 'np.zeros', (['[12]'], {}), '([12])\n', (1154, 1160), True, 'import numpy as np\n'), ((1389, 1407), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2)'], {}), '(2)\n', (1404, 1407), False, 'from gym import spaces\n'), ((1608, 1624), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (1622, 1624), True, 'import numpy as np\n'), ((4719, 4795), 'numpy.array', 'np.array', (['[self.debug_states[3], self.debug_states[4], self.debug_states[5]]'], {}), '([self.debug_states[3], self.debug_states[4], self.debug_states[5]])\n', (4727, 4795), True, 'import numpy as np\n'), ((4809, 4885), 'numpy.array', 'np.array', (['[self.debug_states[0], self.debug_states[1], self.debug_states[2]]'], {}), '([self.debug_states[0], self.debug_states[1], self.debug_states[2]])\n', (4817, 4885), True, 'import numpy as np\n'), ((5510, 5569), 'basilisk_env.simulators.opNavSimulator.scenario_OpNav', 'opNavSimulator.scenario_OpNav', (['(1.0)', '(1.0)', 'self.step_duration'], {}), '(1.0, 1.0, self.step_duration)\n', (5539, 5569), False, 'from basilisk_env.simulators import opNavSimulator\n'), ((3045, 3104), 'basilisk_env.simulators.opNavSimulator.scenario_OpNav', 'opNavSimulator.scenario_OpNav', (['(1.0)', '(1.0)', 'self.step_duration'], {}), '(1.0, 1.0, self.step_duration)\n', (3074, 3104), False, 'from basilisk_env.simulators import opNavSimulator\n'), ((4923, 4943), 'numpy.linalg.norm', 'np.linalg.norm', (['real'], {}), '(real)\n', (4937, 4943), True, 'import numpy as np\n'), ((5073, 5092), 'numpy.linalg.norm', 'np.linalg.norm', (['nav'], {}), '(nav)\n', (5087, 5092), True, 'import numpy as np\n')]
|
import os
import random
import sys
import numpy as np
import pytest
sys.path.append(os.path.join(os.path.dirname(__file__)))
sys.path.append("\\".join(os.path.dirname(__file__).split("\\")[:-2]))
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
from src.distributed_reflectors.reflector import Reflector
SEED = 0
PI = np.pi
straight_collision_and_clear_miss = (
np.array([[-3, 0, 0], [-3, 0, PI / 2]]),
1.0,
np.array([0, 0, PI / 2]),
np.array([1, 0]),
np.array([[0, 0, PI], [-3, 0, PI / 2]]),
)
barely_misses = (
np.array([[-1.0, 0, PI / 4], [-1.0, 0, -PI / 4]]),
1.0,
np.array([0, 0, PI / 2]),
np.array([0, 0]),
np.array([[-1.0, 0, PI / 4], [-1.0, 0, -PI / 4]]),
)
near_misses_but_hit = (
np.array([[-0.99, 0, PI / 4], [-0.99, 0, -PI / 4]]),
1.0,
np.array([0, 0, PI / 2]),
np.array([1, 1]),
np.array([[0.0, 0.99, 3 * PI / 4], [0.0, -0.99, 5 * PI / 4]]),
)
hit = (
np.array([[-np.sqrt(3) / 2, 0, PI / 6], [-np.sqrt(3) / 2, 0, -PI / 6]]),
1.0,
np.array([0, 0, PI / 2]),
np.array([1, 1]),
np.array([[0, 0.5, 5 * PI / 6], [0.0, -0.5, 7 * PI / 6]]),
)
def test_reflector_instantiation():
length = 2.0
coordinates = 5 * np.random.randn(3)
reflector = Reflector(length, coordinates)
assert isinstance(reflector, Reflector)
assert 0 <= reflector.angle <= np.pi
@pytest.mark.parametrize(
"particle_coordinates,length,reflector_coordinates,expected_collisions,expected_new_coordinates",
[straight_collision_and_clear_miss, barely_misses, near_misses_but_hit, hit],
)
def test_correct_collision_detections(
particle_coordinates,
length,
reflector_coordinates,
expected_collisions,
expected_new_coordinates,
):
reflector = Reflector(length, reflector_coordinates)
collisions = reflector.will_collide(particle_coordinates)
np.testing.assert_equal(collisions, expected_collisions)
@pytest.mark.parametrize(
"particle_coordinates,length,reflector_coordinates,expected_collisions,expected_new_coordinates",
[straight_collision_and_clear_miss, barely_misses, near_misses_but_hit, hit],
)
def test_correct_collision_coordinates(
particle_coordinates,
length,
reflector_coordinates,
expected_collisions,
expected_new_coordinates,
):
reflector = Reflector(length, reflector_coordinates)
particle_collisions = reflector.will_collide(particle_coordinates)
new_coordinates = reflector.get_new_coordinates(
particle_coordinates, particle_collisions
)
np.testing.assert_allclose(new_coordinates, expected_new_coordinates, atol=10 ** -5)
|
[
"src.distributed_reflectors.reflector.Reflector",
"numpy.random.randn",
"os.path.dirname",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.testing.assert_equal",
"pytest.mark.parametrize",
"numpy.sqrt"
] |
[((1387, 1600), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""particle_coordinates,length,reflector_coordinates,expected_collisions,expected_new_coordinates"""', '[straight_collision_and_clear_miss, barely_misses, near_misses_but_hit, hit]'], {}), "(\n 'particle_coordinates,length,reflector_coordinates,expected_collisions,expected_new_coordinates'\n , [straight_collision_and_clear_miss, barely_misses,\n near_misses_but_hit, hit])\n", (1410, 1600), False, 'import pytest\n'), ((1945, 2158), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""particle_coordinates,length,reflector_coordinates,expected_collisions,expected_new_coordinates"""', '[straight_collision_and_clear_miss, barely_misses, near_misses_but_hit, hit]'], {}), "(\n 'particle_coordinates,length,reflector_coordinates,expected_collisions,expected_new_coordinates'\n , [straight_collision_and_clear_miss, barely_misses,\n near_misses_but_hit, hit])\n", (1968, 2158), False, 'import pytest\n'), ((391, 430), 'numpy.array', 'np.array', (['[[-3, 0, 0], [-3, 0, PI / 2]]'], {}), '([[-3, 0, 0], [-3, 0, PI / 2]])\n', (399, 430), True, 'import numpy as np\n'), ((445, 469), 'numpy.array', 'np.array', (['[0, 0, PI / 2]'], {}), '([0, 0, PI / 2])\n', (453, 469), True, 'import numpy as np\n'), ((475, 491), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (483, 491), True, 'import numpy as np\n'), ((497, 536), 'numpy.array', 'np.array', (['[[0, 0, PI], [-3, 0, PI / 2]]'], {}), '([[0, 0, PI], [-3, 0, PI / 2]])\n', (505, 536), True, 'import numpy as np\n'), ((563, 612), 'numpy.array', 'np.array', (['[[-1.0, 0, PI / 4], [-1.0, 0, -PI / 4]]'], {}), '([[-1.0, 0, PI / 4], [-1.0, 0, -PI / 4]])\n', (571, 612), True, 'import numpy as np\n'), ((627, 651), 'numpy.array', 'np.array', (['[0, 0, PI / 2]'], {}), '([0, 0, PI / 2])\n', (635, 651), True, 'import numpy as np\n'), ((657, 673), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (665, 673), True, 'import numpy as np\n'), ((679, 728), 
'numpy.array', 'np.array', (['[[-1.0, 0, PI / 4], [-1.0, 0, -PI / 4]]'], {}), '([[-1.0, 0, PI / 4], [-1.0, 0, -PI / 4]])\n', (687, 728), True, 'import numpy as np\n'), ((761, 812), 'numpy.array', 'np.array', (['[[-0.99, 0, PI / 4], [-0.99, 0, -PI / 4]]'], {}), '([[-0.99, 0, PI / 4], [-0.99, 0, -PI / 4]])\n', (769, 812), True, 'import numpy as np\n'), ((827, 851), 'numpy.array', 'np.array', (['[0, 0, PI / 2]'], {}), '([0, 0, PI / 2])\n', (835, 851), True, 'import numpy as np\n'), ((857, 873), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (865, 873), True, 'import numpy as np\n'), ((879, 940), 'numpy.array', 'np.array', (['[[0.0, 0.99, 3 * PI / 4], [0.0, -0.99, 5 * PI / 4]]'], {}), '([[0.0, 0.99, 3 * PI / 4], [0.0, -0.99, 5 * PI / 4]])\n', (887, 940), True, 'import numpy as np\n'), ((1043, 1067), 'numpy.array', 'np.array', (['[0, 0, PI / 2]'], {}), '([0, 0, PI / 2])\n', (1051, 1067), True, 'import numpy as np\n'), ((1073, 1089), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (1081, 1089), True, 'import numpy as np\n'), ((1095, 1152), 'numpy.array', 'np.array', (['[[0, 0.5, 5 * PI / 6], [0.0, -0.5, 7 * PI / 6]]'], {}), '([[0, 0.5, 5 * PI / 6], [0.0, -0.5, 7 * PI / 6]])\n', (1103, 1152), True, 'import numpy as np\n'), ((1268, 1298), 'src.distributed_reflectors.reflector.Reflector', 'Reflector', (['length', 'coordinates'], {}), '(length, coordinates)\n', (1277, 1298), False, 'from src.distributed_reflectors.reflector import Reflector\n'), ((1777, 1817), 'src.distributed_reflectors.reflector.Reflector', 'Reflector', (['length', 'reflector_coordinates'], {}), '(length, reflector_coordinates)\n', (1786, 1817), False, 'from src.distributed_reflectors.reflector import Reflector\n'), ((1885, 1941), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['collisions', 'expected_collisions'], {}), '(collisions, expected_collisions)\n', (1908, 1941), True, 'import numpy as np\n'), ((2336, 2376), 'src.distributed_reflectors.reflector.Reflector', 
'Reflector', (['length', 'reflector_coordinates'], {}), '(length, reflector_coordinates)\n', (2345, 2376), False, 'from src.distributed_reflectors.reflector import Reflector\n'), ((2562, 2651), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['new_coordinates', 'expected_new_coordinates'], {'atol': '(10 ** -5)'}), '(new_coordinates, expected_new_coordinates, atol=\n 10 ** -5)\n', (2588, 2651), True, 'import numpy as np\n'), ((99, 124), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (114, 124), False, 'import os\n'), ((227, 252), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (242, 252), False, 'import os\n'), ((1233, 1251), 'numpy.random.randn', 'np.random.randn', (['(3)'], {}), '(3)\n', (1248, 1251), True, 'import numpy as np\n'), ((153, 178), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (168, 178), False, 'import os\n'), ((969, 979), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (976, 979), True, 'import numpy as np\n'), ((999, 1009), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1006, 1009), True, 'import numpy as np\n')]
|
from helper import unittest, PillowTestCase, hopper
from PIL import Image
from PIL import SpiderImagePlugin
TEST_FILE = "Tests/images/hopper.spider"
class TestImageSpider(PillowTestCase):
def test_sanity(self):
im = Image.open(TEST_FILE)
im.load()
self.assertEqual(im.mode, "F")
self.assertEqual(im.size, (128, 128))
self.assertEqual(im.format, "SPIDER")
def test_save(self):
# Arrange
temp = self.tempfile('temp.spider')
im = hopper()
# Act
im.save(temp, "SPIDER")
# Assert
im2 = Image.open(temp)
self.assertEqual(im2.mode, "F")
self.assertEqual(im2.size, (128, 128))
self.assertEqual(im2.format, "SPIDER")
def test_isSpiderImage(self):
self.assertTrue(SpiderImagePlugin.isSpiderImage(TEST_FILE))
def test_tell(self):
# Arrange
im = Image.open(TEST_FILE)
# Act
index = im.tell()
# Assert
self.assertEqual(index, 0)
def test_loadImageSeries(self):
# Arrange
not_spider_file = "Tests/images/hopper.ppm"
file_list = [TEST_FILE, not_spider_file, "path/not_found.ext"]
# Act
img_list = SpiderImagePlugin.loadImageSeries(file_list)
# Assert
self.assertEqual(len(img_list), 1)
self.assertIsInstance(img_list[0], Image.Image)
self.assertEqual(img_list[0].size, (128, 128))
def test_loadImageSeries_no_input(self):
# Arrange
file_list = None
# Act
img_list = SpiderImagePlugin.loadImageSeries(file_list)
# Assert
self.assertEqual(img_list, None)
def test_isInt_not_a_number(self):
# Arrange
not_a_number = "a"
# Act
ret = SpiderImagePlugin.isInt(not_a_number)
# Assert
self.assertEqual(ret, 0)
if __name__ == '__main__':
unittest.main()
# End of file
|
[
"helper.unittest.main",
"PIL.SpiderImagePlugin.loadImageSeries",
"PIL.Image.open",
"PIL.SpiderImagePlugin.isSpiderImage",
"PIL.SpiderImagePlugin.isInt",
"helper.hopper"
] |
[((1912, 1927), 'helper.unittest.main', 'unittest.main', ([], {}), '()\n', (1925, 1927), False, 'from helper import unittest, PillowTestCase, hopper\n'), ((233, 254), 'PIL.Image.open', 'Image.open', (['TEST_FILE'], {}), '(TEST_FILE)\n', (243, 254), False, 'from PIL import Image\n'), ((505, 513), 'helper.hopper', 'hopper', ([], {}), '()\n', (511, 513), False, 'from helper import unittest, PillowTestCase, hopper\n'), ((593, 609), 'PIL.Image.open', 'Image.open', (['temp'], {}), '(temp)\n', (603, 609), False, 'from PIL import Image\n'), ((904, 925), 'PIL.Image.open', 'Image.open', (['TEST_FILE'], {}), '(TEST_FILE)\n', (914, 925), False, 'from PIL import Image\n'), ((1232, 1276), 'PIL.SpiderImagePlugin.loadImageSeries', 'SpiderImagePlugin.loadImageSeries', (['file_list'], {}), '(file_list)\n', (1265, 1276), False, 'from PIL import SpiderImagePlugin\n'), ((1572, 1616), 'PIL.SpiderImagePlugin.loadImageSeries', 'SpiderImagePlugin.loadImageSeries', (['file_list'], {}), '(file_list)\n', (1605, 1616), False, 'from PIL import SpiderImagePlugin\n'), ((1790, 1827), 'PIL.SpiderImagePlugin.isInt', 'SpiderImagePlugin.isInt', (['not_a_number'], {}), '(not_a_number)\n', (1813, 1827), False, 'from PIL import SpiderImagePlugin\n'), ((803, 845), 'PIL.SpiderImagePlugin.isSpiderImage', 'SpiderImagePlugin.isSpiderImage', (['TEST_FILE'], {}), '(TEST_FILE)\n', (834, 845), False, 'from PIL import SpiderImagePlugin\n')]
|
import unittest
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
adapter = HTTPAdapter(max_retries=Retry(total=5, backoff_factor=1))
http = requests.Session()
http.mount("https://", adapter)
http.mount("http://", adapter)
class ApiTests(unittest.TestCase):
def test_healthcheck(self):
result = http.get("http://app:8080/health")
self.assertEqual(200, result.status_code)
|
[
"requests.Session",
"urllib3.util.retry.Retry"
] |
[((188, 206), 'requests.Session', 'requests.Session', ([], {}), '()\n', (204, 206), False, 'import requests\n'), ((147, 179), 'urllib3.util.retry.Retry', 'Retry', ([], {'total': '(5)', 'backoff_factor': '(1)'}), '(total=5, backoff_factor=1)\n', (152, 179), False, 'from urllib3.util.retry import Retry\n')]
|
from pycocotools.coco import COCO
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
import random
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader,Dataset
from skimage import io,transform
import matplotlib.pyplot as plt
import os
import torch
from torchvision import transforms
import numpy as np
import PIL.Image as Image
from skimage import measure
from tqdm import tqdm
import torch.nn.functional as F
from skimage.morphology import convex_hull_image
class SuperPixelGet(Dataset): #继承Dataset
def __init__(self, segments_label, segments_tensor, g_theta_m, data_num):
self.segments_label = segments_label.cuda()
self.segments_tensor = segments_tensor.cuda()
self.g_theta_m = g_theta_m.cuda()
self.data_num = data_num
self.zero_layer = torch.zeros_like(self.segments_tensor)
self.one_layer = torch.ones_like(self.segments_tensor)
def __len__(self):
return self.data_num
def __getitem__(self, index):
attack_region_tmp = self.zero_layer.clone()
flag = torch.rand_like( self.segments_label) < self.g_theta_m
for i in range(flag.shape[0]):
if flag[i]:
sp = self.segments_label[i]
attack_region_tmp = torch.where(self.segments_tensor==sp, self.one_layer, attack_region_tmp)
# # get convex envolope
# attack_region_tmp_np = attack_region_tmp.cpu().numpy()
# attack_region_tmp_label_np = measure.label(attack_region_tmp_np)
# connect_region_number = int(np.max(attack_region_tmp_label_np))
# one_np = np.ones_like(attack_region_tmp_np)
# zero_np = np.zeros_like(attack_region_tmp_np)
# attack_region_envolope_np = np.zeros_like(attack_region_tmp_np)
# for i in range(connect_region_number):
# binary_map = np.where(attack_region_tmp_label_np==i+1, one_np, zero_np)
# convex_env = convex_hull_image(binary_map)
# attack_region_envolope_np = attack_region_envolope_np + convex_env
# pass
# attack_region_tmp = torch.from_numpy(attack_region_envolope_np)
# attack_region_tmp = torch.clamp(attack_region_tmp, 0, 1).cuda()
return attack_region_tmp, flag
if __name__=='__main__':
segments_tensor = [
[0,0,1,1,1,2,2,2,3,3,4,4,5,5,5,0,0],
[0,0,1,1,1,2,2,2,3,3,4,4,4,5,5,0,0],
[0,0,1,1,1,2,3,3,3,3,4,4,4,5,5,0,0],
[0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
[0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
[0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
[0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
[0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
[0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
[0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
[0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
[0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
]
segments_tensor = torch.Tensor(segments_tensor)
g_theta_m = torch.Tensor([0.1,0.2,0.3,0.4,0.5,0.6])
data_num = 555
data = SuperPixelGet(torch.Tensor([1,2,3,4,5,6]), segments_tensor, g_theta_m, data_num)
dataloader = DataLoader(data, batch_size=128,shuffle=False) #使用DataLoader加载数据
max_len = 0
for epoch in range(10):
for i_batch, batch_data in enumerate(dataloader):
sum_tensor = torch.sum(batch_data, dim=0)
sum_tensor = sum_tensor/torch.max(sum_tensor)
sum_tensor = sum_tensor.unsqueeze(0).unsqueeze(0)
sum_tensor = F.interpolate(sum_tensor, (800, 800), mode='nearest').squeeze()
sum_pil = transforms.ToPILImage()(sum_tensor)
sum_pil.show()
pass
|
[
"torch.ones_like",
"torch.zeros_like",
"torch.utils.data.DataLoader",
"torch.where",
"torchvision.transforms.ToPILImage",
"torch.rand_like",
"torch.Tensor",
"torch.max",
"torch.nn.functional.interpolate",
"torch.sum"
] |
[((2956, 2985), 'torch.Tensor', 'torch.Tensor', (['segments_tensor'], {}), '(segments_tensor)\n', (2968, 2985), False, 'import torch\n'), ((3002, 3046), 'torch.Tensor', 'torch.Tensor', (['[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]'], {}), '([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])\n', (3014, 3046), False, 'import torch\n'), ((3170, 3217), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': '(128)', 'shuffle': '(False)'}), '(data, batch_size=128, shuffle=False)\n', (3180, 3217), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((855, 893), 'torch.zeros_like', 'torch.zeros_like', (['self.segments_tensor'], {}), '(self.segments_tensor)\n', (871, 893), False, 'import torch\n'), ((919, 956), 'torch.ones_like', 'torch.ones_like', (['self.segments_tensor'], {}), '(self.segments_tensor)\n', (934, 956), False, 'import torch\n'), ((3086, 3118), 'torch.Tensor', 'torch.Tensor', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (3098, 3118), False, 'import torch\n'), ((1122, 1158), 'torch.rand_like', 'torch.rand_like', (['self.segments_label'], {}), '(self.segments_label)\n', (1137, 1158), False, 'import torch\n'), ((3364, 3392), 'torch.sum', 'torch.sum', (['batch_data'], {'dim': '(0)'}), '(batch_data, dim=0)\n', (3373, 3392), False, 'import torch\n'), ((1329, 1403), 'torch.where', 'torch.where', (['(self.segments_tensor == sp)', 'self.one_layer', 'attack_region_tmp'], {}), '(self.segments_tensor == sp, self.one_layer, attack_region_tmp)\n', (1340, 1403), False, 'import torch\n'), ((3429, 3450), 'torch.max', 'torch.max', (['sum_tensor'], {}), '(sum_tensor)\n', (3438, 3450), False, 'import torch\n'), ((3624, 3647), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (3645, 3647), False, 'from torchvision import transforms\n'), ((3538, 3591), 'torch.nn.functional.interpolate', 'F.interpolate', (['sum_tensor', '(800, 800)'], {'mode': '"""nearest"""'}), "(sum_tensor, (800, 800), mode='nearest')\n", (3551, 3591), True, 'import 
torch.nn.functional as F\n')]
|
import os
import sys
import subprocess
import platform
import tempfile
import logging
def fixdate():
date = None
_f = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
if platform.system() == "Linux":
try:
_f = 'date +"%m/%d/%y %I:%M:%S %p"'
date = subprocess.check_output(_f).decode("utf-8").strip()
date += " %(name)s - %(levelname)s - %(message)s"
except:
pass
else:
date = _f
return date
class Logger(object):
logit = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logfile = os.path.join(tempfile.gettempdir(), "pydatpiff.log")
handler = logging.FileHandler(logfile)
handler.setLevel(logging.WARNING)
_format = logging.Formatter(fixdate(), datefmt="%m/%d/%Y %I:%M:%S %p %Z")
handler.setFormatter(_format)
logit.addHandler(handler)
@classmethod
def _parseLog(cls, *msg, level="info"):
msg = " ".join(msg)
if level == "info":
cls.logit.info(msg)
elif level == "warning":
cls.logit.warning(msg)
elif level == "critical":
cls.logit.critical(msg)
@classmethod
def display(cls, *msg):
cls._parseLog(*msg, level="info")
@classmethod
def warn(cls, *msg):
cls._parseLog(*msg, level="warning")
@classmethod
def failed(cls, *msg):
cls._parseLog(*msg, level="critical")
class Error(Exception):
__error__ = {
1: "invalid mixtapes object",
2: "no mixtapes found",
}
code = None
message = "Unknown"
def __init__(self, code, detail=""):
self._code = code
code = self.create(code, detail)
super().__init__(code)
def logError(self, error, critical=False):
if error and error in self.__error__:
if not critical:
Logger.warn(self.__error__[error])
else:
Logger.failed(self.__error__[error])
sys.exit(1)
def show(self, code):
return self.__error__.get(code)
@staticmethod
def makeError(code):
# code = code or self.message
cont = []
for x in code.split(" "):
cont.append(x.capitalize())
cont = "".join(cont)
return cont
@classmethod
def create(cls, export_to, long_msg=""):
if isinstance(export_to, str):
max_e = max(cls.__error__)
cls.__error__[max_e + 1] = Error.makeError(export_to)
return Error.create(max_e + 1)
elif export_to in cls.__error__:
for code, error in cls.__error__.items():
if not isinstance(error, str):
return error
name = Error.makeError(error)
e = type(name, (cls,), {"code": code, "message": error,})
cls.__error__[code] = e
return "".join((str(cls.__error__[export_to]), "\n" + long_msg))
class MixtapesError(Error):
""" handle all the Mixtapes errors"""
__error__ = {
1: "No Mixtapes Found",
2: "Invalid category selected",
3: "Unable to process Data",
4: "Invalid data type",
4: "TooFewCharacters",
}
class MediaError(Error):
""" handle all the Media errors"""
__error__ = {
1: "no mixtapes found",
2: "invalid mixtapes object",
3: "media album not set",
4: "invaild track index",
5: "song selection error",
6: "unsupported media player",
7: "media player not found",
8: "song cache storage failed",
}
class AlbumError(Error):
__error__ = {
1: "Mixtapes Not Found",
2: "Invalid category selected",
}
class Mp3Error(Error):
__error__ = {
1: "player id error",
}
class DatpiffError(Error):
__error__ = {
1: "Datpiff media server down",
2: "Datpiff desktop version failed",
3: "Datpiff mobile version failed",
}
class MvpError(Error):
__error__ = {
1: "song path does not exist",
}
class PlayerError(Error):
__error__ = {
1: "Unsupport media object",
2: "no song found",
3: "derive class missing function",
4: "call back method missing",
5: "unsupported player",
}
class RequestError(Error):
# _type = self.__class__._qualname__
__error__ = {
1: "invalid url scheme",
2: "web page timed out",
3: "request failed",
4: "requests status code error",
}
class BuildError(Error):
__error__ = {
1,
"user selection",
}
class InstallationError(Error):
_extra = (
"\nPydatpiff Audio requires either VLC or MPV installation."
"\n\nView"
" https://github.com/cbedroid/pydatpiff/blob/master/README.md"
" for more installation instructions."
""
)
__error__ = {
1,
"Pydatpiff installion error",
}
|
[
"logging.FileHandler",
"logging.basicConfig",
"subprocess.check_output",
"tempfile.gettempdir",
"platform.system",
"sys.exit",
"logging.getLogger"
] |
[((528, 555), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (545, 555), False, 'import logging\n'), ((560, 624), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARNING', 'format': '"""%(message)s"""'}), "(level=logging.WARNING, format='%(message)s')\n", (579, 624), False, 'import logging\n'), ((706, 734), 'logging.FileHandler', 'logging.FileHandler', (['logfile'], {}), '(logfile)\n', (725, 734), False, 'import logging\n'), ((190, 207), 'platform.system', 'platform.system', ([], {}), '()\n', (205, 207), False, 'import platform\n'), ((652, 673), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (671, 673), False, 'import tempfile\n'), ((2033, 2044), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2041, 2044), False, 'import sys\n'), ((301, 328), 'subprocess.check_output', 'subprocess.check_output', (['_f'], {}), '(_f)\n', (324, 328), False, 'import subprocess\n')]
|
# coding: utf-8
# app: incidencias
# module: urls.py
# date: miércoles, 06 de junio de 2018 - 11:30
# description: Patrones de ruta de cobertura.
# pylint: disable=W0613,R0201,R0903
from django.urls import path
from apps.incidencias.views import Portada, EventoDetail
app_name = 'incidencias'
urlpatterns = [
path('', Portada.as_view(), name='index'),
path('<int:pk>/', EventoDetail.as_view(), name='evento')
]
|
[
"apps.incidencias.views.Portada.as_view",
"apps.incidencias.views.EventoDetail.as_view"
] |
[((345, 362), 'apps.incidencias.views.Portada.as_view', 'Portada.as_view', ([], {}), '()\n', (360, 362), False, 'from apps.incidencias.views import Portada, EventoDetail\n'), ((401, 423), 'apps.incidencias.views.EventoDetail.as_view', 'EventoDetail.as_view', ([], {}), '()\n', (421, 423), False, 'from apps.incidencias.views import Portada, EventoDetail\n')]
|
from utime import sleep
from npxl import NeoPixel
from colors import *
strip = NeoPixel(4, 8)
strip.fill(BLACK)
while True:
strip[0] = YELLOW
strip.show()
sleep(1.0)
strip.fill(BLACK)
strip.show()
sleep(1.0)
|
[
"npxl.NeoPixel",
"utime.sleep"
] |
[((80, 94), 'npxl.NeoPixel', 'NeoPixel', (['(4)', '(8)'], {}), '(4, 8)\n', (88, 94), False, 'from npxl import NeoPixel\n'), ((169, 179), 'utime.sleep', 'sleep', (['(1.0)'], {}), '(1.0)\n', (174, 179), False, 'from utime import sleep\n'), ((223, 233), 'utime.sleep', 'sleep', (['(1.0)'], {}), '(1.0)\n', (228, 233), False, 'from utime import sleep\n')]
|
import ray
import wandb
from agent.workers.DreamerWorker import DreamerWorker
class DreamerServer:
def __init__(self, n_workers, env_config, controller_config, model):
ray.init()
self.workers = [DreamerWorker.remote(i, env_config, controller_config) for i in range(n_workers)]
self.tasks = [worker.run.remote(model) for worker in self.workers]
def append(self, idx, update):
self.tasks.append(self.workers[idx].run.remote(update))
def run(self):
done_id, tasks = ray.wait(self.tasks)
self.tasks = tasks
recvs = ray.get(done_id)[0]
return recvs
class DreamerRunner:
def __init__(self, env_config, learner_config, controller_config, n_workers):
self.n_workers = n_workers
self.learner = learner_config.create_learner()
self.server = DreamerServer(n_workers, env_config, controller_config, self.learner.params())
def run(self, max_steps=10 ** 10, max_episodes=10 ** 10):
cur_steps, cur_episode = 0, 0
while True:
rollout, info = self.server.run()
self.learner.step(rollout)
cur_steps += info["steps_done"]
cur_episode += 1
wandb.log({'reward': info["reward"]})
print(cur_episode, self.learner.total_samples, info["reward"])
if cur_episode >= max_episodes or cur_steps >= max_steps:
break
self.server.append(info['idx'], self.learner.params())
|
[
"wandb.log",
"ray.init",
"ray.get",
"agent.workers.DreamerWorker.DreamerWorker.remote",
"ray.wait"
] |
[((183, 193), 'ray.init', 'ray.init', ([], {}), '()\n', (191, 193), False, 'import ray\n'), ((521, 541), 'ray.wait', 'ray.wait', (['self.tasks'], {}), '(self.tasks)\n', (529, 541), False, 'import ray\n'), ((219, 273), 'agent.workers.DreamerWorker.DreamerWorker.remote', 'DreamerWorker.remote', (['i', 'env_config', 'controller_config'], {}), '(i, env_config, controller_config)\n', (239, 273), False, 'from agent.workers.DreamerWorker import DreamerWorker\n'), ((585, 601), 'ray.get', 'ray.get', (['done_id'], {}), '(done_id)\n', (592, 601), False, 'import ray\n'), ((1215, 1252), 'wandb.log', 'wandb.log', (["{'reward': info['reward']}"], {}), "({'reward': info['reward']})\n", (1224, 1252), False, 'import wandb\n')]
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: <NAME>
@license: GNU General Public License 2.0
@contact: <EMAIL>
@organization:
"""
import struct, string
import volatility.obj as obj
import volatility.debug as debug
import volatility.addrspace as addrspace
import volatility.plugins.mac.common as mac_common
import volatility.plugins.mac.pstasks as mac_tasks
from volatility.renderers import TreeGrid
bash_vtypes = {
'bash32_hist_entry': [ 0xc, {
'line': [0x0, ['pointer', ['String', dict(length = 1024)]]],
'timestamp': [0x4, ['pointer', ['String', dict(length = 1024)]]],
'data': [0x8, ['pointer', ['void']]],
}],
'bash64_hist_entry': [ 24, {
'line': [0, ['pointer', ['String', dict(length = 1024)]]],
'timestamp': [8, ['pointer', ['String', dict(length = 1024)]]],
'data': [16, ['pointer', ['void']]],
}],
}
class _mac_hist_entry(obj.CType):
"""A class for history entries"""
def is_valid(self):
line_addr = self.line_ptr()
time_addr = self.time_ptr()
if (not obj.CType.is_valid(self) or
not self.obj_vm.is_valid_address(line_addr) or
not self.obj_vm.is_valid_address(time_addr)):
return False
ts = self.obj_vm.read(time_addr, 256)
if not ts:
return False
idx = ts.find("\x00")
if idx != -1:
ts = ts[:idx]
# At this point in time, the epoc integer size will
# never be less than 10 characters, and the stamp is
# always preceded by a pound/hash character.
if len(ts) < 10 or str(ts)[0] != "#":
return False
# The final check is to make sure the entire string
# is composed of numbers. Try to convert to an int.
try:
int(str(ts)[1:])
except ValueError:
return False
return True
def line(self):
line_addr = self.line_ptr()
buf = self.obj_vm.read(line_addr, 256)
if buf:
idx = buf.find("\x00")
if idx != -1:
buf = buf[:idx]
ret = "".join([c for c in buf if c in string.printable])
else:
ret = ""
return ret
@property
def time_as_integer(self):
# Get the string and remove the leading "#" from the timestamp
time_addr = self.time_ptr()
ts = self.obj_vm.read(time_addr, 256)
ts = ts[1:]
idx = ts.find("\x00")
if idx != -1:
ts = ts[:idx]
# Convert the string into an integer (number of seconds)
return int(ts)
def time_object(self):
nsecs = self.time_as_integer
# Build a timestamp object from the integer
time_val = struct.pack("<I", nsecs)
time_buf = addrspace.BufferAddressSpace(self.obj_vm.get_config(), data = time_val)
time_obj = obj.Object("UnixTimeStamp", offset = 0, vm = time_buf, is_utc = True)
return time_obj
def line_ptr(self):
addr = self.m("line").obj_offset
return self.read_ptr(addr)
def time_ptr(self):
addr = self.m("timestamp").obj_offset
return self.read_ptr(addr)
class bash64_hist_entry(_mac_hist_entry):
def read_ptr(self, addr):
addr = self.obj_vm.read(addr, 8)
addr = struct.unpack("<Q", addr)[0]
return addr
class bash32_hist_entry(_mac_hist_entry):
def read_ptr(self, addr):
addr = self.obj_vm.read(addr, 4)
addr = struct.unpack("<I", addr)[0]
return addr
class MacBashTypes(obj.ProfileModification):
conditions = {"os" : lambda x : x in ["mac"]}
def modification(self, profile):
profile.vtypes.update(bash_vtypes)
profile.object_classes.update({"bash32_hist_entry": bash32_hist_entry, "bash64_hist_entry": bash64_hist_entry})
class mac_bash(mac_tasks.mac_tasks):
"""Recover bash history from bash process memory"""
def __init__(self, config, *args, **kwargs):
mac_tasks.mac_tasks.__init__(self, config, *args, **kwargs)
self._config.add_option('SCAN_ALL', short_option = 'A', default = False, help = 'scan all processes, not just those named bash', action = 'store_true')
def unified_output(self, data):
return TreeGrid([("Pid", int),
("Name", str),
("Command Time", str),
("Command", str),
], self.generator(data))
def generator(self, data):
for task in data:
if not (self._config.SCAN_ALL or str(task.p_comm) == "bash"):
continue
for hist_entry in task.bash_history_entries():
yield (0, [
int(task.p_pid),
str(task.p_comm),
str(hist_entry.time_object()),
str(hist_entry.line()),
])
def render_text(self, outfd, data):
self.table_header(outfd, [("Pid", "8"),
("Name", "20"),
("Command Time", "30"),
("Command", ""),])
for task in data:
if not (self._config.SCAN_ALL or str(task.p_comm) == "bash"):
continue
for hist_entry in task.bash_history_entries():
self.table_row(outfd, task.p_pid, task.p_comm,
hist_entry.time_object(),
hist_entry.line())
|
[
"volatility.plugins.mac.pstasks.mac_tasks.__init__",
"volatility.obj.Object",
"volatility.obj.CType.is_valid",
"struct.unpack",
"struct.pack"
] |
[((3485, 3509), 'struct.pack', 'struct.pack', (['"""<I"""', 'nsecs'], {}), "('<I', nsecs)\n", (3496, 3509), False, 'import struct, string\n'), ((3620, 3683), 'volatility.obj.Object', 'obj.Object', (['"""UnixTimeStamp"""'], {'offset': '(0)', 'vm': 'time_buf', 'is_utc': '(True)'}), "('UnixTimeStamp', offset=0, vm=time_buf, is_utc=True)\n", (3630, 3683), True, 'import volatility.obj as obj\n'), ((4727, 4786), 'volatility.plugins.mac.pstasks.mac_tasks.__init__', 'mac_tasks.mac_tasks.__init__', (['self', 'config', '*args'], {}), '(self, config, *args, **kwargs)\n', (4755, 4786), True, 'import volatility.plugins.mac.pstasks as mac_tasks\n'), ((4050, 4075), 'struct.unpack', 'struct.unpack', (['"""<Q"""', 'addr'], {}), "('<Q', addr)\n", (4063, 4075), False, 'import struct, string\n'), ((4228, 4253), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'addr'], {}), "('<I', addr)\n", (4241, 4253), False, 'import struct, string\n'), ((1770, 1794), 'volatility.obj.CType.is_valid', 'obj.CType.is_valid', (['self'], {}), '(self)\n', (1788, 1794), True, 'import volatility.obj as obj\n')]
|
from jinja2 import Environment, FileSystemLoader
import webbrowser
import time
##################################################
## This is report module and responsible to generate report.
##################################################
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Project"
__credits__ = []
__license__ = "GPL"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "POC"
class TestReport:
def __init__(self):
# Load templates file from templtes folder
self.env = Environment(loader=FileSystemLoader('./report'), trim_blocks=True, lstrip_blocks=True)
self.template = self.env.get_template('index.html')
self.rendered_filename = "report_"+time.strftime("%Y%m%d-%H%M%S")+".html"
self.rendered_file_path = "./report/" + self.rendered_filename
self.template_vars = {
"total_number_of_test": '',
"passed": '',
"failed": '',
"not_executed": '',
"result_table": ''
}
def generate_html(self, *args):
self.prepare_summary(args[1])
self.prepare_result_table(args[0])
print("Test result: ")
print(self.template_vars)
output_text = self.template.render(self.template_vars)
with open(self.rendered_file_path, "w") as result_file:
result_file.write(output_text)
# open a public URL, in this case, the webbrowser docs
# url = "http://docs.python.org/library/webbrowser.html"
# webbrowser.open(url, new=new)
# open an HTML file on my own (Windows) computer
url = "file://D:/AppiumProject/vrit/report/"+self.rendered_filename
webbrowser.open(url)
def prepare_summary(self, summary):
# print(summary)
self.template_vars["total_number_of_test"] = summary["total_number_of_test"]
self.template_vars["passed"] = summary["passed"]
self.template_vars["failed"] = summary["failed"]
self.template_vars["not_executed"] = summary["not_executed"]
def prepare_result_table(self, test_result):
html_table = ""
for test in test_result:
html_table += '<tr role="row">'
html_table += '<td>'+test["title"]+'</td>'
html_table += '<td>'+test["type"]+'</td>'
html_table += '<td>'+test["status"]+'</td>'
html_table += '</tr>'
self.template_vars["result_table"] = html_table
|
[
"jinja2.FileSystemLoader",
"webbrowser.open",
"time.strftime"
] |
[((1709, 1729), 'webbrowser.open', 'webbrowser.open', (['url'], {}), '(url)\n', (1724, 1729), False, 'import webbrowser\n'), ((567, 595), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['"""./report"""'], {}), "('./report')\n", (583, 595), False, 'from jinja2 import Environment, FileSystemLoader\n'), ((738, 768), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (751, 768), False, 'import time\n')]
|
from typing import Any, Dict, List, Tuple
import urwid
from zulipterminal.config import is_command_key
class MenuButton(urwid.Button):
def __init__(self, caption: Any, email: str='') -> None:
self.caption = caption # str
self.email = email
super(MenuButton, self).__init__("")
self._w = urwid.AttrMap(urwid.SelectableIcon(
[self.caption], 0), None, 'selected')
class HomeButton(urwid.Button):
def __init__(self, controller: Any, count: int=0) -> None:
self.caption = 'All messages'
self.count = count
super(HomeButton, self).__init__("")
self._w = self.widget(count)
self.controller = controller
urwid.connect_signal(self, 'click', controller.show_all_messages)
def update_count(self, count: int) -> None:
self.count = count
self._w = self.widget(count)
def widget(self, count: int) -> Any:
return urwid.AttrMap(urwid.SelectableIcon(
[u' \N{BULLET} ', self.caption,
('idle', '' if count <= 0 else ' ' + str(count))],
len(self.caption) + 4),
None,
'selected')
def keypress(self, size: Tuple[int, int], key: str) -> str:
if is_command_key('ENTER', key):
self.controller.view.toggle_left_panel()
return super(HomeButton, self).keypress(size, key)
class PMButton(urwid.Button):
def __init__(self, controller: Any, count: int=0) -> None:
self.caption = 'Private messages'
super(PMButton, self).__init__("")
self.count = count
self._w = self.widget(count)
self.controller = controller
urwid.connect_signal(self, 'click', controller.show_all_pm)
def update_count(self, count: int) -> None:
self.count = count
self._w = self.widget(count)
def widget(self, count: int) -> Any:
return urwid.AttrMap(urwid.SelectableIcon(
[u' \N{BULLET} ', self.caption,
('idle', '' if count <= 0 else ' ' + str(count))],
len(self.caption) + 4),
None,
'selected')
def keypress(self, size: Tuple[int, int], key: str) -> str:
if is_command_key('ENTER', key):
self.controller.view.toggle_left_panel()
return super(PMButton, self).keypress(size, key)
class StreamButton(urwid.Button):
def __init__(self, properties: List[Any],
controller: Any, view: Any, count: int=0) -> None:
self.caption = properties[0]
self.stream_id = properties[1]
color = properties[2]
self.color = color[:2] + color[3] + color[5]
view.palette['default'].append((self.color, '', '', '', self.color,
'black'))
view.palette['default'].append(('s' + self.color, '', '', '',
'black', self.color))
self.is_private = properties[3]
self.count = count
super(StreamButton, self).__init__("")
self._w = self.widget(count)
self.controller = controller
urwid.connect_signal(self, 'click', controller.narrow_to_stream)
def update_count(self, count: int) -> None:
self.count = count
self._w = self.widget(count)
def widget(self, count: int) -> Any:
stream_prefix = ' ' + ('P' if self.is_private else '#') + ' '
return urwid.AttrMap(urwid.SelectableIcon(
[(self.color, stream_prefix), self.caption,
('idle', '' if count <= 0 else ' ' + str(count))],
len(self.caption) + 2),
None,
'selected')
def keypress(self, size: Tuple[int, int], key: str) -> str:
if is_command_key('ENTER', key):
self.controller.view.toggle_left_panel()
return super(StreamButton, self).keypress(size, key)
class UserButton(urwid.Button):
def __init__(self, user: Dict[str, Any], controller: Any,
view: Any, color: str=None, count: int=0) -> None:
self.caption = user['full_name'] # str
self.email = user['email']
self.user_id = user['user_id']
self.color = color
self.count = count
self.recipients = frozenset({
self.user_id, view.model.user_id})
super(UserButton, self).__init__("")
self._w = self.widget(count)
self.controller = controller
self.view = view
def update_count(self, count: int) -> None:
self.count = count
self._w = self.widget(count)
def widget(self, count: int) -> Any:
return urwid.AttrMap(urwid.SelectableIcon(
[u'\N{BULLET} ', self.caption,
('idle', '' if count <= 0 else ' ' + str(count))],
len(self.caption) + 2),
self.color,
'selected')
def keypress(self, size: Tuple[int, int], key: str) -> str:
if is_command_key('ENTER', key):
self.controller.narrow_to_user(self)
self.view.body.focus_col = 1
self.view.body.focus.original_widget.set_focus('footer')
self.view.write_box.private_box_view(self)
self.view.toggle_left_panel()
return super(UserButton, self).keypress(size, key)
class TopicButton(urwid.Button):
def __init__(self, stream_id: str, topic: str, model: Any) -> None:
self.caption = model.stream_dict[int(stream_id)]['name'] # stream name
self.title = topic
self.stream_id = int(stream_id)
class UnreadPMButton(urwid.Button):
def __init__(self, user_id: int, email: str) -> None:
self.user_id = user_id
self.email = email
|
[
"urwid.SelectableIcon",
"zulipterminal.config.is_command_key",
"urwid.connect_signal"
] |
[((703, 768), 'urwid.connect_signal', 'urwid.connect_signal', (['self', '"""click"""', 'controller.show_all_messages'], {}), "(self, 'click', controller.show_all_messages)\n", (723, 768), False, 'import urwid\n'), ((1237, 1265), 'zulipterminal.config.is_command_key', 'is_command_key', (['"""ENTER"""', 'key'], {}), "('ENTER', key)\n", (1251, 1265), False, 'from zulipterminal.config import is_command_key\n'), ((1668, 1727), 'urwid.connect_signal', 'urwid.connect_signal', (['self', '"""click"""', 'controller.show_all_pm'], {}), "(self, 'click', controller.show_all_pm)\n", (1688, 1727), False, 'import urwid\n'), ((2196, 2224), 'zulipterminal.config.is_command_key', 'is_command_key', (['"""ENTER"""', 'key'], {}), "('ENTER', key)\n", (2210, 2224), False, 'from zulipterminal.config import is_command_key\n'), ((3097, 3161), 'urwid.connect_signal', 'urwid.connect_signal', (['self', '"""click"""', 'controller.narrow_to_stream'], {}), "(self, 'click', controller.narrow_to_stream)\n", (3117, 3161), False, 'import urwid\n'), ((3712, 3740), 'zulipterminal.config.is_command_key', 'is_command_key', (['"""ENTER"""', 'key'], {}), "('ENTER', key)\n", (3726, 3740), False, 'from zulipterminal.config import is_command_key\n'), ((4898, 4926), 'zulipterminal.config.is_command_key', 'is_command_key', (['"""ENTER"""', 'key'], {}), "('ENTER', key)\n", (4912, 4926), False, 'from zulipterminal.config import is_command_key\n'), ((342, 381), 'urwid.SelectableIcon', 'urwid.SelectableIcon', (['[self.caption]', '(0)'], {}), '([self.caption], 0)\n', (362, 381), False, 'import urwid\n')]
|
import theano.tensor as T
import theano
from mozi.utils.utils import theano_unique
from mozi.utils.theano_utils import asfloatX
floatX = theano.config.floatX
if floatX == 'float64':
epsilon = 1.0e-8
else:
epsilon = 1.0e-6
def accuracy(y, y_pred):
L = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1))
return T.mean(L)
# L = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1))
# return T.sum(L) / y.shape[0].astype(floatX)
def mse(y, y_pred):
return T.mean(T.sqr(y-y_pred))
def entropy(y, y_pred):
y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
L = -(y * T.log(y_pred) + (1-y) * T.log(1-y_pred))
return T.mean(L)
# L = - T.sum(y * T.log(y_pred) + (1-y) * T.log(1-y_pred), axis=1)
# return T.mean(L)
def error(y, y_pred):
L = T.neq(y_pred.argmax(axis=1), y.argmax(axis=1))
return T.mean(L)
def recall(y, y_pred):
L = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1))
return T.sum(L) / y.shape[0].astype(floatX)
def precision(y, y_pred):
L = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1))
return T.sum(L) / y_pred.shape[0].astype(floatX)
def f1(y, y_pred):
r = recall(y, y_pred)
p = precision(y, y_pred)
return 2 * p * r / (p + r)
def hingeloss(y, y_pred):
y_pred = T.clip(y_pred, 0., 1.0)
L = T.max(0, 1 - y * y_pred)
return T.mean(L)
def abs(y, y_pred):
return T.mean(T.abs_(y-y_pred))
def SGVB_bin(y, y_pred):
'''
This cost function is for variational autoencoder with binary inputs
'''
ypred, miu_e, logsig_e = y_pred
ypred = T.clip(ypred, epsilon, 1.0 - epsilon)
logpxz = -T.nnet.binary_crossentropy(ypred, y).sum(axis=1)
L = logpxz + 0.5 * (1 + 2*logsig_e - miu_e**2 - T.exp(2*logsig_e)).sum(axis=1)
return L.mean()
|
[
"theano.tensor.log",
"theano.tensor.sum",
"theano.tensor.abs_",
"theano.tensor.exp",
"theano.tensor.nnet.binary_crossentropy",
"theano.tensor.mean",
"theano.tensor.sqr",
"theano.tensor.max",
"theano.tensor.clip"
] |
[((324, 333), 'theano.tensor.mean', 'T.mean', (['L'], {}), '(L)\n', (330, 333), True, 'import theano.tensor as T\n'), ((534, 572), 'theano.tensor.clip', 'T.clip', (['y_pred', 'epsilon', '(1.0 - epsilon)'], {}), '(y_pred, epsilon, 1.0 - epsilon)\n', (540, 572), True, 'import theano.tensor as T\n'), ((639, 648), 'theano.tensor.mean', 'T.mean', (['L'], {}), '(L)\n', (645, 648), True, 'import theano.tensor as T\n'), ((832, 841), 'theano.tensor.mean', 'T.mean', (['L'], {}), '(L)\n', (838, 841), True, 'import theano.tensor as T\n'), ((1248, 1272), 'theano.tensor.clip', 'T.clip', (['y_pred', '(0.0)', '(1.0)'], {}), '(y_pred, 0.0, 1.0)\n', (1254, 1272), True, 'import theano.tensor as T\n'), ((1280, 1304), 'theano.tensor.max', 'T.max', (['(0)', '(1 - y * y_pred)'], {}), '(0, 1 - y * y_pred)\n', (1285, 1304), True, 'import theano.tensor as T\n'), ((1316, 1325), 'theano.tensor.mean', 'T.mean', (['L'], {}), '(L)\n', (1322, 1325), True, 'import theano.tensor as T\n'), ((1546, 1583), 'theano.tensor.clip', 'T.clip', (['ypred', 'epsilon', '(1.0 - epsilon)'], {}), '(ypred, epsilon, 1.0 - epsilon)\n', (1552, 1583), True, 'import theano.tensor as T\n'), ((479, 496), 'theano.tensor.sqr', 'T.sqr', (['(y - y_pred)'], {}), '(y - y_pred)\n', (484, 496), True, 'import theano.tensor as T\n'), ((931, 939), 'theano.tensor.sum', 'T.sum', (['L'], {}), '(L)\n', (936, 939), True, 'import theano.tensor as T\n'), ((1060, 1068), 'theano.tensor.sum', 'T.sum', (['L'], {}), '(L)\n', (1065, 1068), True, 'import theano.tensor as T\n'), ((1365, 1383), 'theano.tensor.abs_', 'T.abs_', (['(y - y_pred)'], {}), '(y - y_pred)\n', (1371, 1383), True, 'import theano.tensor as T\n'), ((587, 600), 'theano.tensor.log', 'T.log', (['y_pred'], {}), '(y_pred)\n', (592, 600), True, 'import theano.tensor as T\n'), ((611, 628), 'theano.tensor.log', 'T.log', (['(1 - y_pred)'], {}), '(1 - y_pred)\n', (616, 628), True, 'import theano.tensor as T\n'), ((1598, 1634), 'theano.tensor.nnet.binary_crossentropy', 
'T.nnet.binary_crossentropy', (['ypred', 'y'], {}), '(ypred, y)\n', (1624, 1634), True, 'import theano.tensor as T\n'), ((1699, 1718), 'theano.tensor.exp', 'T.exp', (['(2 * logsig_e)'], {}), '(2 * logsig_e)\n', (1704, 1718), True, 'import theano.tensor as T\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 13:14:28 2020
@author: derek.bickhart-adm
"""
import matplotlib
from matplotlib import pyplot as plt
# Select the non-interactive Agg backend so plots render without a display.
# NOTE(review): pyplot is already imported above; backend selection normally
# belongs before the pyplot import — confirm this still takes effect here.
matplotlib.use('Agg')
from matplotlib.collections import BrokenBarHCollection
from matplotlib import cm
from itertools import cycle
from collections import defaultdict
import argparse
import pandas
import numpy as np
import pysam
def arg_parse():
    """Build the command-line parser and parse ``sys.argv``.

    Returns
    -------
    tuple
        ``(parsed_args, parser)``.

    BUGFIX: the human-readable variant file was registered under ``-h``,
    which collides with argparse's auto-generated help flag and raises
    ``argparse.ArgumentError`` as soon as the parser is built.  The option
    is now long-form only (``--human``), which is backward compatible
    because the short form never worked.
    """
    parser = argparse.ArgumentParser(
        description = "A tool to plot bin and contig level read depth differences in strain assignment"
        )
    parser.add_argument('-f', '--fai',
                        help="Input reference fasta index file for the bin",
                        required=True, type=str
                        )
    parser.add_argument('-o', '--output',
                        help="Output file basename. Output files are {output}.wins and {output}.pdf",
                        required=True, type=str,
                        )
    parser.add_argument('-b', '--bam',
                        help="Input CCS read depth bam file",
                        required=True, type=str
                        )
    parser.add_argument('--human',
                        help="Input human-readable variant call file",
                        required=True, type=str
                        )
    parser.add_argument('-i', '--binsize',
                        help="Bin size in bases [5000 bp]",
                        type = int, default=5000
                        )
    return parser.parse_args(), parser
def main(args, parser):
    """Count CCS read depth per fixed-size window, overlay haplotype calls
    from the human-readable variant file, and plot the result.

    Writes ``{output}.wins`` (tab-delimited window table) and
    ``{output}.pdf`` (stacked area plot with contig extents underneath).
    """
    # Contig name -> length, from the fasta index (.fai column 2).
    ctglens = dict()
    with open(args.fai, 'r') as fai:
        for l in fai:
            s = l.rstrip().split()
            # BUGFIX: lengths must be ints — they feed range() and the
            # running offset below; the original stored the raw string.
            ctglens[s[0]] = int(s[1])
    # Create windows
    winlist = defaultdict(list)
    # offset bp to add for stitching contigs together in one line
    ctgoffset = dict()
    lastbp = 0
    for c in ctglens:
        ctgoffset[c] = lastbp + 100
        for i in range(0, ctglens[c], args.binsize):
            winlist[c].append(window(c, i, i + args.binsize))
        lastbp += ctglens[c]
    # Read each bam region and count primary alignments per window.
    with pysam.AlignmentFile(args.bam, 'rb') as bamfile:
        for c, w in winlist.items():
            for i, win in enumerate(w):
                count = 0
                for s in bamfile.fetch(c, win.start, win.end):
                    if s.is_secondary:
                        continue
                    count += 1
                winlist = updateWin(winlist, c, i, count)
    # Now, read in the human readable text file and process that
    hapset = set()
    with open(args.human, 'r') as human:
        human.readline()  # skip header line
        for l in human:
            s = l.rstrip().split()
            # determine where the contig start falls
            for i, win in enumerate(winlist[s[2]]):
                if int(s[3]) < win.end and int(s[3]) >= win.start:
                    winlist = updateWin(winlist, s[2], i, int(s[6]), s[4])
                    print(f'Updating window: {s[2]} {win.start} {win.end} to {s[6]} for Hap {s[4]}')
                    hapset.add(s[4])
    # Flatten the windows into long-form records for plotting.
    raw = defaultdict(list)
    bars = list()
    for c, w in winlist.items():
        bars.append([ctgoffset[c], ctglens[c]])
        # BUGFIX: iterate this contig's window list, not the dict's keys.
        for win in w:
            for h in hapset:
                raw["contig"].append(c)
                raw["start"].append(win.start + ctgoffset[c])
                raw["end"].append(win.end + ctgoffset[c])
                raw["hap"].append(h)
                raw["count"].append(win.getCount(h))
    df = pandas.DataFrame(raw)
    df.to_csv(args.output + '.wins', sep='\t', header=True)
    fig = plt.figure(figsize=(6,8))
    ax = df[['start', 'hap', 'count']].plot.area(x='start', y='count', colormap='viridis')
    ax.add_collection(BrokenBarHCollection(bars, [-1, 1], facecolors=plt.get_cmap('tab20')))
    ax.axis('tight')
    plt.savefig(args.output + '.pdf')
def updateWin(winlist, contig, winidx, count, haplotype = 'REF'):
    """Record *count* reads for *haplotype* on window *winidx* of *contig*.

    BUGFIX: the original assigned to ``winlist[contig].count`` — i.e. the
    ``count`` *method* of the window list — which raises a TypeError and
    silently ignored ``winidx``.  Index into the list of windows instead.

    Returns the (mutated) winlist so callers can rebind it.
    """
    winlist[contig][winidx].count[haplotype] = count
    return winlist
class window:
    """A fixed-size interval on a contig carrying per-haplotype read counts."""

    def __init__(self, contig, start, end):
        self.contig = contig  # contig this window belongs to
        self.start = start    # window start coordinate (bp)
        self.end = end        # window end coordinate (bp)
        # Per-haplotype read counts; unseen haplotypes default to zero.
        self.count = defaultdict(int)

    def getCount(self, hap):
        """Return the count stored for *hap*, or 0 when it was never set.

        Uses ``dict.get`` so the lookup never inserts a new key.
        """
        return self.count.get(hap, 0)
if __name__ == "__main__":
    # Script entry point: parse CLI arguments, then run the full pipeline.
    args, parser = arg_parse()
    main(args, parser)
|
[
"pandas.DataFrame",
"argparse.ArgumentParser",
"matplotlib.pyplot.get_cmap",
"pysam.AlignmentFile",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"matplotlib.use",
"matplotlib.pyplot.savefig"
] |
[((153, 174), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (167, 174), False, 'import matplotlib\n'), ((414, 542), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A tool to plot bin and contig level read depth differences in strain assignment"""'}), "(description=\n 'A tool to plot bin and contig level read depth differences in strain assignment'\n )\n", (437, 542), False, 'import argparse\n'), ((1810, 1827), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1821, 1827), False, 'from collections import defaultdict\n'), ((3258, 3275), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3269, 3275), False, 'from collections import defaultdict\n'), ((3708, 3729), 'pandas.DataFrame', 'pandas.DataFrame', (['raw'], {}), '(raw)\n', (3724, 3729), False, 'import pandas\n'), ((3805, 3831), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 8)'}), '(figsize=(6, 8))\n', (3815, 3831), True, 'from matplotlib import pyplot as plt\n'), ((4045, 4078), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.output + '.pdf')"], {}), "(args.output + '.pdf')\n", (4056, 4078), True, 'from matplotlib import pyplot as plt\n'), ((2199, 2234), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['args.bam', '"""rb"""'], {}), "(args.bam, 'rb')\n", (2218, 2234), False, 'import pysam\n'), ((4422, 4438), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4433, 4438), False, 'from collections import defaultdict\n'), ((3996, 4017), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (4008, 4017), True, 'from matplotlib import pyplot as plt\n')]
|
import unittest
import pytest
class TestLocalRegistry(unittest.TestCase):
    """Placeholder suite for local-registry behaviour.

    Every case is marked ``xfail`` with ``run=False``, so pytest collects
    but never executes them; the bare ``assert False`` bodies are
    intentional stubs awaiting real implementations.
    """

    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_get_base_image_exists(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_get_base_image_download(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_get_secure_base_image_exists(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_get_secure_base_image_build(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_build_secure_base_image_download(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_is_running_false(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_is_running_true(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_run_already_running(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_run_success(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_run_failure(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_run_secure(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_stop_success(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_stop_not_running(self):
        assert False
class TestStagingRegistries(unittest.TestCase):
    """Placeholder suite for staging-registry behaviour.

    All cases are ``xfail`` stubs (``run=False``) pending implementation.
    """

    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_target_registry(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_source_registry(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_stage_info(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_setup_success(self):
        assert False
    @pytest.mark.xfail(reason="Not Implemented", run=False)
    def test_teardown(self):
        assert False
|
[
"pytest.mark.xfail"
] |
[((83, 137), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (100, 137), False, 'import pytest\n'), ((207, 261), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (224, 261), False, 'import pytest\n'), ((333, 387), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (350, 387), False, 'import pytest\n'), ((464, 518), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (481, 518), False, 'import pytest\n'), ((594, 648), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (611, 648), False, 'import pytest\n'), ((729, 783), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (746, 783), False, 'import pytest\n'), ((848, 902), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (865, 902), False, 'import pytest\n'), ((966, 1020), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (983, 1020), False, 'import pytest\n'), ((1088, 1142), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (1105, 1142), False, 'import pytest\n'), ((1202, 1256), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (1219, 1256), False, 'import pytest\n'), ((1316, 
1370), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (1333, 1370), False, 'import pytest\n'), ((1429, 1483), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (1446, 1483), False, 'import pytest\n'), ((1544, 1598), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (1561, 1598), False, 'import pytest\n'), ((1713, 1767), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (1730, 1767), False, 'import pytest\n'), ((1831, 1885), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (1848, 1885), False, 'import pytest\n'), ((1949, 2003), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (1966, 2003), False, 'import pytest\n'), ((2062, 2116), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (2079, 2116), False, 'import pytest\n'), ((2178, 2232), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Implemented"""', 'run': '(False)'}), "(reason='Not Implemented', run=False)\n", (2195, 2232), False, 'import pytest\n')]
|
from service.crawler import Crawler
if __name__ == '__main__':
    # Instantiate the crawler and start it when invoked as a script.
    Crawler().run()
|
[
"service.crawler.Crawler"
] |
[((79, 88), 'service.crawler.Crawler', 'Crawler', ([], {}), '()\n', (86, 88), False, 'from service.crawler import Crawler\n')]
|
import json
import requests
from scrapy.selector import Selector
def do_it():
    """Scrape the RusVectores model tables and dump them to ``rusvectores.json``.

    Fetches https://rusvectores.org/en/models/, walks the alternating
    <h2>/<table> elements (the <h2> titles label the tables that follow),
    and builds one record per table row keyed by the row's HTML id; rows
    without an id are skipped.  Silently does nothing on a non-200 response.

    Fixes vs. the original:
    - open() now uses an explicit ``encoding='utf-8'`` — the payload
      contains non-ASCII text (``ensure_ascii=False``) and would fail on
      platforms whose default encoding cannot represent it;
    - the NBSP cleanup replaces the literal character ``'\xa0'`` — with
      ``ensure_ascii=False`` the character is emitted raw, so the escaped
      pattern ``'\\xa0'`` could never match;
    - ``id`` renamed to ``row_id`` to stop shadowing the builtin.
    """
    r = requests.get("https://rusvectores.org/en/models/")
    if r.status_code != 200:
        return  # best-effort: nothing to write on failure
    out = {}
    title = ''
    data = Selector(text=r.content).css('h2, div > table')
    for d in data:
        tag = d.xpath('name()').get()
        if tag == "h2":
            # A heading sets the section title for subsequent tables.
            title = d.css("::text").get()
            continue
        headers = [h.get() for h in d.css("tr>th::text")]
        for row in d.css("tr"):
            row_id = row.css("::attr(id)").get(default='')
            desc = {}
            for i, cell in enumerate(row.css('td')):
                link = cell.css("a::attr(href)").get(default='')
                datum = cell.css("::text").get()
                if link.startswith("/"):
                    # Make site-relative links absolute.
                    link = "https://rusvectores.org" + link
                desc[headers[i]] = {"data": datum, "link": link} if link else datum
            desc["title"] = title
            desc["language"] = 'ru'
            if row_id:
                out[row_id] = desc
    with open('rusvectores.json', 'w', encoding='utf-8') as fp:
        fp.write(json.dumps({"rusvectores": out}, ensure_ascii=False, indent=5).replace('\xa0', ' '))
do_it()  # runs at import/script execution time: scrape and write rusvectores.json
|
[
"scrapy.selector.Selector",
"requests.get",
"json.dumps"
] |
[((87, 137), 'requests.get', 'requests.get', (['"""https://rusvectores.org/en/models/"""'], {}), "('https://rusvectores.org/en/models/')\n", (99, 137), False, 'import requests\n'), ((244, 263), 'scrapy.selector.Selector', 'Selector', ([], {'text': 'body'}), '(text=body)\n', (252, 263), False, 'from scrapy.selector import Selector\n'), ((1526, 1588), 'json.dumps', 'json.dumps', (["{'rusvectores': out}"], {'ensure_ascii': '(False)', 'indent': '(5)'}), "({'rusvectores': out}, ensure_ascii=False, indent=5)\n", (1536, 1588), False, 'import json\n')]
|
from selenium.webdriver.support.ui import WebDriverWait
from time import sleep
from utilities.BasePage import BasePage
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
class WaitlistTagsPage(BasePage):
    """Selenium page object for client/settings tag management on the
    waitlist screens.

    Locator suffixes name the intended strategy (``_css`` / ``_xpath``),
    though a few are mismatched (see notes below).  All interaction goes
    through the BasePage helpers (click, enter, get_text, ...).
    """

    # --- client-panel tag locators ---
    client_add_tags_css = "*[data-testid='components_client-top-panel_add-tags']"
    dd_client_tags_xpath = "//*[@id='components_client-top-panel_add-tag-select']/div/div[2]"
    dd_mobile_client_tags_xpath = "//*[@id='containers_client_add-tag']/div[1]"
    # NOTE(review): appears unused within this class — candidate for removal.
    dd_client_tags_input_xpath = (
        "//div[contains(@class, 'TagSelect__TagSelectField')]/div[1]/div[1]/div[2]/div[1]/input[1]"
    )
    # NOTE(review): missing the data-testid= prefix; as written this CSS
    # selector matches nothing — confirm intent.
    tag_list_container_css = "*[components_client-tag-stack_tag-list-container]"
    btn_remove_client_tag_css = "*[data-testid='components_tag_remove-tag']"
    # NOTE(review): named _css but is an XPath (and used with "XPATH" below).
    settings_tags_css = "//button[normalize-space()='Tags']"
    txt_tag_title_xpath = "//input[@name='title']"
    btn_settings_add_tag_css = "*[data-testid='forms_settings_practice-details_add-tag']"
    txt_settings_add_color_xpath = "//div[@data-testid='forms_settings_practice-details_add-tag_color']"
    btn_settings_edit_tag_xpath = "//button[@aria-label='Edit Tag']"
    btn_settings_delete_tag = "//button[@aria-label='Delete Tag']"
    get_client_tag_name_xpaths = "//div[@data-testid='components_client-tag-stack_tag-list-container']/span"
    settings_tags_xpaths = "//div[@data-testid='components_table']/div[1]/div"
    btn_confirm_delete_settings_tag_xpath = "//button[@aria-label='delete tag']"
    txt_settings_edit_tag_xpath = "//div[contains(@class,'EditTag')]//input[@name='title']"
    btn_save_settings_tag_css = "*[data-testid='containers_settings_practice-details_save-tag']"
    select_first_tag_xpath = "(//div[@data-testid='components_table']/div[1]/div[1]/div[2]/span[1]/span)[1]"
    btn_mobile_close_client_profile_xpath = "//button[@aria-label='Close']"
    # mobile elements
    #mobile_client_tag_id = "containers_client_add-tag"
    # waitlist elements
    def __init__(self, driver):
        """Bind the WebDriver and a 10-second explicit wait."""
        super().__init__(driver)
        self.driver = driver
        self.wait = WebDriverWait(self.driver, 10)
    # tag methods
    def clk_client_add_tags(self):
        """Open the 'add tags' control on the client top panel."""
        self.click("CSS_SELECTOR", self.client_add_tags_css)
    def sel_client_tag(self, tag_name):
        """Open the tag dropdown and pick the option whose text equals *tag_name*."""
        self.click("XPATH", self.dd_client_tags_xpath)
        self.click("XPATH", "//div[contains(@class, 'react-select__menu')]//div[text()='OPTION_VALUE']".replace("OPTION_VALUE", tag_name))
    def clk_remove_client_tag(self):
        """Remove a tag from the client via its remove button."""
        self.click("CSS_SELECTOR", self.btn_remove_client_tag_css)
    def clk_settings_tags(self):
        """Open the 'Tags' section in settings (locator is actually an XPath)."""
        self.click("XPATH", self.settings_tags_css)
    def clk_settings_add_tag(self):
        """Click the settings 'add tag' button."""
        self.click("CSS_SELECTOR", self.btn_settings_add_tag_css)
    def clk_settings_edit_tag(self):
        """Click the settings 'Edit Tag' button."""
        self.click("XPATH", self.btn_settings_edit_tag_xpath)
    def clk_settings_delete_tag(self):
        """Click the settings 'Delete Tag' button."""
        self.click("XPATH", self.btn_settings_delete_tag)
    def input_settings_tag_title(self, tag_title):
        """Type *tag_title* into the tag title field."""
        self.enter("XPATH", self.txt_tag_title_xpath, tag_title)
    def input_settings_tag_color(self, color):
        """Type *color* into the tag color field."""
        self.enter("XPATH", self.txt_settings_add_color_xpath, color)
    def get_client_tags_list(self):
        """Return the client's tag names as a list of strings.

        Steps by 2 — presumably every other <span> in the container is a
        tag label (the alternates being remove buttons); confirm vs. DOM.
        """
        self.wait_for_element_visibility("CSS_SELECTOR", self.client_add_tags_css)
        no_of_tags = self.get_length(self.get_client_tag_name_xpaths)
        tags_name_list = []
        for i in range(1, no_of_tags + 1, 2):
            tag_name_x = self.get_text("XPATH", "//div[@data-testid='components_client-tag-stack_tag-list-container']/span[" + str(i) + "]")
            tags_name_list.append(tag_name_x)
        print("tags name list ", tags_name_list)
        return tags_name_list
    def delete_settings_tag(self, tag_name):
        """Find the settings row whose label equals *tag_name* and click its
        delete button (the second button in the row's action column)."""
        self.wait_for_element_visibility("XPATH", self.txt_tag_title_xpath)
        no_of_tags = self.get_length(self.settings_tags_xpaths)
        tags_name_list = []
        for i in range(1, no_of_tags + 1):
            tag_name_x = self.get_text("XPATH", "//div[@data-testid='components_table']/div[1]/div[" + str(i) + "]/div[2]/span[1]/span")
            tags_name_list.append(tag_name_x)
            if tag_name_x == tag_name:
                select_tag_xpath = "//div[@data-testid='components_table']/div[1]/div[" + str(i) + "]/div[3]/span/button[2]"
                sleep(0.5)
                self.click_js("XPATH", select_tag_xpath)
                break
            else:
                continue
    def clk_confirm_delete_settings_tag(self):
        """Confirm the delete-tag dialog."""
        self.click("XPATH", self.btn_confirm_delete_settings_tag_xpath)
    def get_settings_tags_list(self):
        """Return all tag names visible in the settings table."""
        self.wait_for_element_visibility("XPATH", self.txt_tag_title_xpath)
        no_of_tags = self.get_length(self.settings_tags_xpaths)
        tags_name_list = []
        for i in range(1, no_of_tags + 1):
            tag_name_x = self.get_text("XPATH", "//div[@data-testid='components_table']/div[1]/div[" + str(i) + "]/div[2]/span[1]/span")
            tags_name_list.append(tag_name_x)
        return tags_name_list
    def edit_settings_tag(self, tag_name):
        """Find the settings row whose label equals *tag_name* and click its
        edit button (the first button in the row's action column)."""
        self.wait_for_element_visibility("XPATH", self.txt_tag_title_xpath)
        no_of_tags = self.get_length(self.settings_tags_xpaths)
        tags_name_list = []
        for i in range(1, no_of_tags + 1):
            tag_name_x = self.get_text("XPATH", "//div[@data-testid='components_table']/div[1]/div[" + str(i) + "]/div[2]/span[1]/span")
            tags_name_list.append(tag_name_x)
            if tag_name_x == tag_name:
                select_tag_xpath = "//div[@data-testid='components_table']/div[1]/div[" + str(i) + "]/div[3]/span/button[1]"
                sleep(0.5)
                self.click_js("XPATH", select_tag_xpath)
                break
            else:
                continue
    def input_settings_new_tag_name(self, new_tag_title):
        """Clear the edit-tag title field and type *new_tag_title*."""
        self.clear_field("XPATH", self.txt_settings_edit_tag_xpath)
        self.enter("XPATH", self.txt_settings_edit_tag_xpath, new_tag_title)
    def clk_save_settings_tag(self):
        """Save the tag being created/edited in settings."""
        self.click("CSS_SELECTOR", self.btn_save_settings_tag_css)
    def get_number_of_settings_tag(self):
        """Return the number of rows in the settings tag table.

        NOTE(review): duplicate of get_no_of_settings_tags below.
        """
        self.wait_for_element_visibility("XPATH", self.txt_tag_title_xpath)
        no_of_tags = self.get_length(self.settings_tags_xpaths)
        return no_of_tags
    '''
    def sel_mobile_client_tag(self, tag_name):
        print("tag name ", tag_name)
        mobile_client_tag_id = "containers_client_add-tag"
        element = self.wait_for_element_visibility("ID", mobile_client_tag_id)
        select = Select(element)
        select.select_by_value(tag_name)
    '''
    def sel_mobile_client_tag(self, tag_name):
        """Select *tag_name* in the mobile tag dropdown by typing it and
        clicking the matching react-select option.

        The two triple-quoted blocks around this method are earlier,
        disabled implementations kept for reference.
        """
        print("tage name ", tag_name)
        self.click("XPATH", self.dd_mobile_client_tags_xpath)
        self.enter_character("XPATH", self.dd_mobile_client_tags_xpath, tag_name)
        #self.enter("XPATH", self.dd_mobile_client_tags_xpath, Keys.RETURN)
        self.click("XPATH", "//div[contains(@class, 'react-select__menu')]//div[text()='OPTION_VALUE']".replace("OPTION_VALUE", tag_name))
    '''
    def sel_mobile_client_tag(self, tag_name):
        print("tage name ", tag_name)
        tag_element = self.wait_for_element_visibility("XPATH", self.dd_mobile_client_tags_xpath)
        self.click("XPATH", self.dd_mobile_client_tags_xpath)
        self.driver.execute_script("arguments[0].value= '';", tag_element)
        self.driver.execute_script("arguments[0].value= '" + tag_name + "';", tag_element)
    '''
    def get_mobile_client_tags_list(self):
        """Return the client's tag names on the mobile layout.

        Steps by 4 — presumably the mobile DOM interleaves three extra
        <span>s per tag; confirm against the rendered markup.
        """
        self.wait_for_element_visibility("XPATH", self.dd_mobile_client_tags_xpath)
        no_of_tags = self.get_length(self.get_client_tag_name_xpaths)
        print("\n no of tags ", no_of_tags)
        tags_name_list = []
        #for i in range(1, no_of_tags + 1, 4):
        for i in range(1, no_of_tags, 4):
            tag_name_x = self.get_text("XPATH", "//div[@data-testid='components_client-tag-stack_tag-list-container']/span[" + str(i) + "]")
            tags_name_list.append(tag_name_x)
        print("tags name list ", tags_name_list)
        return tags_name_list
    def clk_first_settings_tag(self):
        """Click the delete button of the first row in the settings table."""
        select_tag_xpath = "//div[@data-testid='components_table']/div[1]/div[1]/div[3]/span/button[2]"
        sleep(0.5)
        self.click_js("XPATH", select_tag_xpath)
    def get_no_of_settings_tags(self):
        """Return the number of rows in the settings tag table.

        NOTE(review): duplicate of get_number_of_settings_tag above.
        """
        self.wait_for_element_visibility("XPATH", self.txt_tag_title_xpath)
        no_of_tags = self.get_length(self.settings_tags_xpaths)
        return no_of_tags
    def clk_mobile_client_profile_close(self):
        """Close the mobile client profile panel."""
        self.click("XPATH", self.btn_mobile_close_client_profile_xpath)
    # waitlist methods
|
[
"selenium.webdriver.support.ui.WebDriverWait",
"time.sleep"
] |
[((2017, 2047), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['self.driver', '(10)'], {}), '(self.driver, 10)\n', (2030, 2047), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((7526, 7536), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (7531, 7536), False, 'from time import sleep\n'), ((4051, 4061), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (4056, 4061), False, 'from time import sleep\n'), ((5202, 5212), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (5207, 5212), False, 'from time import sleep\n')]
|
"""
AUTOR: Juanjo
FECHA DE CREACIÓN: 24/01/2019
"""
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, Length
class PostForm(FlaskForm):
    """Post creation/edit form: required title (max 128 chars), free-text
    content, and a submit button.  Field labels are user-facing Spanish."""
    title = StringField('Título', validators=[DataRequired(), Length(max=128)])
    content = TextAreaField('Contenido')
    submit = SubmitField('Enviar')
|
[
"wtforms.SubmitField",
"wtforms.validators.DataRequired",
"wtforms.validators.Length",
"wtforms.TextAreaField"
] |
[((323, 349), 'wtforms.TextAreaField', 'TextAreaField', (['"""Contenido"""'], {}), "('Contenido')\n", (336, 349), False, 'from wtforms import StringField, SubmitField, TextAreaField\n'), ((363, 384), 'wtforms.SubmitField', 'SubmitField', (['"""Enviar"""'], {}), "('Enviar')\n", (374, 384), False, 'from wtforms import StringField, SubmitField, TextAreaField\n'), ((276, 290), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (288, 290), False, 'from wtforms.validators import DataRequired, Length\n'), ((292, 307), 'wtforms.validators.Length', 'Length', ([], {'max': '(128)'}), '(max=128)\n', (298, 307), False, 'from wtforms.validators import DataRequired, Length\n')]
|
import os
import flask
import flask_login
import lib.utils as utils
from lib.admin import account
from lib.admin.database import db
from lib.admin.bcrypt import bcrypt
from lib.admin import validity, const
from lib.admin.admin import admin, login_manager
from lib.eviltwin.eviltwin import eviltwin
from lib.interface.interface import interface
from lib.handshake.handshake import handshake
from lib.interface.backend import InterfaceBackend
from lib.accesspoints.accesspoints import accesspoints
# Flask application wiring: configuration, blueprint registration, auth,
# database setup, and first-run admin account creation.  Runs at import time.
app = flask.Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = f"sqlite:///{const.database_path}"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# NOTE(review): a fresh random SECRET_KEY on every start invalidates all
# existing sessions whenever the app restarts — confirm this is intended.
app.config["SECRET_KEY"] = os.urandom(0x200)
app.config["JSON_SORT_KEYS"] = False
# Register blueprints
admin_url_prefix = utils.gen_admin_url_prefix()
app.register_blueprint(admin, url_prefix=f"/{admin_url_prefix}")
app.register_blueprint(eviltwin, url_prefix="/eviltwin")
app.register_blueprint(interface, url_prefix="/interface")
app.register_blueprint(handshake, url_prefix="/handshake")
app.register_blueprint(accesspoints, url_prefix="/accesspoints")
# Made for reverse engineers
app.register_blueprint(eviltwin, url_prefix="/api/v1/router")
# Bcrypt
bcrypt.init_app(app)
# Login Manager
login_manager.init_app(app)
login_manager.login_view = "/"
# Database
db.app = app
db.init_app(app)
# Create config
utils.create_js_config_file(admin_url_prefix)
# Account
# First run only: if the sqlite file is missing, create its directory,
# prompt for admin credentials on the console, and initialise the schema.
if not os.path.exists(const.database_path):
    db_dirname = os.path.dirname(const.database_path)
    if db_dirname and not os.path.exists(db_dirname):
        os.makedirs(db_dirname)
    # create admin account here
    print("You must create an account")
    username = account.get_user_input(validator=validity.check_username)
    password = account.get_user_input(
        validator=validity.check_password, is_password=True
    )
    # initialize the database
    db.create_all()
    print(account.create_user(username, password))
print(account.create_user(username, password))
@app.after_request
def add_header(r):
    """Attach no-cache headers to every response so clients always refetch.

    BUGFIX: the original first set ``Cache-Control`` to
    ``no-cache, no-store, must-revalidate`` and then immediately overwrote
    it with ``public, max-age=0`` — only the final value was ever sent, so
    the dead first assignment has been removed.
    """
    r.headers["Pragma"] = "no-cache"
    r.headers["Expires"] = "0"
    r.headers["Cache-Control"] = "public, max-age=0"
    return r
@app.route("/")
def index():
    """Render the public landing page; logged-in admins are sent straight
    to the dashboard instead of the public view."""
    user = flask_login.current_user
    if not user.is_authenticated:
        return flask.render_template("index.html")
    # Admin should not be able to see the public view page.
    return flask.redirect(flask.url_for("admin.dashboard"))
@app.route("/", methods=["GET", "POST"], defaults={"path": ""})
@app.route("/<path:path>", methods=["GET", "POST"])
def fallback(path):
    """Catch-all route: redirect any unknown URL back to the index page."""
    destination = flask.url_for("index")
    return flask.redirect(destination)
def init_proc():
    """Process to run at run time.

    Resets the host to a clean state before serving: kills leftover
    processes, stops conflicting services, then disables the network
    interfaces via the interface backend.
    """
    utils.kill_all()
    utils.stop_services()
    InterfaceBackend.disable_interfaces()
if __name__ == "__main__":
    # Print the (randomised) dashboard URL, clean up the host, then serve
    # on all interfaces, port 80, with debug off.
    print(f"\nDashboard: http://localhost/{admin_url_prefix}\n")
    init_proc()
    app.run(host="0.0.0.0", port=80, debug=False)
|
[
"lib.admin.bcrypt.bcrypt.init_app",
"os.makedirs",
"lib.interface.backend.InterfaceBackend.disable_interfaces",
"lib.admin.database.db.create_all",
"lib.admin.admin.login_manager.init_app",
"lib.utils.gen_admin_url_prefix",
"flask.Flask",
"lib.admin.database.db.init_app",
"os.path.exists",
"os.path.dirname",
"lib.admin.account.get_user_input",
"lib.utils.kill_all",
"lib.utils.stop_services",
"flask.url_for",
"lib.admin.account.create_user",
"flask.render_template",
"lib.utils.create_js_config_file",
"os.urandom"
] |
[((524, 545), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (535, 545), False, 'import flask\n'), ((706, 721), 'os.urandom', 'os.urandom', (['(512)'], {}), '(512)\n', (716, 721), False, 'import os\n'), ((807, 835), 'lib.utils.gen_admin_url_prefix', 'utils.gen_admin_url_prefix', ([], {}), '()\n', (833, 835), True, 'import lib.utils as utils\n'), ((1254, 1274), 'lib.admin.bcrypt.bcrypt.init_app', 'bcrypt.init_app', (['app'], {}), '(app)\n', (1269, 1274), False, 'from lib.admin.bcrypt import bcrypt\n'), ((1295, 1322), 'lib.admin.admin.login_manager.init_app', 'login_manager.init_app', (['app'], {}), '(app)\n', (1317, 1322), False, 'from lib.admin.admin import admin, login_manager\n'), ((1384, 1400), 'lib.admin.database.db.init_app', 'db.init_app', (['app'], {}), '(app)\n', (1395, 1400), False, 'from lib.admin.database import db\n'), ((1421, 1466), 'lib.utils.create_js_config_file', 'utils.create_js_config_file', (['admin_url_prefix'], {}), '(admin_url_prefix)\n', (1448, 1466), True, 'import lib.utils as utils\n'), ((1488, 1523), 'os.path.exists', 'os.path.exists', (['const.database_path'], {}), '(const.database_path)\n', (1502, 1523), False, 'import os\n'), ((1543, 1579), 'os.path.dirname', 'os.path.dirname', (['const.database_path'], {}), '(const.database_path)\n', (1558, 1579), False, 'import os\n'), ((1764, 1821), 'lib.admin.account.get_user_input', 'account.get_user_input', ([], {'validator': 'validity.check_username'}), '(validator=validity.check_username)\n', (1786, 1821), False, 'from lib.admin import account\n'), ((1838, 1913), 'lib.admin.account.get_user_input', 'account.get_user_input', ([], {'validator': 'validity.check_password', 'is_password': '(True)'}), '(validator=validity.check_password, is_password=True)\n', (1860, 1913), False, 'from lib.admin import account\n'), ((1968, 1983), 'lib.admin.database.db.create_all', 'db.create_all', ([], {}), '()\n', (1981, 1983), False, 'from lib.admin.database import db\n'), ((2506, 2541), 
'flask.render_template', 'flask.render_template', (['"""index.html"""'], {}), "('index.html')\n", (2527, 2541), False, 'import flask\n'), ((2803, 2819), 'lib.utils.kill_all', 'utils.kill_all', ([], {}), '()\n', (2817, 2819), True, 'import lib.utils as utils\n'), ((2825, 2846), 'lib.utils.stop_services', 'utils.stop_services', ([], {}), '()\n', (2844, 2846), True, 'import lib.utils as utils\n'), ((2852, 2889), 'lib.interface.backend.InterfaceBackend.disable_interfaces', 'InterfaceBackend.disable_interfaces', ([], {}), '()\n', (2887, 2889), False, 'from lib.interface.backend import InterfaceBackend\n'), ((1646, 1669), 'os.makedirs', 'os.makedirs', (['db_dirname'], {}), '(db_dirname)\n', (1657, 1669), False, 'import os\n'), ((1995, 2034), 'lib.admin.account.create_user', 'account.create_user', (['username', 'password'], {}), '(username, password)\n', (2014, 2034), False, 'from lib.admin import account\n'), ((2712, 2734), 'flask.url_for', 'flask.url_for', (['"""index"""'], {}), "('index')\n", (2725, 2734), False, 'import flask\n'), ((1609, 1635), 'os.path.exists', 'os.path.exists', (['db_dirname'], {}), '(db_dirname)\n', (1623, 1635), False, 'import os\n'), ((2460, 2492), 'flask.url_for', 'flask.url_for', (['"""admin.dashboard"""'], {}), "('admin.dashboard')\n", (2473, 2492), False, 'import flask\n')]
|