code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
"""
HexitecDAQ for Hexitec ODIN control.
<NAME>, STFC Detector Systems Software Group
"""
import logging
from functools import partial
from tornado.ioloop import IOLoop
from odin.adapters.adapter import ApiAdapterRequest
from odin.adapters.parameter_tree import ParameterTree, ParameterTreeError
from hexitec.GenerateConfigFiles import GenerateConfigFiles
import h5py
from datetime import datetime
import collections.abc
import time
import os
class HexitecDAQ():
    """
    Encapsulates all the functionality to initiate the DAQ.

    Configures the Frame Receiver and Frame Processor plugins.
    Configures the HDF File Writer Plugin.
    Configures the Live View Plugin.
    """

    # Valid values for the threshold plugin's operating mode
    THRESHOLDOPTIONS = ["value", "filename", "none"]
    # Valid HDF5 dataset compression options
    COMPRESSIONOPTIONS = ["none", "blosc"]
    # Define timestamp format (used for the diagnostics timestamps)
    DATE_FORMAT = '%Y%m%d_%H%M%S.%f'
def __init__(self, parent, save_file_dir="", save_file_name=""):
    """
    Initialize the HexitecDAQ object.

    This constructor initializes the HexitecDAQ object.
    :param parent: Reference to adapter object
    :param save_file_dir: save processed file to directory
    :param save_file_name: save processed file name as
    """
    self.parent = parent
    self.adapters = {}
    self.file_dir = save_file_dir
    self.file_name = save_file_name
    self.in_progress = False
    self.is_initialised = False
    # These variables are used to tell when an acquisition is completed
    self.frame_start_acquisition = 0  # number of frames received at start of acq
    self.frame_end_acquisition = 0  # number of frames at end of acq (acq number)
    # First initialisation fudges data acquisition (but without writing to disk)
    self.first_initialisation = True
    self.file_writing = False
    self.config_dir = ""
    # Per-adapter ("fp"/"fr") config file names, resolved in get_config_file()
    self.config_files = {
        "fp": "",
        "fr": ""
    }
    self.hdf_file_location = ""
    self.hdf_retry = 0
    # Construct path to hexitec source code (everything before "control" in cwd)
    cwd = os.getcwd()
    index = cwd.rfind("control")
    self.base_path = cwd[:index]
    # ParameterTree variables
    self.sensors_layout = "2x2"
    self.compression_type = "none"
    # Note that these four enable(s) are cosmetic only - written as meta data
    # actual control is exercised by odin_server.js via sequence config files
    # loading select(ed) plugins
    self.addition_enable = False
    self.discrimination_enable = False
    self.calibration_enable = False
    self.next_frame_enable = False
    self.pixel_grid_size = 3
    self.gradients_filename = ""
    self.intercepts_filename = ""
    # Histogram binning: [bin_start, bin_end) in steps of bin_width
    self.bin_end = 8000
    self.bin_start = 0
    self.bin_width = 10.0
    self.number_histograms = int((self.bin_end - self.bin_start) / self.bin_width)
    self.max_frames_received = 10
    self.pass_processed = False
    self.pass_raw = False
    # Look at histogram/hdf to determine when processing finished:
    self.plugin = "histogram"
    self.master_dataset = "spectra_bins"
    self.extra_datasets = []
    # Processing timeout variables, support adapter's watchdog
    self.processed_timestamp = 0
    self.frames_processed = 0
    self.shutdown_processing = False
    self.threshold_filename = ""
    self.threshold_mode = "value"
    self.threshold_value = 120
    # 160x160 default; recalculated from sensors_layout below
    self.rows, self.columns = 160, 160
    self.pixels = self.rows * self.columns
    self.number_frames = 10
    # Diagnostics
    self.daq_start_time = 0
    self.fem_not_busy = 0
    self.daq_stop_time = 0
    # Parameter tree exposing status and per-plugin configuration
    self.param_tree = ParameterTree({
        "diagnostics": {
            "daq_start_time": (lambda: self.daq_start_time, None),
            "daq_stop_time": (lambda: self.daq_stop_time, None),
            "fem_not_busy": (lambda: self.fem_not_busy, None),
        },
        "receiver": {
            "connected": (partial(self._is_od_connected, adapter="fr"), None),
            "configured": (self._is_fr_configured, None),
            "config_file": (partial(self.get_config_file, "fr"), None)
        },
        "processor": {
            "connected": (partial(self._is_od_connected, adapter="fp"), None),
            "configured": (self._is_fp_configured, None),
            "config_file": (partial(self.get_config_file, "fp"), None)
        },
        "file_info": {
            "enabled": (lambda: self.file_writing, self.set_file_writing),
            "file_name": (lambda: self.file_name, self.set_file_name),
            "file_dir": (lambda: self.file_dir, self.set_data_dir)
        },
        "in_progress": (lambda: self.in_progress, None),
        "config": {
            "addition": {
                "enable": (lambda: self.addition_enable, self._set_addition_enable),
                "pixel_grid_size": (lambda: self.pixel_grid_size, self._set_pixel_grid_size)
            },
            "calibration": {
                "enable": (lambda: self.calibration_enable, self._set_calibration_enable),
                "gradients_filename": (lambda: self.gradients_filename,
                                       self._set_gradients_filename),
                "intercepts_filename": (lambda: self.intercepts_filename,
                                        self._set_intercepts_filename)
            },
            "discrimination": {
                "enable": (lambda: self.discrimination_enable, self._set_discrimination_enable),
                "pixel_grid_size": (lambda: self.pixel_grid_size, self._set_pixel_grid_size)
            },
            "histogram": {
                "bin_end": (lambda: self.bin_end, self._set_bin_end),
                "bin_start": (lambda: self.bin_start, self._set_bin_start),
                "bin_width": (lambda: self.bin_width, self._set_bin_width),
                "max_frames_received": (lambda: self.max_frames_received,
                                        self._set_max_frames_received),
                "pass_processed": (lambda: self.pass_processed, self._set_pass_processed),
                "pass_raw": (lambda: self.pass_raw, self._set_pass_raw)
            },
            "next_frame": {
                "enable": (lambda: self.next_frame_enable, self._set_next_frame_enable)
            },
            "threshold": {
                "threshold_filename": (lambda: self.threshold_filename,
                                       self._set_threshold_filename),
                "threshold_mode": (lambda: self.threshold_mode, self._set_threshold_mode),
                "threshold_value": (lambda: self.threshold_value, self._set_threshold_value)
            }
        },
        "compression_type": (self._get_compression_type, self._set_compression_type),
        "sensors_layout": (self._get_sensors_layout, self._set_sensors_layout)
    })
    self.update_rows_columns_pixels()
    # Placeholder for GenerateConfigFiles instance generating json files
    self.gcf = None
def initialize(self, adapters):
    """Initialise adapters and related parameter tree entries.

    :param adapters: dict of adapter instances; must contain keys
                     'fp', 'fr' and 'file_interface'.
    """
    self.adapters["fp"] = adapters['fp']
    self.adapters["fr"] = adapters['fr']
    self.adapters["file_interface"] = adapters['file_interface']
    # Fix: mark initialised BEFORE fetching the config files.
    # get_config_file() short-circuits (returns "") while is_initialised
    # is False, so the original ordering made both calls below no-ops and
    # left self.config_files empty.
    self.is_initialised = True
    self.get_config_file("fp")
    self.get_config_file("fr")
def start_acquisition(self, number_frames):
    """Ensure the odin data FP and FR are configured, and turn on File Writing.

    :param number_frames: number of frames expected in this acquisition
    """
    logging.debug("Setting up Acquisition")
    fr_status = self.get_od_status("fr")
    fp_status = self.get_od_status("fp")
    # Abort if the Frame Receiver is unreachable; (re)configure it if needed
    if self._is_od_connected(fr_status) is False:
        logging.error("Cannot start Acquisition: Frame Receiver not found")
        return
    elif self._is_fr_configured(fr_status) is False:
        self._config_odin_data("fr")
    else:
        logging.debug("Frame Receiver Already connected/configured")
    # Same checks for the Frame Processor
    if self._is_od_connected(fp_status) is False:
        logging.error("Cannot Start Acquisition: Frame Processor not found")
        return
    elif self._is_fp_configured(fp_status) is False:
        self._config_odin_data("fp")
    else:
        logging.debug("Frame Processor Already connected/configured")
    hdf_status = fp_status.get('hdf', None)
    if hdf_status is None:
        fp_status = self.get_od_status('fp')
        # Get current frame written number. If not found, assume FR
        # just started up and it will be 0
        hdf_status = fp_status.get('hdf', {"frames_processed": 0})
    # Record frame count at start; completion is detected when frames_processed
    # reaches frame_end_acquisition (see processing_check_loop)
    self.frame_start_acquisition = hdf_status['frames_processed']
    self.frame_end_acquisition = number_frames
    logging.info("FRAME START ACQ: %d END ACQ: %d",
                 self.frame_start_acquisition,
                 self.frame_start_acquisition + number_frames)
    self.in_progress = True
    # Reset timeout watchdog
    self.processed_timestamp = time.time()
    logging.debug("Starting File Writer")
    if self.first_initialisation:
        # First initialisation captures data without writing to disk
        # therefore don't enable file writing here
        pass  # pragma: no cover
    else:
        self.set_file_writing(True)
    # Diagnostics:
    self.daq_start_time = '%s' % (datetime.now().strftime(HexitecDAQ.DATE_FORMAT))
    # Wait while fem(s) finish sending data
    IOLoop.instance().call_later(1.3, self.acquisition_check_loop)
def acquisition_check_loop(self):
    """Wait for acquisition to complete without blocking current thread."""
    bBusy = self.parent.fems[0].hardware_busy
    if bBusy:
        # Hardware still busy sending data; poll again in 0.5 s
        IOLoop.instance().call_later(0.5, self.acquisition_check_loop)
    else:
        # Record when the fem(s) finished, for the diagnostics tree
        self.fem_not_busy = '%s' % (datetime.now().strftime(HexitecDAQ.DATE_FORMAT))
        # Reset timeout watchdog
        self.processed_timestamp = time.time()
        self.frames_processed = 0
        # Hand over to monitoring the processing chain
        IOLoop.instance().call_later(0.5, self.processing_check_loop)
def processing_check_loop(self):
    """Check that the processing has completed."""
    if self.first_initialisation:
        # First initialisation runs without file writing; Stop acquisition
        # without reopening (non-existent) file to add meta data
        self.first_initialisation = False
        self.in_progress = False
        # Delay calling stop_acquisition, otherwise software may beat fem to it
        IOLoop.instance().call_later(2.0, self.stop_acquisition)
        return
    # Not fudge initialisation; Check HDF/histogram processing progress
    # (self.plugin is "histogram" or "hdf" depending on selected datasets)
    processing_status = self.get_od_status('fp').get(self.plugin, {'frames_processed': 0})
    if processing_status['frames_processed'] == self.frame_end_acquisition:
        delay = 1.0
        IOLoop.instance().call_later(delay, self.stop_acquisition)
        logging.debug("Acquisition Complete")
        # All required frames acquired; if either of frames based datasets
        # selected, wait for hdf file to close
        IOLoop.instance().call_later(delay, self.hdf_closing_loop)
    else:
        # Not all frames processed yet; Check data still in flow
        if processing_status['frames_processed'] == self.frames_processed:
            # No frames processed in at least 0.5 sec, did processing time out?
            if self.shutdown_processing:
                # Adapter's watchdog requested shutdown; give up on this run
                self.shutdown_processing = False
                self.in_progress = False
                # Don't turn off FileWriterPlugin; Wait for EndOfAcquisition to flush out histograms
                self.daq_stop_time = '%s' % (datetime.now().strftime(HexitecDAQ.DATE_FORMAT))
                self.file_writing = False
                return
        else:
            # Data still bein' processed; refresh the watchdog timestamp
            self.processed_timestamp = time.time()
            self.frames_processed = processing_status['frames_processed']
        # Wait 0.5 seconds and check again
        IOLoop.instance().call_later(.5, self.processing_check_loop)
def stop_acquisition(self):
    """Disable file writing so processing can access the saved data to add Meta data.

    Also records the stop timestamp for the diagnostics parameter tree.
    """
    self.daq_stop_time = '%s' % (datetime.now().strftime(HexitecDAQ.DATE_FORMAT))
    self.set_file_writing(False)
def hdf_closing_loop(self):
    """Wait for processing to complete but don't block, before prep to write meta data."""
    # Default to "writing" so we keep polling if the hdf key is missing
    hdf_status = self.get_od_status('fp').get('hdf', {"writing": True})
    if hdf_status['writing']:
        IOLoop.instance().call_later(0.5, self.hdf_closing_loop)
    else:
        # NOTE(review): '_000001.h5' suffix assumes the FP writes a single
        # file with this numbering scheme - confirm against FP configuration
        self.hdf_file_location = self.file_dir + self.file_name + '_000001.h5'
        # Check file exists before reopening to add metadata
        if os.path.exists(self.hdf_file_location):
            self.prepare_hdf_file()
        else:
            self.parent.fems[0]._set_status_error("No file to add meta: %s" %
                                                  self.hdf_file_location)
            self.in_progress = False
def prepare_hdf_file(self):
    """Re-open HDF5 file, prepare meta data.

    Retries up to 6 times (via hdf_closing_loop) if the file is temporarily
    busy; on success writes the parent parameter tree and the FP settings
    into "hexitec" and "hdf" metadata groups respectively.
    """
    try:
        hdf_file = h5py.File(self.hdf_file_location, 'r+')
        for fem in self.parent.fems:
            fem._set_status_message("Reopening file to add meta data..")
        self.hdf_retry = 0
    except IOError as e:
        # Let's retry a couple of times in case file temporary busy
        if self.hdf_retry < 6:
            self.hdf_retry += 1
            logging.warning(" Re-try attempt: %s Reopen file, because: %s" %
                            (self.hdf_retry, e))
            IOLoop.instance().call_later(0.5, self.hdf_closing_loop)
            return
        # Retries exhausted; give up and flag the error on all fems
        logging.error("Failed to open '%s' with error: %s" % (self.hdf_file_location, e))
        self.in_progress = False
        for fem in self.parent.fems:
            fem._set_status_error("Error reopening HDF file: %s" % e)
        return
    error_code = 0
    # Create metadata group, add dataset to it and pass to write function
    parent_metadata_group = hdf_file.create_group("hexitec")
    parent_tree_dict = self.parent.param_tree.get('')
    error_code = self.write_metadata(parent_metadata_group, parent_tree_dict)
    # TODO: Hacked until frame_process_adapter updated to use parameter tree
    hdf_metadata_group = hdf_file.create_group("hdf")
    hdf_tree_dict = self.adapters['fp']._param
    # Only "hexitec" group contain filename entries, ignore return value of write_metadata
    self.write_metadata(hdf_metadata_group, hdf_tree_dict)
    if (error_code == 0):
        for fem in self.parent.fems:
            fem._set_status_message("Meta data added")
    else:
        for fem in self.parent.fems:
            fem._set_status_error("Meta data writer unable to access file(s)!")
    hdf_file.close()
    self.in_progress = False
def build_metadata_attributes(self, param_tree_dict, metadata_group):
    """Copy key/value pairs from param_tree_dict onto metadata_group as attributes.

    :param param_tree_dict: flat dict of parameter names to values
    :param metadata_group: HDF5 group (anything with an ``attrs`` mapping)
    :return: the same metadata_group, with attributes populated
    """
    for key, value in param_tree_dict.items():
        # h5py cannot store Python None ("Object dtype dtype('O') has no
        # native HDF5 equivalent"), so substitute a placeholder string
        metadata_group.attrs[key] = "N/A" if value is None else value
    return metadata_group
def write_metadata(self, metadata_group, param_tree_dict):
    """Write parameter tree(s) and config files as meta data.

    :param metadata_group: HDF5 group receiving the attributes/datasets
    :param param_tree_dict: (possibly nested) parameter tree dictionary
    :return: 0 on success; -1 read error, -2 dataset creation error,
             -3 referenced file missing
    """
    param_tree_dict = self._flatten_dict(param_tree_dict)
    # Build metadata attributes from dictionary
    self.build_metadata_attributes(param_tree_dict, metadata_group)
    # Only write parent's (Hexitec class) parameter tree's config files once
    if metadata_group.name == u'/hexitec':
        # Add additional attribute to record current date
        metadata_group.attrs['runDate'] = datetime.now().strftime('%d-%m-%Y %H:%M:%S')
        # Write the configuration files into the metadata group
        self.config_ds = {}
        str_type = h5py.special_dtype(vlen=str)
        # Write contents of config file, and selected coefficients file(s)
        file_name = ['detector/fems/fem_0/hexitec_config']
        if self.calibration_enable:
            file_name.append('detector/daq/config/calibration/gradients_filename')
            file_name.append('detector/daq/config/calibration/intercepts_filename')
        if self.threshold_mode == self.THRESHOLDOPTIONS[1]:  # = "filename"
            file_name.append('detector/daq/config/threshold/threshold_filename')
        # NOTE(review): 'file_name' is rebound inside the loop below, from the
        # list of keys to each key's file path; the loop iterator still works
        # (it holds the original list) but the reuse is confusing
        for param_file in file_name:
            if param_file not in param_tree_dict:
                continue
            # Only attempt to open file if it exists
            file_name = param_tree_dict[param_file]
            if os.path.isfile(file_name):
                self.config_ds[param_file] = \
                    metadata_group.create_dataset(param_file, shape=(1,), dtype=str_type)
                try:
                    with open(file_name, 'r') as xml_file:
                        self.config_ds[param_file][:] = xml_file.read()
                except IOError as e:
                    logging.error("Failed to read %s XML file %s : %s " %
                                  (param_file, file_name, e))
                    return -1
                except Exception as e:
                    logging.error("Exception creating metadata for %s XML file %s : %s" %
                                  (param_file, param_file, e))
                    return -2
                logging.debug("Key '%s'; Successfully read file '%s'" % (param_file, file_name))
            else:
                logging.error("Key: %s's file: %s. Doesn't exist!" % (param_file, file_name))
                return -3
    return 0
def _flatten_dict(self, d, parent_key='', sep='/'):
"""Flatten a dictionary of nested dictionary into single dictionary of key-value pairs."""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(self._flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def _is_od_connected(self, status=None, adapter=""):
if status is None:
status = self.get_od_status(adapter)
return status.get("connected", False)
def _is_fr_configured(self, status={}):
if status.get('status') is None:
status = self.get_od_status("fr")
config_status = status.get("status", {}).get("configuration_complete", False)
return config_status
def _is_fp_configured(self, status=None):
status = self.get_od_status("fp")
config_status = status.get("plugins") # if plugins key exists, it has been configured
return config_status is not None
def get_od_status(self, adapter):
"""Get status from adapter."""
if not self.is_initialised:
return {"Error": "Adapter not initialised with references yet"}
try:
request = ApiAdapterRequest(None, content_type="application/json")
response = self.adapters[adapter].get("status", request)
response = response.data["value"][0]
except KeyError:
logging.warning("%s Adapter Not Found" % adapter)
response = {"Error": "Adapter {} not found".format(adapter)}
finally:
return response
def get_config_file(self, adapter):
    """Get config file from adapter.

    Queries the file_interface adapter for the list of config files for
    *adapter* ("fp" or "fr"), preferring one whose name contains "hexitec".

    :param adapter: adapter key, "fp" or "fr"
    :return: selected config file name, or "" if unavailable
    """
    if not self.is_initialised:
        # IAC not setup yet
        return ""
    try:
        return_val = ""
        request = ApiAdapterRequest(None)
        response = self.adapters['file_interface'].get('', request).data
        self.config_dir = response['config_dir']
        for config_file in response["{}_config_files".format(adapter)]:
            if "hexitec" in config_file.lower():
                return_val = config_file
                break
        else:  # else of for loop: calls if loop finished without hitting break
            # just return the first config file found
            return_val = response["{}_config_files".format(adapter)][0]
    except KeyError as key_error:
        logging.warning("KeyError when trying to get config file: %s" % key_error)
        return_val = ""
    finally:
        # Cache the choice for _config_odin_data() before returning
        self.config_files[adapter] = return_val
        return return_val
def set_data_dir(self, directory):
"""Set directory of processed file."""
self.file_dir = directory
def set_number_frames(self, number_frames):
"""Set number of frames to be acquired."""
self.number_frames = number_frames
def set_file_name(self, name):
"""Set processed file name."""
self.file_name = name
def set_file_writing(self, writing):
    """Update processed file details, file writing and histogram setting.

    :param writing: True to enable the FP HDF writer, False to disable
    """
    command = "config/hdf/frames"
    request = ApiAdapterRequest(self.file_dir, content_type="application/json")
    # request.body = "{}".format(self.number_frames)
    # 0 = unlimited frames; the writer is stopped explicitly instead
    request.body = "{}".format(0)
    self.adapters["fp"].put(command, request)
    # send command to Odin Data
    command = "config/hdf/file/path"
    request = ApiAdapterRequest(self.file_dir, content_type="application/json")
    self.adapters["fp"].put(command, request)
    # NOTE: the same request object is reused below with its body replaced
    command = "config/hdf/file/name"
    request.body = self.file_name
    self.adapters["fp"].put(command, request)
    command = "config/hdf/write"
    request.body = "{}".format(writing)
    self.adapters["fp"].put(command, request)
    # Target both config/histogram/max_frames_received and own ParameterTree
    self._set_max_frames_received(self.number_frames)
    command = "config/histogram/max_frames_received"
    request = ApiAdapterRequest(self.file_dir, content_type="application/json")
    request.body = "{}".format(self.number_frames)
    self.adapters["fp"].put(command, request)
    # Finally, update own file_writing so FEM(s) know the status
    self.file_writing = writing
def _config_odin_data(self, adapter):
    """Send the cached config file to the named odin-data adapter.

    :param adapter: adapter key, "fp" or "fr"
    """
    config = os.path.join(self.config_dir, self.config_files[adapter])
    config = os.path.expanduser(config)
    # The adapter expects an absolute path; prefix '/' if not already absolute
    if not config.startswith('/'):
        config = '/' + config
    logging.debug(config)
    request = ApiAdapterRequest(config, content_type="application/json")
    command = "config/config_file"
    _ = self.adapters[adapter].put(command, request)
def update_rows_columns_pixels(self):
"""Update rows, columns and pixels from selected sensors_layout value.
e.g. sensors_layout = "3x2" => 3 rows of sensors by 2 columns of sensors
"""
self.sensors_rows, self.sensors_columns = self.sensors_layout.split("x")
self.rows = int(self.sensors_rows) * 80
self.columns = int(self.sensors_columns) * 80
self.pixels = self.rows * self.columns
def _set_addition_enable(self, addition_enable):
self.addition_enable = addition_enable
def _set_calibration_enable(self, calibration_enable):
self.calibration_enable = calibration_enable
def _set_discrimination_enable(self, discrimination_enable):
self.discrimination_enable = discrimination_enable
def _set_next_frame_enable(self, next_frame_enable):
self.next_frame_enable = next_frame_enable
def _set_pixel_grid_size(self, size):
if (size in [3, 5]):
self.pixel_grid_size = size
else:
raise ParameterTreeError("Must be either 3 or 5")
def _set_gradients_filename(self, gradients_filename):
gradients_filename = self.base_path + gradients_filename
if (os.path.isfile(gradients_filename) is False):
raise ParameterTreeError("Gradients file doesn't exist")
self.gradients_filename = gradients_filename
def _set_intercepts_filename(self, intercepts_filename):
intercepts_filename = self.base_path + intercepts_filename
if (os.path.isfile(intercepts_filename) is False):
raise ParameterTreeError("Intercepts file doesn't exist")
self.intercepts_filename = intercepts_filename
def _set_bin_end(self, bin_end):
    """Update bin_end, then refresh the histogram datasets' dimensions."""
    self.bin_end = bin_end
    self.update_histogram_dimensions()
def _set_bin_start(self, bin_start):
    """Update bin_start, then refresh the histogram datasets' dimensions."""
    self.bin_start = bin_start
    self.update_histogram_dimensions()
def _set_bin_width(self, bin_width):
    """Update bin_width, then refresh the histogram datasets' dimensions."""
    self.bin_width = bin_width
    self.update_histogram_dimensions()
def update_datasets_frame_dimensions(self):
    """Update frames' datasets' dimensions.

    Pushes the current rows x columns shape to the FP's processed_frames
    and raw_frames HDF datasets.
    """
    for dataset in ["processed_frames", "raw_frames"]:
        payload = '{"dims": [%s, %s]}' % (self.rows, self.columns)
        command = "config/hdf/dataset/" + dataset
        request = ApiAdapterRequest(str(payload), content_type="application/json")
        self.adapters["fp"].put(command, request)
def update_histogram_dimensions(self):
    """Update histograms' dimensions in the relevant datasets.

    Recomputes number_histograms from the bin settings, then pushes the
    new dims/chunks to the spectra_bins, pixel_spectra and summed_spectra
    HDF datasets in the FP.
    """
    self.number_histograms = int((self.bin_end - self.bin_start) / self.bin_width)
    # spectra_bins dataset
    payload = '{"dims": [%s], "chunks": [1, %s]}' % \
        (self.number_histograms, self.number_histograms)
    command = "config/hdf/dataset/" + "spectra_bins"
    request = ApiAdapterRequest(str(payload), content_type="application/json")
    self.adapters["fp"].put(command, request)
    # pixel_spectra dataset
    payload = '{"dims": [%s, %s], "chunks": [1, %s, %s]}' % \
        (self.pixels, self.number_histograms, self.pixels, self.number_histograms)
    command = "config/hdf/dataset/" + "pixel_spectra"
    request = ApiAdapterRequest(str(payload), content_type="application/json")
    self.adapters["fp"].put(command, request)
    # summed_spectra dataset
    payload = '{"dims": [%s], "chunks": [1, %s]}' % \
        (self.number_histograms, self.number_histograms)
    command = "config/hdf/dataset/" + "summed_spectra"
    request = ApiAdapterRequest(str(payload), content_type="application/json")
    self.adapters["fp"].put(command, request)
def _set_max_frames_received(self, max_frames_received):
self.max_frames_received = max_frames_received
def _set_pass_processed(self, pass_processed):
self.pass_processed = pass_processed
def _set_pass_raw(self, pass_raw):
self.pass_raw = pass_raw
def _set_threshold_filename(self, threshold_filename):
threshold_filename = self.base_path + threshold_filename
if (os.path.isfile(threshold_filename) is False):
raise ParameterTreeError("Threshold file doesn't exist")
self.threshold_filename = threshold_filename
def _set_threshold_mode(self, threshold_mode):
threshold_mode = threshold_mode.lower()
if (threshold_mode in self.THRESHOLDOPTIONS):
self.threshold_mode = threshold_mode
else:
raise ParameterTreeError("Must be one of: value, filename or none")
def _set_threshold_value(self, threshold_value):
self.threshold_value = threshold_value
def _get_sensors_layout(self):
return self.sensors_layout
def _set_sensors_layout(self, layout):
    """Set sensors_layout in all FP's plugins and FR; Recalculates rows, columns and pixels.

    :param layout: layout string such as "2x2" (rows x columns of sensors)
    """
    self.sensors_layout = layout
    # send command to all FP plugins, then FR
    plugins = ['addition', 'calibration', 'discrimination', 'histogram', 'reorder',
               'next_frame', 'threshold']
    for plugin in plugins:
        command = "config/" + plugin + "/sensors_layout"
        request = ApiAdapterRequest(self.sensors_layout, content_type="application/json")
        self.adapters["fp"].put(command, request)
    command = "config/decoder_config/sensors_layout"
    request = ApiAdapterRequest(self.sensors_layout, content_type="application/json")
    self.adapters["fr"].put(command, request)
    # Recalculate local geometry and push the new dims to the HDF datasets
    self.update_rows_columns_pixels()
    self.update_datasets_frame_dimensions()
    self.update_histogram_dimensions()
def _get_compression_type(self):
return self.compression_type
def _set_compression_type(self, compression_type):
if compression_type in self.COMPRESSIONOPTIONS:
self.compression_type = compression_type
else:
error = "Invalid compression type; Valid options: {}".format(self.COMPRESSIONOPTIONS)
raise ParameterTreeError(error)
def commit_configuration(self):
    """Generate and sends the FP config files.

    Deletes existing FP datasets, decides the master/extra datasets from
    pass_raw/pass_processed, generates the store/execute JSON configs via
    GenerateConfigFiles, and PUTs them to the FP adapter.
    """
    # Generate JSON config file determining which plugins, the order to chain them, etc
    parameter_tree = self.param_tree.get('')
    # Delete any existing datasets
    command = "config/hdf/delete_datasets"
    request = ApiAdapterRequest("", content_type="application/json")
    response = self.adapters["fp"].put(command, request)
    status_code = response.status_code
    if (status_code != 200):
        error = "Error {} deleting existing datasets in fp adapter".format(status_code)
        logging.error(error)
        self.parent.fems[0]._set_status_error(error)
    # The master dataset is the last frames-based dataset selected,
    # falling back to spectra_bins when neither raw nor processed is passed
    self.extra_datasets = []
    self.master_dataset = "spectra_bins"
    if self.pass_raw:
        self.master_dataset = "raw_frames"
        self.extra_datasets.append(self.master_dataset)
    if self.pass_processed:
        self.master_dataset = "processed_frames"
        self.extra_datasets.append(self.master_dataset)
    self.gcf = GenerateConfigFiles(parameter_tree, self.number_histograms,
                                   compression_type=self.compression_type,
                                   master_dataset=self.master_dataset,
                                   extra_datasets=self.extra_datasets)
    store_config, execute_config = self.gcf.generate_config_files()
    command = "config/config_file/"
    request = ApiAdapterRequest(store_config, content_type="application/json")
    response = self.adapters["fp"].put(command, request)
    status_code = response.status_code
    if (status_code != 200):
        error = "Error {} parsing store json config file in fp adapter".format(status_code)
        logging.error(error)
        self.parent.fems[0]._set_status_error(error)
    request = ApiAdapterRequest(execute_config, content_type="application/json")
    response = self.adapters["fp"].put(command, request)
    status_code = response.status_code
    if (status_code != 200):
        error = "Error {} parsing execute json config file in fp adapter".format(status_code)
        logging.error(error)
        self.parent.fems[0]._set_status_error(error)
    # Allow FP time to process above PUT requests before configuring plugin settings
    IOLoop.instance().call_later(0.4, self.submit_configuration)
def submit_configuration(self):
    """Send each ParameterTree value to the corresponding FP plugin."""
    # Loop overall plugins in ParameterTree, updating fp's settings except reorder
    for plugin in self.param_tree.tree.get("config"):
        for param_key in self.param_tree.tree['config'].get(plugin):
            # print("config/%s/%s" % (plugin, param_key), " -> ", \
            #     self.param_tree.tree['config'][plugin][param_key].get(""))
            # Don't send histogram's pass_raw, pass_processed, since Odin Control do not support bool
            if param_key not in ["pass_processed", "pass_raw"]:
                command = "config/%s/%s" % (plugin, param_key)
                # NOTE(review): tree leaves appear to be accessor objects whose
                # get() yields the current value - confirm against ParameterTree
                payload = self.param_tree.tree['config'][plugin][param_key].get()
                request = ApiAdapterRequest(str(payload), content_type="application/json")
                self.adapters["fp"].put(command, request)
    # Which plugin determines when processing finished?
    # Frames-based datasets finish at hdf; otherwise histogram is the marker
    if (self.pass_raw or self.pass_processed):
        self.plugin = "hdf"
    else:
        self.plugin = "histogram"
| [
"os.path.exists",
"logging.debug",
"os.path.join",
"logging.info",
"logging.warning",
"os.getcwd",
"h5py.File",
"hexitec.GenerateConfigFiles.GenerateConfigFiles",
"os.path.isfile",
"tornado.ioloop.IOLoop.instance",
"odin.adapters.adapter.ApiAdapterRequest",
"datetime.datetime.now",
"functool... | [((2072, 2083), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2081, 2083), False, 'import os\n'), ((7839, 7878), 'logging.debug', 'logging.debug', (['"""Setting up Acquisition"""'], {}), "('Setting up Acquisition')\n", (7852, 7878), False, 'import logging\n'), ((9097, 9225), 'logging.info', 'logging.info', (['"""FRAME START ACQ: %d END ACQ: %d"""', 'self.frame_start_acquisition', '(self.frame_start_acquisition + number_frames)'], {}), "('FRAME START ACQ: %d END ACQ: %d', self.\n frame_start_acquisition, self.frame_start_acquisition + number_frames)\n", (9109, 9225), False, 'import logging\n'), ((9363, 9374), 'time.time', 'time.time', ([], {}), '()\n', (9372, 9374), False, 'import time\n'), ((9383, 9420), 'logging.debug', 'logging.debug', (['"""Starting File Writer"""'], {}), "('Starting File Writer')\n", (9396, 9420), False, 'import logging\n'), ((22005, 22070), 'odin.adapters.adapter.ApiAdapterRequest', 'ApiAdapterRequest', (['self.file_dir'], {'content_type': '"""application/json"""'}), "(self.file_dir, content_type='application/json')\n", (22022, 22070), False, 'from odin.adapters.adapter import ApiAdapterRequest\n'), ((22312, 22377), 'odin.adapters.adapter.ApiAdapterRequest', 'ApiAdapterRequest', (['self.file_dir'], {'content_type': '"""application/json"""'}), "(self.file_dir, content_type='application/json')\n", (22329, 22377), False, 'from odin.adapters.adapter import ApiAdapterRequest\n'), ((22905, 22970), 'odin.adapters.adapter.ApiAdapterRequest', 'ApiAdapterRequest', (['self.file_dir'], {'content_type': '"""application/json"""'}), "(self.file_dir, content_type='application/json')\n", (22922, 22970), False, 'from odin.adapters.adapter import ApiAdapterRequest\n'), ((23242, 23299), 'os.path.join', 'os.path.join', (['self.config_dir', 'self.config_files[adapter]'], {}), '(self.config_dir, self.config_files[adapter])\n', (23254, 23299), False, 'import os\n'), ((23317, 23343), 'os.path.expanduser', 'os.path.expanduser', (['config'], {}), 
'(config)\n', (23335, 23343), False, 'import os\n'), ((23425, 23446), 'logging.debug', 'logging.debug', (['config'], {}), '(config)\n', (23438, 23446), False, 'import logging\n'), ((23465, 23523), 'odin.adapters.adapter.ApiAdapterRequest', 'ApiAdapterRequest', (['config'], {'content_type': '"""application/json"""'}), "(config, content_type='application/json')\n", (23482, 23523), False, 'from odin.adapters.adapter import ApiAdapterRequest\n'), ((29309, 29380), 'odin.adapters.adapter.ApiAdapterRequest', 'ApiAdapterRequest', (['self.sensors_layout'], {'content_type': '"""application/json"""'}), "(self.sensors_layout, content_type='application/json')\n", (29326, 29380), False, 'from odin.adapters.adapter import ApiAdapterRequest\n'), ((30298, 30352), 'odin.adapters.adapter.ApiAdapterRequest', 'ApiAdapterRequest', (['""""""'], {'content_type': '"""application/json"""'}), "('', content_type='application/json')\n", (30315, 30352), False, 'from odin.adapters.adapter import ApiAdapterRequest\n'), ((31051, 31231), 'hexitec.GenerateConfigFiles.GenerateConfigFiles', 'GenerateConfigFiles', (['parameter_tree', 'self.number_histograms'], {'compression_type': 'self.compression_type', 'master_dataset': 'self.master_dataset', 'extra_datasets': 'self.extra_datasets'}), '(parameter_tree, self.number_histograms,\n compression_type=self.compression_type, master_dataset=self.\n master_dataset, extra_datasets=self.extra_datasets)\n', (31070, 31231), False, 'from hexitec.GenerateConfigFiles import GenerateConfigFiles\n'), ((31472, 31536), 'odin.adapters.adapter.ApiAdapterRequest', 'ApiAdapterRequest', (['store_config'], {'content_type': '"""application/json"""'}), "(store_config, content_type='application/json')\n", (31489, 31536), False, 'from odin.adapters.adapter import ApiAdapterRequest\n'), ((31880, 31946), 'odin.adapters.adapter.ApiAdapterRequest', 'ApiAdapterRequest', (['execute_config'], {'content_type': '"""application/json"""'}), "(execute_config, 
content_type='application/json')\n", (31897, 31946), False, 'from odin.adapters.adapter import ApiAdapterRequest\n'), ((8036, 8103), 'logging.error', 'logging.error', (['"""Cannot start Acquisition: Frame Receiver not found"""'], {}), "('Cannot start Acquisition: Frame Receiver not found')\n", (8049, 8103), False, 'import logging\n'), ((8375, 8443), 'logging.error', 'logging.error', (['"""Cannot Start Acquisition: Frame Processor not found"""'], {}), "('Cannot Start Acquisition: Frame Processor not found')\n", (8388, 8443), False, 'import logging\n'), ((10351, 10362), 'time.time', 'time.time', ([], {}), '()\n', (10360, 10362), False, 'import time\n'), ((11370, 11407), 'logging.debug', 'logging.debug', (['"""Acquisition Complete"""'], {}), "('Acquisition Complete')\n", (11383, 11407), False, 'import logging\n'), ((13325, 13363), 'os.path.exists', 'os.path.exists', (['self.hdf_file_location'], {}), '(self.hdf_file_location)\n', (13339, 13363), False, 'import os\n'), ((13745, 13784), 'h5py.File', 'h5py.File', (['self.hdf_file_location', '"""r+"""'], {}), "(self.hdf_file_location, 'r+')\n", (13754, 13784), False, 'import h5py\n'), ((16770, 16798), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'str'}), '(vlen=str)\n', (16788, 16798), False, 'import h5py\n'), ((20015, 20071), 'odin.adapters.adapter.ApiAdapterRequest', 'ApiAdapterRequest', (['None'], {'content_type': '"""application/json"""'}), "(None, content_type='application/json')\n", (20032, 20071), False, 'from odin.adapters.adapter import ApiAdapterRequest\n'), ((20633, 20656), 'odin.adapters.adapter.ApiAdapterRequest', 'ApiAdapterRequest', (['None'], {}), '(None)\n', (20650, 20656), False, 'from odin.adapters.adapter import ApiAdapterRequest\n'), ((24658, 24701), 'odin.adapters.parameter_tree.ParameterTreeError', 'ParameterTreeError', (['"""Must be either 3 or 5"""'], {}), "('Must be either 3 or 5')\n", (24676, 24701), False, 'from odin.adapters.parameter_tree import ParameterTree, 
ParameterTreeError\n'), ((24839, 24873), 'os.path.isfile', 'os.path.isfile', (['gradients_filename'], {}), '(gradients_filename)\n', (24853, 24873), False, 'import os\n'), ((24903, 24953), 'odin.adapters.parameter_tree.ParameterTreeError', 'ParameterTreeError', (['"""Gradients file doesn\'t exist"""'], {}), '("Gradients file doesn\'t exist")\n', (24921, 24953), False, 'from odin.adapters.parameter_tree import ParameterTree, ParameterTreeError\n'), ((25148, 25183), 'os.path.isfile', 'os.path.isfile', (['intercepts_filename'], {}), '(intercepts_filename)\n', (25162, 25183), False, 'import os\n'), ((25213, 25264), 'odin.adapters.parameter_tree.ParameterTreeError', 'ParameterTreeError', (['"""Intercepts file doesn\'t exist"""'], {}), '("Intercepts file doesn\'t exist")\n', (25231, 25264), False, 'from odin.adapters.parameter_tree import ParameterTree, ParameterTreeError\n'), ((27989, 28023), 'os.path.isfile', 'os.path.isfile', (['threshold_filename'], {}), '(threshold_filename)\n', (28003, 28023), False, 'import os\n'), ((28053, 28103), 'odin.adapters.parameter_tree.ParameterTreeError', 'ParameterTreeError', (['"""Threshold file doesn\'t exist"""'], {}), '("Threshold file doesn\'t exist")\n', (28071, 28103), False, 'from odin.adapters.parameter_tree import ParameterTree, ParameterTreeError\n'), ((28392, 28453), 'odin.adapters.parameter_tree.ParameterTreeError', 'ParameterTreeError', (['"""Must be one of: value, filename or none"""'], {}), "('Must be one of: value, filename or none')\n", (28410, 28453), False, 'from odin.adapters.parameter_tree import ParameterTree, ParameterTreeError\n'), ((29107, 29178), 'odin.adapters.adapter.ApiAdapterRequest', 'ApiAdapterRequest', (['self.sensors_layout'], {'content_type': '"""application/json"""'}), "(self.sensors_layout, content_type='application/json')\n", (29124, 29178), False, 'from odin.adapters.adapter import ApiAdapterRequest\n'), ((29935, 29960), 'odin.adapters.parameter_tree.ParameterTreeError', 'ParameterTreeError', 
(['error'], {}), '(error)\n', (29953, 29960), False, 'from odin.adapters.parameter_tree import ParameterTree, ParameterTreeError\n'), ((30595, 30615), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (30608, 30615), False, 'import logging\n'), ((31783, 31803), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (31796, 31803), False, 'import logging\n'), ((32194, 32214), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (32207, 32214), False, 'import logging\n'), ((8247, 8307), 'logging.debug', 'logging.debug', (['"""Frame Receiver Already connected/configured"""'], {}), "('Frame Receiver Already connected/configured')\n", (8260, 8307), False, 'import logging\n'), ((8587, 8648), 'logging.debug', 'logging.debug', (['"""Frame Processor Already connected/configured"""'], {}), "('Frame Processor Already connected/configured')\n", (8600, 8648), False, 'import logging\n'), ((9847, 9864), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (9862, 9864), False, 'from tornado.ioloop import IOLoop\n'), ((12382, 12393), 'time.time', 'time.time', ([], {}), '()\n', (12391, 12393), False, 'import time\n'), ((14348, 14434), 'logging.error', 'logging.error', (['("Failed to open \'%s\' with error: %s" % (self.hdf_file_location, e))'], {}), '("Failed to open \'%s\' with error: %s" % (self.\n hdf_file_location, e))\n', (14361, 14434), False, 'import logging\n'), ((17579, 17604), 'os.path.isfile', 'os.path.isfile', (['file_name'], {}), '(file_name)\n', (17593, 17604), False, 'import os\n'), ((20227, 20276), 'logging.warning', 'logging.warning', (["('%s Adapter Not Found' % adapter)"], {}), "('%s Adapter Not Found' % adapter)\n", (20242, 20276), False, 'import logging\n'), ((21255, 21329), 'logging.warning', 'logging.warning', (["('KeyError when trying to get config file: %s' % key_error)"], {}), "('KeyError when trying to get config file: %s' % key_error)\n", (21270, 21329), False, 'import logging\n'), ((32370, 32387), 
'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (32385, 32387), False, 'from tornado.ioloop import IOLoop\n'), ((9741, 9755), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9753, 9755), False, 'from datetime import datetime\n'), ((10109, 10126), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (10124, 10126), False, 'from tornado.ioloop import IOLoop\n'), ((10413, 10430), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (10428, 10430), False, 'from tornado.ioloop import IOLoop\n'), ((10935, 10952), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (10950, 10952), False, 'from tornado.ioloop import IOLoop\n'), ((11299, 11316), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (11314, 11316), False, 'from tornado.ioloop import IOLoop\n'), ((11552, 11569), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (11567, 11569), False, 'from tornado.ioloop import IOLoop\n'), ((12531, 12548), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (12546, 12548), False, 'from tornado.ioloop import IOLoop\n'), ((12755, 12769), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12767, 12769), False, 'from datetime import datetime\n'), ((13091, 13108), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (13106, 13108), False, 'from tornado.ioloop import IOLoop\n'), ((14122, 14212), 'logging.warning', 'logging.warning', (["(' Re-try attempt: %s Reopen file, because: %s' % (self.hdf_retry, e))"], {}), "(' Re-try attempt: %s Reopen file, because: %s' % (self.\n hdf_retry, e))\n", (14137, 14212), False, 'import logging\n'), ((16602, 16616), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16614, 16616), False, 'from datetime import datetime\n'), ((18392, 18477), 'logging.debug', 'logging.debug', (['("Key \'%s\'; Successfully read file \'%s\'" % (param_file, file_name))'], {}), '("Key 
\'%s\'; Successfully read file \'%s\'" % (param_file, file_name)\n )\n', (18405, 18477), False, 'import logging\n'), ((18515, 18592), 'logging.error', 'logging.error', (['("Key: %s\'s file: %s. Doesn\'t exist!" % (param_file, file_name))'], {}), '("Key: %s\'s file: %s. Doesn\'t exist!" % (param_file, file_name))\n', (18528, 18592), False, 'import logging\n'), ((4082, 4126), 'functools.partial', 'partial', (['self._is_od_connected'], {'adapter': '"""fr"""'}), "(self._is_od_connected, adapter='fr')\n", (4089, 4126), False, 'from functools import partial\n'), ((4229, 4264), 'functools.partial', 'partial', (['self.get_config_file', '"""fr"""'], {}), "(self.get_config_file, 'fr')\n", (4236, 4264), False, 'from functools import partial\n'), ((4344, 4388), 'functools.partial', 'partial', (['self._is_od_connected'], {'adapter': '"""fp"""'}), "(self._is_od_connected, adapter='fp')\n", (4351, 4388), False, 'from functools import partial\n'), ((4491, 4526), 'functools.partial', 'partial', (['self.get_config_file', '"""fp"""'], {}), "(self.get_config_file, 'fp')\n", (4498, 4526), False, 'from functools import partial\n'), ((10226, 10240), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10238, 10240), False, 'from datetime import datetime\n'), ((14256, 14273), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (14271, 14273), False, 'from tornado.ioloop import IOLoop\n'), ((17980, 18065), 'logging.error', 'logging.error', (["('Failed to read %s XML file %s : %s ' % (param_file, file_name, e))"], {}), "('Failed to read %s XML file %s : %s ' % (param_file,\n file_name, e))\n", (17993, 18065), False, 'import logging\n'), ((18201, 18304), 'logging.error', 'logging.error', (["('Exception creating metadata for %s XML file %s : %s' % (param_file,\n param_file, e))"], {}), "('Exception creating metadata for %s XML file %s : %s' % (\n param_file, param_file, e))\n", (18214, 18304), False, 'import logging\n'), ((12154, 12168), 'datetime.datetime.now', 
'datetime.now', ([], {}), '()\n', (12166, 12168), False, 'from datetime import datetime\n')] |
"""Merge monthly flight-list CSVs, drop incomplete rows and export one CSV."""
from pathlib import Path
import pandas as pd

# Concatenate every monthly flight list. sorted() makes the row order
# deterministic across runs (glob order is filesystem-dependent).
flights = pd.concat(
    pd.read_csv(file, parse_dates=["firstseen", "lastseen", "day"])
    for file in sorted(Path("../dataset").glob("flightlist_*.csv.gz"))
)

# Keep only rows where all key identification/route columns are present.
REQUIRED_COLUMNS = ['registration', 'typecode', 'origin', 'destination', 'altitude_2']
flights_dropna = flights.dropna(subset=REQUIRED_COLUMNS)

print(flights_dropna.head(10))
print(flights_dropna.isna().sum())
print(len(flights_dropna))

for col in flights_dropna.columns:
    print(f"{col}'s cardinality: ")
    # dropna=False matches the original len(unique()): NaN counts as one value.
    print(flights_dropna[col].nunique(dropna=False))

# NOTE(review): "fligths" looks like a typo, but the path is kept as-is since
# downstream consumers may rely on this exact filename.
flights_dropna.to_csv('../fligths_covid.csv', index=False)
print('done.') | [
"pandas.read_csv",
"pathlib.Path"
] | [((71, 134), 'pandas.read_csv', 'pd.read_csv', (['file'], {'parse_dates': "['firstseen', 'lastseen', 'day']"}), "(file, parse_dates=['firstseen', 'lastseen', 'day'])\n", (82, 134), True, 'import pandas as pd\n'), ((151, 169), 'pathlib.Path', 'Path', (['"""../dataset"""'], {}), "('../dataset')\n", (155, 169), False, 'from pathlib import Path\n')] |
# -*- coding: utf-8 -*-
#
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-docstring,invalid-name
import unittest
import numpy as np
from qiskit import Aer
from qiskit.compiler import assemble
from qiskit.ignis.verification.tomography import GatesetTomographyFitter
from qiskit.ignis.verification.tomography import gateset_tomography_circuits
from qiskit.ignis.verification.tomography.basis import default_gateset_basis
from qiskit.providers.aer.noise import NoiseModel
from qiskit.extensions import HGate, SGate
from qiskit.quantum_info import PTM
class TestGatesetTomography(unittest.TestCase):
    """Integration tests for gate-set tomography on the qasm simulator."""

    @staticmethod
    def collect_tomography_data(shots=10000,
                                noise_model=None,
                                gateset_basis='Default'):
        """Run the gate-set tomography circuits and return the fitter.

        Executes the circuits on the qasm simulator (optionally with a noise
        model applied) and wraps the result in a GatesetTomographyFitter.
        """
        backend_qasm = Aer.get_backend('qasm_simulator')
        circuits = gateset_tomography_circuits(gateset_basis=gateset_basis)
        qobj = assemble(circuits, shots=shots)
        result = backend_qasm.run(qobj, noise_model=noise_model).result()
        fitter = GatesetTomographyFitter(result, circuits, gateset_basis)
        return fitter

    @staticmethod
    def expected_linear_inversion_gates(Gs, Fs):
        """Return the gauge-transformed gates expected from linear inversion.

        Linear inversion recovers the gate set only up to a similarity
        transformation B built from the SPAM circuit matrices Fs, so the
        ideal gates are conjugated by B before comparison.
        """
        rho = Gs['rho']
        E = Gs['E']
        # Columns of B are the states produced by applying each F to rho.
        B = np.array([(F @ rho).T[0] for F in Fs]).T
        BB = np.linalg.inv(B)
        gates = {label: BB @ G @ B for (label, G) in Gs.items()
                 if label not in ['E', 'rho']}
        gates['E'] = E @ B
        gates['rho'] = BB @ rho
        return gates

    @staticmethod
    def hs_distance(A, B):
        """Squared Hilbert-Schmidt distance between matrices A and B."""
        return sum([np.abs(x) ** 2 for x in np.nditer(A-B)])

    @staticmethod
    def convert_from_ptm(vector):
        """Convert a length-4 PTM vector into its 2x2 matrix representation."""
        # Normalized Pauli basis (I, X, Y, Z) scaled by 1/sqrt(2).
        Id = np.sqrt(0.5) * np.array([[1, 0], [0, 1]])
        X = np.sqrt(0.5) * np.array([[0, 1], [1, 0]])
        Y = np.sqrt(0.5) * np.array([[0, -1j], [1j, 0]])
        Z = np.sqrt(0.5) * np.array([[1, 0], [0, -1]])
        v = vector.reshape(4)
        return v[0] * Id + v[1] * X + v[2] * Y + v[3] * Z

    def compare_gates(self, expected_gates, result_gates, labels, delta=0.2):
        """Assert each fitted gate is close (in HS distance) to its expectation."""
        for label in labels:
            expected_gate = expected_gates[label]
            result_gate = result_gates[label].data
            msg = "Failure on gate {}: Expected gate = \n{}\n" \
                  "vs Actual gate = \n{}".format(label,
                                                 expected_gate,
                                                 result_gate)
            distance = self.hs_distance(expected_gate, result_gate)
            self.assertAlmostEqual(distance, 0, delta=delta, msg=msg)

    def run_test_on_basis_and_noise(self,
                                    gateset_basis='Default',
                                    noise_model=None,
                                    noise_ptm=None):
        """Full round-trip check for a gate-set basis under optional noise.

        Verifies both the linear-inversion result and the final fitter
        optimization against the analytically expected (possibly noisy) gates.
        """
        if gateset_basis == 'Default':
            gateset_basis = default_gateset_basis()
        labels = gateset_basis.gate_labels
        gates = gateset_basis.gate_matrices
        # Ideal SPAM vectors in the normalized Pauli basis.
        gates['rho'] = np.array([[np.sqrt(0.5)], [0], [0], [np.sqrt(0.5)]])
        gates['E'] = np.array([[np.sqrt(0.5), 0, 0, np.sqrt(0.5)]])
        # apply noise if given
        for label in labels:
            if label != "Id" and noise_ptm is not None:
                gates[label] = noise_ptm @ gates[label]
        Fs = [gateset_basis.spam_matrix(label)
              for label in gateset_basis.spam_labels]
        # prepare the fitter
        fitter = self.collect_tomography_data(shots=10000,
                                              noise_model=noise_model,
                                              gateset_basis=gateset_basis)
        # linear inversion test
        result_gates = fitter.linear_inversion()
        expected_gates = self.expected_linear_inversion_gates(gates, Fs)
        self.compare_gates(expected_gates, result_gates, labels + ['E', 'rho'])
        # fitter optimization test
        result_gates = fitter.fit()
        expected_gates = gates
        expected_gates['E'] = self.convert_from_ptm(expected_gates['E'])
        expected_gates['rho'] = self.convert_from_ptm(expected_gates['rho'])
        self.compare_gates(expected_gates, result_gates, labels + ['E', 'rho'])

    def test_noiseless_standard_basis(self):
        """Noiseless tomography with the default gate-set basis."""
        self.run_test_on_basis_and_noise()

    def test_noiseless_h_gate_standard_basis(self):
        """Noiseless tomography with the default basis extended by H."""
        basis = default_gateset_basis()
        basis.add_gate(HGate())
        self.run_test_on_basis_and_noise(gateset_basis=basis)

    def test_noiseless_s_gate_standard_basis(self):
        """Noiseless tomography with the default basis extended by S."""
        basis = default_gateset_basis()
        basis.add_gate(SGate())
        self.run_test_on_basis_and_noise(gateset_basis=basis)

    def test_amplitude_damping_standard_basis(self):
        """Tomography under an amplitude-damping channel given in PTM form."""
        gamma = 0.05
        noise_ptm = PTM(np.array([[1, 0, 0, 0],
                                  [0, np.sqrt(1-gamma), 0, 0],
                                  [0, 0, np.sqrt(1-gamma), 0],
                                  [gamma, 0, 0, 1-gamma]]))
        noise_model = NoiseModel()
        noise_model.add_all_qubit_quantum_error(noise_ptm, ['u1', 'u2', 'u3'])
        self.run_test_on_basis_and_noise(noise_model=noise_model,
                                         noise_ptm=np.real(noise_ptm.data))

    def test_depolarization_standard_basis(self):
        """Tomography under a depolarizing channel given in PTM form."""
        p = 0.05
        noise_ptm = PTM(np.array([[1, 0, 0, 0],
                                  [0, 1-p, 0, 0],
                                  [0, 0, 1-p, 0],
                                  [0, 0, 0, 1-p]]))
        noise_model = NoiseModel()
        noise_model.add_all_qubit_quantum_error(noise_ptm, ['u1', 'u2', 'u3'])
        self.run_test_on_basis_and_noise(noise_model=noise_model,
                                         noise_ptm=np.real(noise_ptm.data))
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
"numpy.abs",
"qiskit.ignis.verification.tomography.basis.default_gateset_basis",
"qiskit.ignis.verification.tomography.GatesetTomographyFitter",
"numpy.sqrt",
"qiskit.ignis.verification.tomography.gateset_tomography_circuits",
"numpy.nditer",
"qiskit.compiler.assemble",
"qiskit.extensions.SGate",
"n... | [((6227, 6242), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6240, 6242), False, 'import unittest\n'), ((1256, 1289), 'qiskit.Aer.get_backend', 'Aer.get_backend', (['"""qasm_simulator"""'], {}), "('qasm_simulator')\n", (1271, 1289), False, 'from qiskit import Aer\n'), ((1309, 1365), 'qiskit.ignis.verification.tomography.gateset_tomography_circuits', 'gateset_tomography_circuits', ([], {'gateset_basis': 'gateset_basis'}), '(gateset_basis=gateset_basis)\n', (1336, 1365), False, 'from qiskit.ignis.verification.tomography import gateset_tomography_circuits\n'), ((1381, 1412), 'qiskit.compiler.assemble', 'assemble', (['circuits'], {'shots': 'shots'}), '(circuits, shots=shots)\n', (1389, 1412), False, 'from qiskit.compiler import assemble\n'), ((1504, 1560), 'qiskit.ignis.verification.tomography.GatesetTomographyFitter', 'GatesetTomographyFitter', (['result', 'circuits', 'gateset_basis'], {}), '(result, circuits, gateset_basis)\n', (1527, 1560), False, 'from qiskit.ignis.verification.tomography import GatesetTomographyFitter\n'), ((1761, 1777), 'numpy.linalg.inv', 'np.linalg.inv', (['B'], {}), '(B)\n', (1774, 1777), True, 'import numpy as np\n'), ((4800, 4823), 'qiskit.ignis.verification.tomography.basis.default_gateset_basis', 'default_gateset_basis', ([], {}), '()\n', (4821, 4823), False, 'from qiskit.ignis.verification.tomography.basis import default_gateset_basis\n'), ((4987, 5010), 'qiskit.ignis.verification.tomography.basis.default_gateset_basis', 'default_gateset_basis', ([], {}), '()\n', (5008, 5010), False, 'from qiskit.ignis.verification.tomography.basis import default_gateset_basis\n'), ((5436, 5448), 'qiskit.providers.aer.noise.NoiseModel', 'NoiseModel', ([], {}), '()\n', (5446, 5448), False, 'from qiskit.providers.aer.noise import NoiseModel\n'), ((5960, 5972), 'qiskit.providers.aer.noise.NoiseModel', 'NoiseModel', ([], {}), '()\n', (5970, 5972), False, 'from qiskit.providers.aer.noise import NoiseModel\n'), ((1707, 1745), 'numpy.array', 
'np.array', (['[(F @ rho).T[0] for F in Fs]'], {}), '([(F @ rho).T[0] for F in Fs])\n', (1715, 1745), True, 'import numpy as np\n'), ((2142, 2154), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2149, 2154), True, 'import numpy as np\n'), ((2157, 2183), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (2165, 2183), True, 'import numpy as np\n'), ((2196, 2208), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2203, 2208), True, 'import numpy as np\n'), ((2211, 2237), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (2219, 2237), True, 'import numpy as np\n'), ((2250, 2262), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2257, 2262), True, 'import numpy as np\n'), ((2265, 2298), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (2273, 2298), True, 'import numpy as np\n'), ((2307, 2319), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2314, 2319), True, 'import numpy as np\n'), ((2322, 2349), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (2330, 2349), True, 'import numpy as np\n'), ((3310, 3333), 'qiskit.ignis.verification.tomography.basis.default_gateset_basis', 'default_gateset_basis', ([], {}), '()\n', (3331, 3333), False, 'from qiskit.ignis.verification.tomography.basis import default_gateset_basis\n'), ((4847, 4854), 'qiskit.extensions.HGate', 'HGate', ([], {}), '()\n', (4852, 4854), False, 'from qiskit.extensions import HGate, SGate\n'), ((5034, 5041), 'qiskit.extensions.SGate', 'SGate', ([], {}), '()\n', (5039, 5041), False, 'from qiskit.extensions import HGate, SGate\n'), ((5762, 5840), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1 - p, 0, 0], [0, 0, 1 - p, 0], [0, 0, 0, 1 - p]]'], {}), '([[1, 0, 0, 0], [0, 1 - p, 0, 0], [0, 0, 1 - p, 0], [0, 0, 0, 1 - p]])\n', (5770, 5840), True, 'import numpy as np\n'), ((5645, 5668), 'numpy.real', 'np.real', (['noise_ptm.data'], {}), '(noise_ptm.data)\n', 
(5652, 5668), True, 'import numpy as np\n'), ((6169, 6192), 'numpy.real', 'np.real', (['noise_ptm.data'], {}), '(noise_ptm.data)\n', (6176, 6192), True, 'import numpy as np\n'), ((2035, 2044), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (2041, 2044), True, 'import numpy as np\n'), ((2059, 2075), 'numpy.nditer', 'np.nditer', (['(A - B)'], {}), '(A - B)\n', (2068, 2075), True, 'import numpy as np\n'), ((3456, 3468), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (3463, 3468), True, 'import numpy as np\n'), ((3482, 3494), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (3489, 3494), True, 'import numpy as np\n'), ((3530, 3542), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (3537, 3542), True, 'import numpy as np\n'), ((3550, 3562), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (3557, 3562), True, 'import numpy as np\n'), ((5266, 5284), 'numpy.sqrt', 'np.sqrt', (['(1 - gamma)'], {}), '(1 - gamma)\n', (5273, 5284), True, 'import numpy as np\n'), ((5332, 5350), 'numpy.sqrt', 'np.sqrt', (['(1 - gamma)'], {}), '(1 - gamma)\n', (5339, 5350), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import dataclasses
import enum
import json
import pathlib
import subprocess
import textwrap
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, TypeVar
import inflection
import jinja2
from jinja2 import StrictUndefined
# Repository root, derived from this file's location (two levels up).
PROJECT_ROOT = pathlib.Path(__file__).parent.parent
# Input definition files consumed by the generator.
API_INPUT_FILE = PROJECT_ROOT / "protocol_generator" / "api_definition.json"
CONSTANTS_INPUT_FILE = PROJECT_ROOT / "protocol_generator" / "constant_definition.json"
# Output package and Jinja template locations.
TARGET_PATH = PROJECT_ROOT / "esque_wire"
TEMPLATE_PATH = PROJECT_ROOT / "protocol_generator" / "templates"
T = TypeVar("T")
class FieldType(str, enum.Enum):
    """All field types that may appear in the wire-protocol API definition.

    ARRAY, STRUCT and ENUM are composite types handled by dedicated TypeDef
    subclasses; the remaining members are primitives with direct serializers.
    """

    ARRAY = "ARRAY"
    STRUCT = "STRUCT"
    BOOLEAN = "BOOLEAN"
    ENUM = "ENUM"
    INT8 = "INT8"
    INT16 = "INT16"
    INT32 = "INT32"
    INT64 = "INT64"
    UINT32 = "UINT32"
    VARINT = "VARINT"
    VARLONG = "VARLONG"
    STRING = "STRING"
    NULLABLE_STRING = "NULLABLE_STRING"
    BYTES = "BYTES"
    NULLABLE_BYTES = "NULLABLE_BYTES"
    RECORDS = "RECORDS"
class Direction(enum.Enum):
    """Whether a schema describes the request or the response side of an API."""

    REQUEST = "request"
    RESPONSE = "response"
def serializer(type_def: "TypeDef") -> str:
    """Return the serializer variable name for a primitive type definition."""
    assert type_def.field_type not in (FieldType.ARRAY, FieldType.STRUCT), "Should use their own implementation"
    base_name = inflection.camelize(type_def.field_type.name, uppercase_first_letter=False)
    return base_name + "Serializer"
@dataclasses.dataclass
class TypeDef:
    """Base description of a wire-protocol field type.

    Instances of this base class represent the primitive types directly; the
    composite kinds (ARRAY, STRUCT, ENUM) are handled by subclasses created
    via :meth:`from_dict`.
    """

    field_type: FieldType

    # Python type hint per primitive field type.  Plain (non-annotated) class
    # attribute, so the dataclass machinery does not treat it as a field.
    _PRIMITIVE_TYPE_HINTS = {
        FieldType.BOOLEAN: "bool",
        FieldType.INT8: "int",
        FieldType.INT16: "int",
        FieldType.INT32: "int",
        FieldType.INT64: "int",
        FieldType.UINT32: "int",
        FieldType.VARINT: "int",
        FieldType.VARLONG: "int",
        FieldType.STRING: "str",
        FieldType.NULLABLE_STRING: "Optional[str]",
        FieldType.BYTES: "bytes",
        FieldType.NULLABLE_BYTES: "Optional[bytes]",
        FieldType.RECORDS: "Optional[bytes]",
    }

    def traverse_types(self) -> Iterable["TypeDef"]:
        """Yield this type definition (subclasses also yield nested types)."""
        yield self

    @staticmethod
    def from_dict(data: Dict) -> "TypeDef":
        """Build the appropriate TypeDef subclass from a JSON description."""
        field_type = FieldType(data["type"])
        if field_type == FieldType.ARRAY:
            return ArrayTypeDef.from_dict(data)
        if field_type == FieldType.STRUCT:
            return StructTypeDef.from_dict(data)
        if field_type == FieldType.ENUM:
            return EnumTypeDef.from_dict(data)
        return TypeDef(field_type)

    @property
    def type_hint(self) -> str:
        """Python type hint for this primitive type.

        Raises:
            ValueError: if no hint is defined for the field type.
        """
        assert self.field_type not in (FieldType.ARRAY, FieldType.STRUCT), "Should use their own implementation"
        # Table lookup replaces the original long if/elif chain; the error
        # message for unknown types is unchanged.
        hint = self._PRIMITIVE_TYPE_HINTS.get(self.field_type)
        if hint is None:
            raise ValueError(f"No type hint for {self.field_type}")
        return hint

    @property
    def type_import_name(self) -> Optional[str]:
        """Name that must be imported from ``typing`` for the hint, if any."""
        if self.field_type in (FieldType.NULLABLE_BYTES, FieldType.NULLABLE_STRING, FieldType.RECORDS):
            return "Optional"
        return None

    @property
    def serializer_import_name(self) -> str:
        """Name of the serializer object to import for this type."""
        return PRIMITIVE_SERIALIZERS[self.field_type]

    @property
    def constant_import_name(self) -> Optional[str]:
        """Constant (enum class) that must be imported, if any."""
        return None

    def serializer_definition(self, version=0):
        """Expression constructing the serializer for this type."""
        return PRIMITIVE_SERIALIZERS[self.field_type]

    def serializer_variable_name(self, version=0):
        """Expression referencing the serializer variable for this type."""
        return PRIMITIVE_SERIALIZERS[self.field_type]
@dataclasses.dataclass
class ArrayTypeDef(TypeDef):
    """Type definition for a repeated (array) field wrapping an element type."""

    element_type: TypeDef

    @staticmethod
    def from_dict(data: Dict) -> "ArrayTypeDef":
        """Build an array type definition from its JSON description."""
        element = TypeDef.from_dict(data["element_type"])
        return ArrayTypeDef(FieldType.ARRAY, element)

    def traverse_types(self) -> Iterable["TypeDef"]:
        """Yield this node followed by every type nested in the element type."""
        yield self
        yield from self.element_type.traverse_types()

    @property
    def type_hint(self) -> str:
        """Python type hint: a list of the element's hint."""
        return "List[{}]".format(self.element_type.type_hint)

    @property
    def type_import_name(self) -> Optional[str]:
        """Name to import from ``typing`` for the hint."""
        return "List"

    @property
    def serializer_import_name(self) -> str:
        """Serializer class used for arrays."""
        return "ArraySerializer"

    def serializer_definition(self, version=0):
        """Expression constructing the serializer for this array type."""
        inner = self.element_type.serializer_definition(version)
        return "ArraySerializer({})".format(inner)

    def serializer_variable_name(self, version=0):
        """Expression referencing the serializer variable for this array type."""
        inner = self.element_type.serializer_variable_name(version)
        return "ArraySerializer({})".format(inner)
@dataclasses.dataclass
class DummyTypeDef(TypeDef):
    """Placeholder for a field that only exists in newer schema versions.

    Older versions serialize a constant default instead of real data so all
    versions can share one class layout.
    """

    element_type: TypeDef
    default: Any
    has_default: bool

    def traverse_types(self) -> Iterable["TypeDef"]:
        """Yield this node and everything nested in the wrapped element type."""
        yield self
        yield from self.element_type.traverse_types()

    @property
    def type_hint(self) -> str:
        """Delegate the hint to the wrapped element type."""
        return self.element_type.type_hint

    @property
    def serializer_import_name(self) -> str:
        """Serializer class used for dummy (defaulted) fields."""
        return "DummySerializer"

    def serializer_definition(self, version=0):
        """Expression constructing a DummySerializer with the right default."""
        if self.has_default:
            default_expr = repr(self.default)
        else:
            default_expr = self.element_type.serializer_definition() + ".default"
        return "DummySerializer({})".format(default_expr)

    def serializer_variable_name(self, version=0):
        """Expression referencing a DummySerializer with the right default."""
        if self.has_default:
            default_expr = repr(self.default)
        else:
            default_expr = self.element_type.serializer_variable_name(-1) + ".default"
        return "DummySerializer({})".format(default_expr)
@dataclasses.dataclass
class StructTypeDef(TypeDef):
    """Type definition for a nested structure composed of named fields."""

    fields: List["Field"]
    name: str = "<noname>"
    # Cached list of field names, kept in sync with ``fields``.
    field_names: List[str] = dataclasses.field(init=False, default_factory=list)

    def __post_init__(self):
        # Seed the name cache from the initial fields.
        self.field_names.extend(f.name for f in self.fields)

    @staticmethod
    def from_dict(data: Dict) -> "StructTypeDef":
        """Build a struct type definition (with all nested fields) from JSON."""
        fields = [Field.from_dict(field_data) for field_data in data["fields"]]
        return StructTypeDef(FieldType.STRUCT, fields)

    def traverse_types(self) -> Iterable["TypeDef"]:
        """Yield this struct followed by every type nested in its fields."""
        yield self
        for field in self.fields:
            yield from field.type_def.traverse_types()

    def traverse_fields(self) -> Iterable["Field"]:
        """Yield every field of this struct and of all nested structs."""
        for type_def in self.traverse_types():
            if isinstance(type_def, StructTypeDef):
                yield from type_def.fields

    @property
    def type_hint(self) -> str:
        # The generated class name doubles as the type hint.
        return self.name

    def schema_dict_name(self, include_type: bool = False) -> str:
        """Name of the per-version schema dict, optionally with annotation."""
        name = f"{lower_first(self.name)}Schemas"
        if include_type:
            name += ": Dict[int, Schema]"
        return name

    def schema_variable_name(self, version: int = 0) -> str:
        """Expression selecting the schema for *version* from the dict."""
        return f"{self.schema_dict_name()}[{version}]"

    def serializer_dict_name(self, include_type: bool = False) -> str:
        """Name of the per-version serializer dict, optionally annotated."""
        name = f"{ lower_first(self.name) }Serializers"
        if include_type:
            name += f": Dict[int, ClassSerializer[{self.name}]]"
        return name

    def serializer_variable_name(self, version: int = 0) -> str:
        """Expression selecting the serializer for *version* from the dict."""
        return f"{self.serializer_dict_name()}[{version}]"

    @property
    def serializer_import_name(self) -> str:
        """Serializer class used for structs."""
        return "ClassSerializer"

    def serializer_definition(self, version: int = 0, schema: Optional[str] = None) -> str:
        """Expression constructing the ClassSerializer for this struct."""
        if schema is None:
            schema = self.schema_variable_name(version)
        return f"ClassSerializer({self.name}, {schema})"

    def make_compatible_to(self, other_struct: "StructTypeDef") -> None:
        """Adjust this (older) struct so it matches *other_struct*'s layout.

        Extra fields are marked as skipped; missing fields are added as dummy
        fields that serialize a default value.
        """
        self._skip_extra_fields(other_struct)
        self._add_missing_fields(other_struct)

    def _skip_extra_fields(self, other_struct: "StructTypeDef") -> None:
        # Fields that no longer exist in the newer struct are not rendered.
        for field in self.fields:
            if field.name not in other_struct.field_names:
                field.skip = True

    def _add_missing_fields(self, other_struct: "StructTypeDef") -> None:
        # Fields only present in the newer struct get a dummy placeholder so
        # every version shares the same class layout.
        for field in other_struct.fields:
            if field.name not in self.field_names:
                dummy_type = DummyTypeDef(
                    field_type=field.type_def.field_type,
                    element_type=field.type_def,
                    default=field.default,
                    has_default=field.has_default,
                )
                dummy_field = dataclasses.replace(field, type_def=dummy_type)
                self.fields.append(dummy_field)
                self.field_names.append(field.name)
@dataclasses.dataclass
class EnumTypeDef(TypeDef):
    """Type definition for a field whose values come from a named enum class."""

    enum_class: str
    # Maybe we'll have to keep the primitive type here (i.e. INT8 or INT16) and create a custom serializer
    # when the data type is non-default, but I hope that they're always the same for the same enum class.
    # We define the default types in jython_api_gen.py in order to create the enum serializers in
    # serializers/constants.py

    @staticmethod
    def from_dict(data: Dict) -> "EnumTypeDef":
        """Build an enum type definition from its JSON description."""
        # Bugfix: this previously tagged the definition with FieldType.ARRAY
        # (apparent copy-paste from ArrayTypeDef.from_dict); an enum field's
        # field_type should be ENUM.
        return EnumTypeDef(FieldType.ENUM, enum_class=data["enum_class"])

    @property
    def type_hint(self) -> str:
        # The enum class name doubles as the type hint.
        return self.enum_class

    @property
    def struct_import_name(self) -> Optional[str]:
        """Class that has to be imported for the type hint."""
        return self.enum_class

    @property
    def serializer_import_name(self) -> str:
        """Name of the pre-built serializer instance for this enum class."""
        return lower_first(self.enum_class) + "Serializer"

    def serializer_definition(self, version=0):
        """Expression constructing the serializer (the shared instance name)."""
        return self.serializer_import_name

    def serializer_variable_name(self, version=0):
        """Expression referencing the serializer variable."""
        return self.serializer_import_name

    @property
    def constant_import_name(self) -> Optional[str]:
        """The enum class must be imported from the constants module."""
        return self.enum_class
@dataclasses.dataclass
class Field:
    """A single named field within a struct schema."""

    name: str
    doc: str
    default: Any
    has_default: bool
    type_def: TypeDef
    skip: bool = False  # True when the field is absent in the latest version

    @classmethod
    def from_dict(cls, data: Dict) -> "Field":
        """Build a field (including its type definition) from JSON data."""
        return Field(
            data["name"],
            data["doc"],
            data["default"],
            data["has_default"],
            TypeDef.from_dict(data),
        )

    @property
    def type_hint(self) -> str:
        """Python type hint of the field's type."""
        return self.type_def.type_hint

    @property
    def rendered_name(self) -> str:
        """Field name literal for templates; 'None' when the field is skipped."""
        return "None" if self.skip else repr(self.name)
@dataclasses.dataclass
class ApiSchema:
    """One direction (request or response) of one version of one API."""

    api_key: int
    api_version: int
    api_name: str
    direction: Direction
    schema: StructTypeDef
    # All structs of this schema, keyed by their generated class name.
    structs: Dict[str, StructTypeDef] = dataclasses.field(default_factory=dict)
    # Same structs, ordered so every struct appears after its dependencies.
    structs_ordered: List[StructTypeDef] = dataclasses.field(default_factory=list)

    def __post_init__(self):
        self._assign_names_to_structs()
        self._create_struct_dict()
        self._resolve_struct_dependencies()

    def _assign_names_to_structs(self) -> None:
        # The top-level struct is named after the API and direction,
        # e.g. "MetadataRequestData".
        self.schema.name = (
            inflection.camelize(self.api_name.lower(), uppercase_first_letter=True)
            + self.direction.name.title()
            + "Data"
        )
        for field, struct_type in self.find_struct_fields():
            struct_name = field.name
            if isinstance(field.type_def, ArrayTypeDef):
                # Array element structs get a singular name ("topics" -> "Topic").
                struct_name = singularize(struct_name)
            struct_type.name = inflection.camelize(struct_name, uppercase_first_letter=True)

    def find_struct_fields(self) -> Iterable[Tuple[Field, StructTypeDef]]:
        """Yield every field whose (possibly array-wrapped) type is a struct."""
        for field in self.schema.traverse_fields():
            inner_type = skip_array_type_defs(field.type_def)
            if isinstance(inner_type, StructTypeDef):
                yield field, inner_type

    def _create_struct_dict(self):
        # Index every struct (including the top-level one) by its class name.
        for _, struct in self.find_struct_fields():
            self.structs[struct.name] = struct
        self.structs[self.schema.name] = self.schema

    def _resolve_struct_dependencies(self):
        # Topological sort: repeatedly emit a struct with no remaining
        # dependencies so nested structs can be defined before their parents.
        dependency_tree: Dict[str, Set[str]] = {}
        to_be_visited: List[StructTypeDef] = [self.schema]
        while to_be_visited:
            current_struct = to_be_visited.pop()
            dependency_tree[current_struct.name] = set()
            for field in current_struct.fields:
                inner_type = skip_array_type_defs(field.type_def)
                if isinstance(inner_type, StructTypeDef):
                    to_be_visited.append(inner_type)
                    dependency_tree[current_struct.name].add(inner_type.name)
        while dependency_tree:
            for name, dependencies in dependency_tree.items():
                if len(dependencies) == 0:
                    break
            else:
                raise RuntimeError("No Struct without dependencies found!")
            del dependency_tree[name]
            self.structs_ordered.append(self.structs[name])
            for dependencies in dependency_tree.values():
                dependencies.discard(name)

    def make_compatible_to(self, other_schema: "ApiSchema") -> None:
        """Make every struct of this (older) schema layout-compatible with the
        corresponding struct of *other_schema* (the latest version)."""
        for struct_name, other_struct in other_schema.structs.items():
            if struct_name in self.structs:
                self.structs[struct_name].make_compatible_to(other_struct)
@dataclasses.dataclass
class Api:
    """All versions of a single API together with compatibility metadata."""

    api_key: int
    api_name: str
    cluster_aciton: bool  # NOTE: field name typo kept for caller compatibility
    api_versions: Dict[int, Dict[Direction, ApiSchema]]
    latest_version: int = dataclasses.field(init=False)
    latest_schema_pair: Dict[Direction, ApiSchema] = dataclasses.field(init=False)
    min_supported_version: int = dataclasses.field(init=False)
    max_supported_version: int = dataclasses.field(init=False)

    def __post_init__(self):
        self.latest_version = max(self.api_versions)
        self.max_supported_version = self.latest_version
        self.min_supported_version = min(self.api_versions)
        self.latest_schema_pair = self.api_versions[self.latest_version]
        self._make_old_structs_compatible()

    def _make_old_structs_compatible(self) -> None:
        # Align every older schema version with the latest one so that all
        # versions can share the generated classes.
        for direction in Direction:
            new_schema = self.latest_schema_pair[direction]
            for api_version, old_schema_pair in self.api_versions.items():
                if api_version == self.latest_version:
                    continue
                old_schema = old_schema_pair[direction]
                old_schema.make_compatible_to(new_schema)

    @classmethod
    def from_dict(cls, data: Dict) -> "Api":
        """Build an Api (all versions, both directions) from its JSON data."""
        api_key = data["api_key"]
        api_name = data["api_name"]
        schema_iterator: Iterable[Tuple[int, Tuple[Dict, Dict]]] = enumerate(
            zip(data["request_schemas"], data["response_schemas"])
        )
        api_versions: Dict[int, Dict[Direction, ApiSchema]] = {}
        for api_version, (request_schema, response_schema) in schema_iterator:
            schema_pair = {
                Direction.REQUEST: ApiSchema(
                    api_key, api_version, api_name, Direction.REQUEST, StructTypeDef.from_dict(request_schema)
                ),
                Direction.RESPONSE: ApiSchema(
                    api_key, api_version, api_name, Direction.RESPONSE, StructTypeDef.from_dict(response_schema)
                ),
            }
            api_versions[api_version] = schema_pair
        return Api(api_key, api_name, data["cluster_action"], api_versions)

    def _collect_import_names(self, direction: Direction, attribute: str, seed: str) -> List[str]:
        """Collect an import-name attribute across all schema versions.

        Walks every type in every version of the schema for *direction*,
        gathers the non-None values of *attribute* (seeded with *seed*) and
        returns them sorted alphabetically.  Shared implementation for the
        three ``get_*_imports`` methods below.
        """
        names = {seed}
        for version_pair in self.api_versions.values():
            schema = version_pair[direction]
            for field_type in schema.schema.traverse_types():
                name = getattr(field_type, attribute)
                if name is not None:
                    names.add(name)
        return sorted(names)

    def get_serializer_imports(self, direction: Direction) -> List[str]:
        """Serializer names the rendered module for *direction* must import."""
        return self._collect_import_names(direction, "serializer_import_name", "Schema")

    def get_type_imports(self, direction: Direction) -> List[str]:
        """``typing`` names the rendered module for *direction* must import."""
        return self._collect_import_names(direction, "type_import_name", "ClassVar")

    def get_constant_imports(self, direction: Direction) -> List[str]:
        """Constant (enum) names the rendered module must import."""
        return self._collect_import_names(direction, "constant_import_name", "ApiKey")
def main():
constants = load_constants(CONSTANTS_INPUT_FILE)
api_data = load_api_data(API_INPUT_FILE)
render(api_data, constants)
def load_constants(path: pathlib.Path) -> Dict:
with path.open("r") as f:
constants = json.load(f)
return constants
def load_api_data(path: pathlib.Path) -> List[Api]:
with path.open("r") as f:
all_apis = json.load(f)
return [Api.from_dict(data) for data in all_apis]
def skip_array_type_defs(type_def: TypeDef) -> TypeDef:
while isinstance(type_def, ArrayTypeDef):
type_def = type_def.element_type
return type_def
def singularize(text: str) -> str:
if text.endswith("data"):
return text
return inflection.singularize(text)
def lower_first(word: str) -> str:
return word[0].lower() + word[1:]
PRIMITIVE_SERIALIZERS: Dict[FieldType, str] = {
FieldType.BOOLEAN: "booleanSerializer",
FieldType.INT8: "int8Serializer",
FieldType.INT16: "int16Serializer",
FieldType.INT32: "int32Serializer",
FieldType.INT64: "int64Serializer",
FieldType.UINT32: "uint32Serializer",
FieldType.VARINT: "varIntSerializer",
FieldType.VARLONG: "varLongSerializer",
FieldType.STRING: "stringSerializer",
FieldType.NULLABLE_STRING: "nullableStringSerializer",
FieldType.BYTES: "bytesSerializer",
FieldType.NULLABLE_BYTES: "nullableBytesSerializer",
FieldType.RECORDS: "nullableBytesSerializer",
}
class Templater:
template: jinja2.Template
path_template: str
last_target_path: Optional[pathlib.Path]
current_target_path: Optional[pathlib.Path]
def __init__(self, env: jinja2.Environment, template_path: pathlib.Path):
self.template = env.get_template(str(template_path.relative_to(TEMPLATE_PATH)))
path_template = str(TARGET_PATH / template_path.relative_to(TEMPLATE_PATH))
self.path_template = path_template.replace("<", "{").replace(">", "}")[:-3]
self.last_target_path = None
def render(self, all_apis: List[Api], current_api: Api, direction: Direction, constants: Dict) -> None:
self._determine_target_path(current_api, direction)
if not self._target_changed:
return
if self.current_target_path is None:
raise RuntimeError("Need to determine target path first!")
self._update_last_path()
self.current_target_path.parent.mkdir(parents=True, exist_ok=True)
latest_schema = current_api.latest_schema_pair[direction]
all_versions = [api_schema[direction] for api_schema in current_api.api_versions.values()]
self.current_target_path.write_text(
self.template.render(
all_apis=all_apis,
current_api=current_api,
latest_schema=latest_schema,
all_versions=all_versions,
direction=direction,
constants=constants,
)
)
def _determine_target_path(self, current_api: Api, direction: Direction) -> None:
self.current_target_path = pathlib.Path(
self.path_template.format(api_name=current_api.api_name.lower(), direction=direction.name.lower())
)
@property
def _target_changed(self) -> bool:
return self.current_target_path != self.last_target_path
def _update_last_path(self) -> None:
self.last_target_path = self.current_target_path
def without(seq: Iterable[T], *excluded_elems: T) -> Iterable[T]:
for elem in seq:
if elem not in excluded_elems:
yield elem
def is_string(value: Any) -> bool:
return isinstance(value, str)
def render_long_text(text: str, wrap_at: int = 100, **kwargs: Any) -> str:
text = text.strip()
if text == "":
return '""'
if len(text) < wrap_at:
return repr(text)
segments = textwrap.wrap(text, **kwargs)
if len(segments) == 1:
return repr(segments[0])
joined = " ".join(map(repr, segments))
return f"({joined})"
def render(all_apis: List[Api], constants: Dict) -> None:
loader = jinja2.FileSystemLoader(str(TEMPLATE_PATH))
env = jinja2.Environment(autoescape=False, loader=loader, undefined=StrictUndefined)
env.globals["Direction"] = Direction
env.globals["FieldType"] = FieldType
env.globals["len"] = len
env.filters["camelize"] = set
env.filters["is_string"] = is_string
env.filters["lower_first"] = lower_first
env.filters["repr"] = repr
env.filters["without"] = without
env.filters["primitive_serializer"] = PRIMITIVE_SERIALIZERS.get
env.filters["camelize"] = inflection.camelize
env.filters["underscore"] = inflection.underscore
env.filters["render_long_text"] = render_long_text
env.filters["wrap"] = textwrap.wrap
templaters = [
Templater(env, path) for path in TEMPLATE_PATH.glob("**/*.py.j2") if path.name != "base_module.py.j2"
]
for current_api in all_apis:
for direction in Direction:
for templater in templaters:
templater.render(all_apis, current_api, direction, constants)
run_isort()
run_black()
def run_isort():
subprocess.check_call(["isort", "-rc", str(TARGET_PATH)], cwd=str(PROJECT_ROOT))
def run_black():
subprocess.check_call(["black", str(TARGET_PATH)], cwd=str(PROJECT_ROOT))
if __name__ == "__main__":
main()
| [
"jinja2.Environment",
"pathlib.Path",
"json.load",
"textwrap.wrap",
"dataclasses.replace",
"inflection.camelize",
"inflection.singularize",
"dataclasses.field",
"typing.TypeVar"
] | [((590, 602), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (597, 602), False, 'from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, TypeVar\n'), ((5875, 5926), 'dataclasses.field', 'dataclasses.field', ([], {'init': '(False)', 'default_factory': 'list'}), '(init=False, default_factory=list)\n', (5892, 5926), False, 'import dataclasses\n'), ((10533, 10572), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (10550, 10572), False, 'import dataclasses\n'), ((10616, 10655), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (10633, 10655), False, 'import dataclasses\n'), ((13302, 13331), 'dataclasses.field', 'dataclasses.field', ([], {'init': '(False)'}), '(init=False)\n', (13319, 13331), False, 'import dataclasses\n'), ((13385, 13414), 'dataclasses.field', 'dataclasses.field', ([], {'init': '(False)'}), '(init=False)\n', (13402, 13414), False, 'import dataclasses\n'), ((13448, 13477), 'dataclasses.field', 'dataclasses.field', ([], {'init': '(False)'}), '(init=False)\n', (13465, 13477), False, 'import dataclasses\n'), ((13511, 13540), 'dataclasses.field', 'dataclasses.field', ([], {'init': '(False)'}), '(init=False)\n', (13528, 13540), False, 'import dataclasses\n'), ((17275, 17303), 'inflection.singularize', 'inflection.singularize', (['text'], {}), '(text)\n', (17297, 17303), False, 'import inflection\n'), ((20408, 20437), 'textwrap.wrap', 'textwrap.wrap', (['text'], {}), '(text, **kwargs)\n', (20421, 20437), False, 'import textwrap\n'), ((20693, 20771), 'jinja2.Environment', 'jinja2.Environment', ([], {'autoescape': '(False)', 'loader': 'loader', 'undefined': 'StrictUndefined'}), '(autoescape=False, loader=loader, undefined=StrictUndefined)\n', (20711, 20771), False, 'import jinja2\n'), ((275, 297), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (287, 297), False, 'import pathlib\n'), ((1262, 1337), 
'inflection.camelize', 'inflection.camelize', (['type_def.field_type.name'], {'uppercase_first_letter': '(False)'}), '(type_def.field_type.name, uppercase_first_letter=False)\n', (1281, 1337), False, 'import inflection\n'), ((16808, 16820), 'json.load', 'json.load', (['f'], {}), '(f)\n', (16817, 16820), False, 'import json\n'), ((16945, 16957), 'json.load', 'json.load', (['f'], {}), '(f)\n', (16954, 16957), False, 'import json\n'), ((11281, 11342), 'inflection.camelize', 'inflection.camelize', (['struct_name'], {'uppercase_first_letter': '(True)'}), '(struct_name, uppercase_first_letter=True)\n', (11300, 11342), False, 'import inflection\n'), ((8490, 8537), 'dataclasses.replace', 'dataclasses.replace', (['field'], {'type_def': 'dummy_type'}), '(field, type_def=dummy_type)\n', (8509, 8537), False, 'import dataclasses\n')] |
import datetime
entrada = input("Informe a data: ")
datainicial = datetime.datetime.strptime(entrada,"dd/mm/aa") | [
"datetime.datetime.strptime"
] | [((68, 115), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['entrada', '"""dd/mm/aa"""'], {}), "(entrada, 'dd/mm/aa')\n", (94, 115), False, 'import datetime\n')] |
"""Support for the Airzone diagnostics."""
from __future__ import annotations
from typing import Any
from aioairzone.const import AZD_MAC
from homeassistant.components.diagnostics.util import async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import DOMAIN
from .coordinator import AirzoneUpdateCoordinator
TO_REDACT = [
AZD_MAC,
]
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, config_entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: AirzoneUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
return {
"info": async_redact_data(config_entry.data, TO_REDACT),
"data": async_redact_data(coordinator.data, TO_REDACT),
}
| [
"homeassistant.components.diagnostics.util.async_redact_data"
] | [((702, 749), 'homeassistant.components.diagnostics.util.async_redact_data', 'async_redact_data', (['config_entry.data', 'TO_REDACT'], {}), '(config_entry.data, TO_REDACT)\n', (719, 749), False, 'from homeassistant.components.diagnostics.util import async_redact_data\n'), ((767, 813), 'homeassistant.components.diagnostics.util.async_redact_data', 'async_redact_data', (['coordinator.data', 'TO_REDACT'], {}), '(coordinator.data, TO_REDACT)\n', (784, 813), False, 'from homeassistant.components.diagnostics.util import async_redact_data\n')] |
import torch
import torch.nn as nn
class CNNLayer(nn.Module):
"""
This layer is callable for 1d convolution and pooling functions with fatten result
"""
def __init__(self,
input_dim,
kernel_size=(3, 4, 5),
kernel_num=200):
"""
:param input_dim: input dim (type:int)
:param kernel_size: kernel size of convolution, default is (3,4,5) (type:tuple or list)
:param kernel_num: channel of each kernel, default is 200 (type:int)
"""
super(CNNLayer, self).__init__()
self.output_dim = len(kernel_size) * kernel_num
self.convolutions = nn.ModuleList(
[nn.Conv2d(1, kernel_num, (ks, input_dim)) for ks in kernel_size]
)
def forward(self, x):
con_ret = [c(x) for c in self.convolutions]
pooling_x = [nn.functional.max_pool1d(c.squeeze(-1), c.size()[2]) for c in con_ret]
flat_pool = torch.cat(pooling_x, 1)
return flat_pool # (batch, len(kernel_size)*kernel_num)
| [
"torch.cat",
"torch.nn.Conv2d"
] | [((960, 983), 'torch.cat', 'torch.cat', (['pooling_x', '(1)'], {}), '(pooling_x, 1)\n', (969, 983), False, 'import torch\n'), ((694, 735), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'kernel_num', '(ks, input_dim)'], {}), '(1, kernel_num, (ks, input_dim))\n', (703, 735), True, 'import torch.nn as nn\n')] |
"""Tests for lookup handler for env."""
# pylint: disable=no-self-use
# pyright: basic
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from runway.lookups.handlers.env import EnvLookup
if TYPE_CHECKING:
from ...factories import MockRunwayContext
ENV_VARS = {"str_val": "test"}
class TestEnvLookup:
"""Tests for EnvLookup."""
def test_handle(self, runway_context: MockRunwayContext) -> None:
"""Validate handle base functionality."""
runway_context.env.vars = ENV_VARS.copy()
result = EnvLookup.handle("str_val", context=runway_context)
assert result == "test"
def test_handle_not_found(self, runway_context: MockRunwayContext) -> None:
"""Validate exception when lookup cannot be resolved."""
runway_context.env.vars = ENV_VARS.copy()
with pytest.raises(ValueError):
EnvLookup.handle("NOT_VALID", context=runway_context)
| [
"pytest.raises",
"runway.lookups.handlers.env.EnvLookup.handle"
] | [((562, 613), 'runway.lookups.handlers.env.EnvLookup.handle', 'EnvLookup.handle', (['"""str_val"""'], {'context': 'runway_context'}), "('str_val', context=runway_context)\n", (578, 613), False, 'from runway.lookups.handlers.env import EnvLookup\n'), ((855, 880), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (868, 880), False, 'import pytest\n'), ((894, 947), 'runway.lookups.handlers.env.EnvLookup.handle', 'EnvLookup.handle', (['"""NOT_VALID"""'], {'context': 'runway_context'}), "('NOT_VALID', context=runway_context)\n", (910, 947), False, 'from runway.lookups.handlers.env import EnvLookup\n')] |
import pandas as pd
from os import listdir
from datetime import datetime as dtt
import logging
import json
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class WalletManager:
def __init__(self, client, states):
self.binance = client
self.states = states
self._wallet_history_file = "wallet_history.csv"
def update_balances(self):
logger.info("Updating account balances...")
r = self.binance.spot_account_trade.account_information()["content"]
balances = {}
for b in r["balances"]:
if float(b["free"]) == 0 and float(b["locked"]) == 0:
continue
balances[b["asset"]] = float(b["free"])
self.states["balances"] = balances
def fetch_trading_rules(self):
logger.info("Fetching trading rules...")
trade_rules = {}
r = self.binance.market_data.exchange_information()
for symbol in r["content"]["symbols"]:
pair = symbol["symbol"]
trade_rules[pair] = {}
for feelter in symbol["filters"]:
filter_type = feelter["filterType"]
trade_rules[pair][filter_type] = {}
for part in feelter.keys():
if part == "filterType":
continue
value = feelter[part]
if type(value) == str:
value = float(value)
trade_rules[pair][filter_type][part] = value
self.states["trade_rules"] = trade_rules
| [
"logging.getLogger"
] | [((117, 144), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (134, 144), False, 'import logging\n')] |
import tensorflow as tf
from wacky_rl import losses
from wacky_rl import models
from itertools import count
class SharedNetLoss(losses.WackyLoss):
_ids = count(0)
def __init__(self, alphas: list =None, sub_models=None, sub_agents=None, logger=None):
super().__init__()
self.alphas = alphas
self.sub_models = sub_models
self.sub_agents = sub_agents
self.logger = logger
self.id = next(self._ids)
if self.sub_models is None == self.sub_agents is None:
raise Exception('sub_models and sub_agents cant be both (un)defined at the same time')
def __call__(self, prediction, loss_args, *args, **kwargs):
#print(prediction)
loss = 0.0
if not self.sub_models is None:
for i in range(len(self.sub_models)):
#print(i)
loss = loss + self.alphas[i] * self.sub_models[i].train_step(prediction, *loss_args[i])
if not self.sub_agents is None:
for i in range(len(self.sub_agents)):
loss = loss + self.alphas[i] * self.sub_models[i].learn()
if not self.logger is None:
self.logger.log_mean('shared_net_loss_' + str(self.id), loss)
return loss | [
"itertools.count"
] | [((161, 169), 'itertools.count', 'count', (['(0)'], {}), '(0)\n', (166, 169), False, 'from itertools import count\n')] |
import cv2
from shapes import Myinit
class Circle(Myinit):
def __init__(self):
super(Circle, self).__init__()
self.center = (100,100)
self.radius = 50
self.color = (0,0,255)
self.thickness = -1
def form_shape(self):
self.img = cv2.circle(self.img, self.center, self.radius, self.color, -1)
def welcome(self):
print('Printing Circle...!')
def sides(self):
print("Circle has only arcs.")
def draw_shape(self):
self.welcome()
self.form_shape()
self.sides()
cv2.imshow("Circle", self.img)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"cv2.circle",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imshow"
] | [((297, 359), 'cv2.circle', 'cv2.circle', (['self.img', 'self.center', 'self.radius', 'self.color', '(-1)'], {}), '(self.img, self.center, self.radius, self.color, -1)\n', (307, 359), False, 'import cv2\n'), ((599, 629), 'cv2.imshow', 'cv2.imshow', (['"""Circle"""', 'self.img'], {}), "('Circle', self.img)\n", (609, 629), False, 'import cv2\n'), ((639, 653), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (650, 653), False, 'import cv2\n'), ((663, 686), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (684, 686), False, 'import cv2\n')] |
"""
Unit test script for convert.py
"""
import unittest
import pyeto
class TestConvert(unittest.TestCase):
def test_celsius2kelvin(self):
self.assertEqual(pyeto.celsius2kelvin(0), 273.15)
def test_kelvin2celsius(self):
self.assertEqual(pyeto.kelvin2celsius(273.15), 0.0)
def test_deg2rad(self):
self.assertEqual(pyeto.deg2rad(0), 0.0)
# Test values obtained form online conversion calculator
self.assertAlmostEqual(pyeto.deg2rad(-90), -1.5707963268, 10)
self.assertAlmostEqual(pyeto.deg2rad(90), 1.5707963268, 10)
self.assertAlmostEqual(pyeto.deg2rad(360), 6.2831853072, 10)
def test_rad2deg(self):
self.assertEqual(pyeto.rad2deg(0), 0.0)
# Test values obtained form online conversion calculator
self.assertAlmostEqual(pyeto.rad2deg(-1.5707963268), -90.0)
self.assertAlmostEqual(pyeto.rad2deg(1.5707963268), 90.0)
self.assertAlmostEqual(pyeto.rad2deg(6.2831853072), 360.0)
if __name__ == '__main__':
unittest.main() | [
"pyeto.celsius2kelvin",
"pyeto.deg2rad",
"pyeto.kelvin2celsius",
"unittest.main",
"pyeto.rad2deg"
] | [((1026, 1041), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1039, 1041), False, 'import unittest\n'), ((172, 195), 'pyeto.celsius2kelvin', 'pyeto.celsius2kelvin', (['(0)'], {}), '(0)\n', (192, 195), False, 'import pyeto\n'), ((266, 294), 'pyeto.kelvin2celsius', 'pyeto.kelvin2celsius', (['(273.15)'], {}), '(273.15)\n', (286, 294), False, 'import pyeto\n'), ((355, 371), 'pyeto.deg2rad', 'pyeto.deg2rad', (['(0)'], {}), '(0)\n', (368, 371), False, 'import pyeto\n'), ((474, 492), 'pyeto.deg2rad', 'pyeto.deg2rad', (['(-90)'], {}), '(-90)\n', (487, 492), False, 'import pyeto\n'), ((544, 561), 'pyeto.deg2rad', 'pyeto.deg2rad', (['(90)'], {}), '(90)\n', (557, 561), False, 'import pyeto\n'), ((612, 630), 'pyeto.deg2rad', 'pyeto.deg2rad', (['(360)'], {}), '(360)\n', (625, 630), False, 'import pyeto\n'), ((704, 720), 'pyeto.rad2deg', 'pyeto.rad2deg', (['(0)'], {}), '(0)\n', (717, 720), False, 'import pyeto\n'), ((823, 851), 'pyeto.rad2deg', 'pyeto.rad2deg', (['(-1.5707963268)'], {}), '(-1.5707963268)\n', (836, 851), False, 'import pyeto\n'), ((891, 918), 'pyeto.rad2deg', 'pyeto.rad2deg', (['(1.5707963268)'], {}), '(1.5707963268)\n', (904, 918), False, 'import pyeto\n'), ((957, 984), 'pyeto.rad2deg', 'pyeto.rad2deg', (['(6.2831853072)'], {}), '(6.2831853072)\n', (970, 984), False, 'import pyeto\n')] |
# important necessary classes and functions
import math
from node import Node
from queue_assignment import Queue
from essentials import transition_nodes
from heuristic import HeuristicCalc
from copy import deepcopy
from flatland.envs.rail_env import RailEnv
# class for solving single agent using sipp
class SIPP:
"""
function for creating nodes with safe intervals from a node
parameters:
node : node for which nodes with safe intervals needs to be created
time : earliest starting time of the node
max_len : maximum length for checking safe intervals for the given node
existing_paths : list of paths for all previously completed agents
heuristic_func : heuristic function for calculating heuristic
heuristic_weight: weight for the heuristic
returns:
list of nodes with respective safe intervals
"""
def configure_state(self, node, time, max_len, existing_paths, heuristic_func, heuristic_weight):
safe_interval_nodes = [] # nodes with their respective safe intervals
node.parent = None # removing any relation with other nodes
# iterating from time to max_len for finding safe intervals of the node
for i in range(time, max_len):
# checking for conflict with other agents in that specific time
conflict = False
for p in existing_paths:
if 0 <= i - 1 < len(p) and p[i - 1] == node.loc:
conflict = True
if 0 <= i < len(p) and p[i] == node.loc:
conflict = True
if 0 <= i + 1 < len(p) and p[i + 1] == node.loc:
conflict = True
if conflict:
# if a safe interval for the node has been found
# add the new node with respective safe interval to the list
if node.interval[1] != math.inf:
safe_interval_nodes.append(node)
# create a new node each time a conflict occurs
node = Node(loc=node.loc, direction=node.direction, goal=node.goal, heuristic_func=heuristic_func, heuristic_weight=heuristic_weight)
else:
# if safe interval for the new node has not been calculated
# change node properties to align with safe interval for the node
if node.interval[1] == math.inf:
node.interval[0] = i
node.time = i
node.g = i
node.f = node.g + (node.h * node.heuristic_weight)
node.interval[1] = i
safe_interval_nodes.append(node)
return safe_interval_nodes
"""
function to create a node list which corresponds to a valid path
parameters:
node : last node from which path needs to be created
stop : node upto which path needs to be created
returns:
list of nodes which corresponds to a valid path from stop to node
"""
def create_path(self, node, stop):
path = [node] # initialising list
# iterate until parent of current node equals to stop node
while node.parent != stop:
# finding the wait time for the parent of the node
i = node.interval[0] - node.parent.interval[0]
# node's parent waiting for specified waiting time
while i > 0:
path.append(node.parent)
i -= 1
node = node.parent
path.reverse()
return path
"""
function for finding a valid path for an agent from start to goal
parameters:
start : start position of the agent
start_direction : start direction of the agent
goal : goal position of the agent
local_env : rail environment
existing_paths : list of paths for all previously completed agents
heuristic_calc : calculating heuristic value
heuristic_weight: weight for the heuristic
max_time : maximum time to run the function
returns:
list of nodes which corresponds to a valid path from stop to node
"""
def get_path(self, start: tuple, start_direction: int, goal: tuple, local_env: RailEnv, existing_paths: list, heuristic_calc: HeuristicCalc, \
heuristic_weight: int=1, max_time: int=0):
queue = Queue() # queue for storing nodes
dict_safe_interval_nodes = {} # dictionary for storing nodes with safe intervals for a specific nodes
# finding the maximum length of among the existing paths
max_len = 0
for p in existing_paths:
if len(p) > max_len:
max_len = len(p)
# creating nodes with safe intervals for the start node
search_node = Node(loc=start, direction=start_direction, goal=goal, heuristic_func=heuristic_calc.calculate_heuristic, \
heuristic_weight=heuristic_weight)
node = search_node
safe_interval_nodes = self.configure_state(node, search_node.time, max_len * 2, existing_paths, heuristic_calc.calculate_heuristic, \
heuristic_weight=heuristic_weight)
# stroing the nodes with safe intervals inside the dictionary for the start node
key = heuristic_calc.create_key(loc=node.loc, direction=node.direction)
dict_safe_interval_nodes[key] = safe_interval_nodes
# only keep the start node with safe interval starting at 0
for node in safe_interval_nodes:
if node.loc == start and node.interval[0] == 0:
queue.push(node)
search_node = queue.pop()
# iterate until queue becomes exhausted or goal is reached
while search_node != None and search_node.loc != search_node.goal:
# setting children of search node to all the reachable nodes from the search node
search_node = transition_nodes(node=search_node, env=local_env, heuristic_func=heuristic_calc.calculate_heuristic, \
heuristic_weight=heuristic_weight, wait=False)
# iterate over all child in the children list
for child in search_node.children:
node = child # deep copying, otherwise any change will also change the original copy
# searching for nodes with safe interval for the current node
# if found the exisiting nodes with safe interval for the current node are used
# otherwise nodes with safe intervals are created for the current node
key = heuristic_calc.create_key(loc=node.loc, direction=node.direction)
if key in dict_safe_interval_nodes:
safe_interval_nodes = dict_safe_interval_nodes[key]
else:
safe_interval_nodes = self.configure_state(node, child.time, max_len * 2, existing_paths, heuristic_calc.calculate_heuristic, \
heuristic_weight=heuristic_weight)
dict_safe_interval_nodes[key] = safe_interval_nodes
# iterate over all nodes with safe intervals
for node in safe_interval_nodes:
# if the child nodes safe interval does not align with the movement from the current node
# i.e. either the safe interval of the child node start after two time steps from the end of current node's safe interval
# or the safe interval of the child node ends before two time steps from the start of current node's safe interval
if search_node.interval[1] + 1 < node.interval[0] or search_node.interval[0] + 1 > node.interval[1]:
continue
# make necessary changes to the child node for being consistent with the current node
new_node = deepcopy(node)
new_node.parent = search_node
if new_node.interval[0] < search_node.interval[0] + 1:
new_node.interval[0] = search_node.interval[0] + 1
new_node.time = search_node.interval[0] + 1
new_node.g = search_node.interval[0] + 1
new_node.f = new_node.g + (new_node.h * new_node.heuristic_weight)
queue.push(new_node)
search_node = queue.pop()
# return empty list if queue is exhausted
if search_node == None:
return []
# return list of nodes which correspond a valid path
return self.create_path(search_node, None) | [
"queue_assignment.Queue",
"node.Node",
"essentials.transition_nodes",
"copy.deepcopy"
] | [((4384, 4391), 'queue_assignment.Queue', 'Queue', ([], {}), '()\n', (4389, 4391), False, 'from queue_assignment import Queue\n'), ((4842, 4986), 'node.Node', 'Node', ([], {'loc': 'start', 'direction': 'start_direction', 'goal': 'goal', 'heuristic_func': 'heuristic_calc.calculate_heuristic', 'heuristic_weight': 'heuristic_weight'}), '(loc=start, direction=start_direction, goal=goal, heuristic_func=\n heuristic_calc.calculate_heuristic, heuristic_weight=heuristic_weight)\n', (4846, 4986), False, 'from node import Node\n'), ((5944, 6100), 'essentials.transition_nodes', 'transition_nodes', ([], {'node': 'search_node', 'env': 'local_env', 'heuristic_func': 'heuristic_calc.calculate_heuristic', 'heuristic_weight': 'heuristic_weight', 'wait': '(False)'}), '(node=search_node, env=local_env, heuristic_func=\n heuristic_calc.calculate_heuristic, heuristic_weight=heuristic_weight,\n wait=False)\n', (5960, 6100), False, 'from essentials import transition_nodes\n'), ((2084, 2215), 'node.Node', 'Node', ([], {'loc': 'node.loc', 'direction': 'node.direction', 'goal': 'node.goal', 'heuristic_func': 'heuristic_func', 'heuristic_weight': 'heuristic_weight'}), '(loc=node.loc, direction=node.direction, goal=node.goal, heuristic_func\n =heuristic_func, heuristic_weight=heuristic_weight)\n', (2088, 2215), False, 'from node import Node\n'), ((7883, 7897), 'copy.deepcopy', 'deepcopy', (['node'], {}), '(node)\n', (7891, 7897), False, 'from copy import deepcopy\n')] |
from scipy.optimize import fsolve
import numpy as np
from household_dist import HOUSEHOLD_DIST
def compute_household_infection_prob(prevalence, household_dist, SAR=0.3741):
"""
computes the probability that a household is infected given population level prevalence,
household size distribution and household secondary attack rate
INPUT:
prevalence = population level prevalence
household_dist = array-like, probability distribution of household sizes 1, 2, 3, ...
SAR = household secondary attack rate
"""
assert(np.absolute(np.sum(household_dist) - 1) < 1e-6)
exp_household_size = 0
exp_household_infection_multiplier = 0
for i in range(len(household_dist)):
exp_household_size += (i + 1) * household_dist[i]
exp_household_infection_multiplier += (1 + (i + 1 - 1) * SAR) * household_dist[i]
p = prevalence * exp_household_size / exp_household_infection_multiplier
return p
# deprecated, modified from Massey's groupt testing code
def match_prevalence(p_index, target_prevalence, household_dist, SAR):
# computes probability of a primary case given population level prevalence, household size distribution,
# and household secondary attack rate
# INPUT:
# p_index = probability of a primary case in the household
# target_prevalence = population level prevalence
# household_dist = probability distribution of household sizes 1,2,3,...
# SAR = household secondary attack rate
assert(np.absolute(np.sum(household_dist) - 1) < 1e-6)
exp_household_size = 0
for i in range(len(household_dist)):
exp_household_size += (i + 1) * household_dist[i]
frac_tot_infected = 0
for i in range(len(household_dist)):
frac_tot_infected += (i + 1) * (p_index + SAR * (1 - p_index) - SAR * (1 - p_index) ** (i + 1)) * household_dist[
i] / exp_household_size
return frac_tot_infected - target_prevalence
# deprecated, modified from Massey's group testing code
def eval_p_index(match_prevalence, target_prevalence, household_dist, SAR=0.3741):
return fsolve(match_prevalence, 0.005, args=(target_prevalence, household_dist, SAR))
if __name__ == '__main__':
household_dist = HOUSEHOLD_DIST['US']
print("household infection probability (US population): " + str(compute_household_infection_prob(0.01, household_dist)))
print("household infection probability (household size = 3): " + str(compute_household_infection_prob(0.01, household_dist=[0,0,1])))
| [
"scipy.optimize.fsolve",
"numpy.sum"
] | [((2099, 2177), 'scipy.optimize.fsolve', 'fsolve', (['match_prevalence', '(0.005)'], {'args': '(target_prevalence, household_dist, SAR)'}), '(match_prevalence, 0.005, args=(target_prevalence, household_dist, SAR))\n', (2105, 2177), False, 'from scipy.optimize import fsolve\n'), ((564, 586), 'numpy.sum', 'np.sum', (['household_dist'], {}), '(household_dist)\n', (570, 586), True, 'import numpy as np\n'), ((1508, 1530), 'numpy.sum', 'np.sum', (['household_dist'], {}), '(household_dist)\n', (1514, 1530), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
# from torch.nn import init
import functools
# from torch.autograd import Variable
import numpy as np
import pdb
###############################################################################
# Functions
###############################################################################
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
if hasattr(m.bias, 'data'):
m.bias.data.fill_(0)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d,
affine=False,
track_running_stats=True)
else:
raise NotImplementedError('normalization layer [%s] is not found' %
norm_type)
return norm_layer
def define_G(input_nc,
             output_nc,
             ngf,
             which_model_netG,
             norm='batch',
             use_dropout=False,
             gpu_ids=[],
             use_parallel=True,
             learn_residual=False):
    """Build a generator of the requested architecture, move it to the first
    GPU when gpu_ids is non-empty, and apply the standard weight init.

    Recognized models: 'resnet_9blocks', 'resnet_6blocks', 'unet_128',
    'unet_256'; anything else raises NotImplementedError.
    """
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert (torch.cuda.is_available())
    # Keyword arguments shared by every generator variant.
    common = dict(norm_layer=norm_layer,
                  use_dropout=use_dropout,
                  gpu_ids=gpu_ids,
                  use_parallel=use_parallel,
                  learn_residual=learn_residual)
    if which_model_netG == 'resnet_9blocks':
        netG = ResnetGenerator(input_nc, output_nc, ngf, n_blocks=9, **common)
    elif which_model_netG == 'resnet_6blocks':
        netG = ResnetGenerator(input_nc, output_nc, ngf, n_blocks=6, **common)
    elif which_model_netG == 'unet_128':
        netG = UnetGenerator(input_nc, output_nc, 7, ngf, **common)
    elif which_model_netG == 'unet_256':
        netG = UnetGenerator(input_nc, output_nc, 8, ngf, **common)
    else:
        raise NotImplementedError(
            'Generator model name [%s] is not recognized' % which_model_netG)
    if len(gpu_ids) > 0:
        netG.cuda(gpu_ids[0])
    netG.apply(weights_init)
    return netG
def define_D(input_nc,
             ndf,
             which_model_netD,
             n_layers_D=3,
             norm='batch',
             use_sigmoid=False,
             gpu_ids=[],
             use_parallel=True):
    """Build a PatchGAN discriminator, move it to the first GPU when
    gpu_ids is non-empty, and apply the standard weight init.

    'basic' uses 3 conv layers; 'n_layers' uses n_layers_D; anything else
    raises NotImplementedError.
    """
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert (torch.cuda.is_available())
    if which_model_netD == 'basic':
        n_layers = 3
    elif which_model_netD == 'n_layers':
        n_layers = n_layers_D
    else:
        raise NotImplementedError(
            'Discriminator model name [%s] is not recognized' %
            which_model_netD)
    netD = NLayerDiscriminator(input_nc,
                                ndf,
                                n_layers=n_layers,
                                norm_layer=norm_layer,
                                use_sigmoid=use_sigmoid,
                                gpu_ids=gpu_ids,
                                use_parallel=use_parallel)
    if use_gpu:
        netD.cuda(gpu_ids[0])
    netD.apply(weights_init)
    return netD
def print_network(net):
    """Print the network architecture and its total parameter count."""
    total = sum(p.numel() for p in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
##############################################################################
# Classes
##############################################################################
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from <NAME>'s architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
    """Resnet-based generator: 7x7 stem, two stride-2 downsampling convs
    (64 -> 128 -> 256 channels), ``n_blocks`` residual blocks at 256
    channels, two transposed-conv upsampling stages (256 -> 128 -> 64) and
    a 7x7 output conv with Tanh.

    Code and idea originally from Johnson et al.'s architecture:
    https://github.com/jcjohnson/fast-neural-style/

    NOTE(review): the down/up-sampling channel widths are hard-coded for
    ngf=64 — the ``ngf`` argument only affects the stem and final conv.
    Confirm this is intentional before passing a different ngf.
    """

    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 norm_layer=nn.BatchNorm2d,
                 use_dropout=False,
                 n_blocks=6,
                 gpu_ids=[],
                 use_parallel=True,
                 learn_residual=False,
                 padding_type='reflect'):
        """Build the generator.

        :param input_nc: number of input channels.
        :param output_nc: number of output channels.
        :param ngf: filters in the stem conv (see class note).
        :param norm_layer: normalization constructor (class or functools.partial).
        :param use_dropout: enable dropout inside the residual blocks.
        :param n_blocks: number of residual blocks (must be >= 0).
        :param gpu_ids: GPUs used by data_parallel in forward().
        :param use_parallel: allow data_parallel on CUDA inputs.
        :param learn_residual: output = clamp(input + output, -1, 1).
        :param padding_type: padding mode for the residual blocks.
        """
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        self.use_parallel = use_parallel
        self.learn_residual = learn_residual
        # InstanceNorm has no learnable affine bias by default, so the convs
        # need their own bias in that case.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        # Stem: reflection-padded 7x7 conv from input_nc to ngf channels.
        model = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
            norm_layer(ngf),
            nn.ReLU(True)
        ]
        # NOTE(review): unused — the downsampling loop it parameterized was
        # unrolled below with hard-coded channel widths.
        n_downsampling = 2
        # Downsampling: two stride-2 convs, 64 -> 128 -> 256 channels.
        model += [
            nn.Conv2d(64,
                      128,
                      kernel_size=3,
                      stride=2,
                      padding=1,
                      bias=use_bias),
            norm_layer(128),
            nn.ReLU(True),
            nn.Conv2d(128,
                      256,
                      kernel_size=3,
                      stride=2,
                      padding=1,
                      bias=use_bias),
            norm_layer(256),
            nn.ReLU(True)
        ]
        # Middle: n_blocks residual blocks at 256 channels.
        for i in range(n_blocks):
            model += [
                ResnetBlock(256,
                            padding_type=padding_type,
                            norm_layer=norm_layer,
                            use_dropout=use_dropout,
                            use_bias=use_bias)
            ]
        # Upsampling: two transposed convs, 256 -> 128 -> 64 channels.
        model += [
            nn.ConvTranspose2d(256,
                               128,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1,
                               bias=use_bias),
            norm_layer(128),
            nn.ReLU(True),
            nn.ConvTranspose2d(128,
                               64,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1,
                               bias=use_bias),
            norm_layer(64),
            nn.ReLU(True),
        ]
        # Output head: reflection-padded 7x7 conv to output_nc, Tanh to [-1, 1].
        model += [
            nn.ReflectionPad2d(3),
            nn.Conv2d(64, output_nc, kernel_size=7, padding=0),
            nn.Tanh()
        ]
        self.model = nn.Sequential(*model)

    def forward(self, input):
        """Run the generator; optionally adds the input as a residual."""
        # data_parallel only when GPUs are configured, the input is already
        # a CUDA tensor, and parallelism was requested.
        if self.gpu_ids and isinstance(
                input.data, torch.cuda.FloatTensor) and self.use_parallel:
            output = nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            output = self.model(input)
        if self.learn_residual:
            # Residual learning: add input, clamp to the Tanh output range.
            output = torch.clamp(input + output, min=-1, max=1)
        return output
# Define a resnet block
class ResnetBlock(nn.Module):
    """Residual block: ``out = x + conv_block(x)``.

    The conv block is pad -> 3x3 conv -> norm -> ReLU -> [dropout] ->
    pad -> 3x3 conv -> norm.

    Bug fix: the previous implementation built ``blocks`` with a conditional
    expression whose precedence made the whole ``a + b + c if use_dropout
    else ...`` bind to the ``if``, so the block only ever contained ONE conv
    stage (with dropout: conv/norm/relu/dropout; without: conv/norm). The
    block now always contains the intended two conv stages, each with its
    own freshly constructed modules (no weight sharing).
    """

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """
        Args:
            dim: number of input/output channels.
            padding_type: 'reflect', 'replicate' or 'zero'.
            norm_layer: normalization constructor (e.g. nn.BatchNorm2d).
            use_dropout: insert Dropout(0.5) between the two conv stages.
            use_bias: whether the convolutions carry a bias term.
        """
        super(ResnetBlock, self).__init__()
        blocks = self._pad_and_conv(dim, padding_type, use_bias)
        blocks += [norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            blocks.append(nn.Dropout(0.5))
        # Second conv stage: fresh modules, not shared with the first.
        blocks += self._pad_and_conv(dim, padding_type, use_bias)
        blocks.append(norm_layer(dim))
        self.conv_block = nn.Sequential(*blocks)

    @staticmethod
    def _pad_and_conv(dim, padding_type, use_bias):
        """Return the padding+conv modules for one 3x3 conv stage."""
        if padding_type == 'reflect':
            return [nn.ReflectionPad2d(1),
                    nn.Conv2d(dim, dim, kernel_size=3, bias=use_bias)]
        if padding_type == 'replicate':
            return [nn.ReplicationPad2d(1),
                    nn.Conv2d(dim, dim, kernel_size=3, bias=use_bias)]
        if padding_type == 'zero':
            return [nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=use_bias)]
        raise NotImplementedError('padding [%s] is not implemented' %
                                  padding_type)

    def forward(self, x):
        """Apply the residual connection."""
        out = x + self.conv_block(x)
        return out
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
    """U-Net generator.

    |num_downs| is the number of downsamplings; e.g. with num_downs == 7 an
    image of size 128x128 becomes 1x1 at the bottleneck.

    Bug fix: removed a leftover ``pdb.set_trace()`` that dropped into the
    debugger every time a generator was constructed.
    """

    def __init__(self,
                 input_nc,
                 output_nc,
                 num_downs,
                 ngf=64,
                 norm_layer=nn.BatchNorm2d,
                 use_dropout=False,
                 gpu_ids=[],
                 use_parallel=True,
                 learn_residual=False):
        """
        :param input_nc: input channels (must equal output_nc).
        :param output_nc: output channels.
        :param num_downs: total downsampling steps (expected >= 5).
        :param ngf: base filter count.
        :param norm_layer: normalization constructor.
        :param use_dropout: enable dropout in the repeated inner blocks.
        :param gpu_ids: GPUs used by data_parallel in forward().
        :param use_parallel: allow data_parallel on CUDA inputs.
        :param learn_residual: output = clamp(input + output, -1, 1).
        """
        super(UnetGenerator, self).__init__()
        self.gpu_ids = gpu_ids
        self.use_parallel = use_parallel
        self.learn_residual = learn_residual
        # currently support only input_nc == output_nc
        assert (input_nc == output_nc)
        # Construct the U-Net from the innermost block outward.
        unet_block = UnetSkipConnectionBlock(ngf * 8,
                                             ngf * 8,
                                             norm_layer=norm_layer,
                                             innermost=True)
        for i in range(num_downs - 5):
            unet_block = UnetSkipConnectionBlock(ngf * 8,
                                                 ngf * 8,
                                                 unet_block,
                                                 norm_layer=norm_layer,
                                                 use_dropout=use_dropout)
        unet_block = UnetSkipConnectionBlock(ngf * 4,
                                             ngf * 8,
                                             unet_block,
                                             norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf * 2,
                                             ngf * 4,
                                             unet_block,
                                             norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf,
                                             ngf * 2,
                                             unet_block,
                                             norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(output_nc,
                                             ngf,
                                             unet_block,
                                             outermost=True,
                                             norm_layer=norm_layer)
        self.model = unet_block

    def forward(self, input):
        """Run the generator; optionally adds the input as a residual."""
        if self.gpu_ids and isinstance(
                input.data, torch.cuda.FloatTensor) and self.use_parallel:
            output = nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            output = self.model(input)
        if self.learn_residual:
            output = input + output
            output = torch.clamp(output, min=-1, max=1)
        return output
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
    """U-Net building block with a skip connection.

    X -------------------identity---------------------- X
      |-- downsampling -- |submodule| -- upsampling --|

    Non-outermost blocks concatenate their input onto their output along the
    channel axis, so they emit 2*outer_nc channels.

    Bug fixes:
    - removed a leftover ``pdb.set_trace()`` that halted construction;
    - the layer lists were previously nested inside ``model`` (e.g.
      ``model = [dModel, submodule, uModel]``), handing raw Python lists to
      ``nn.Sequential`` — which rejects non-Module elements. The lists are
      now concatenated into one flat module list.
    """

    def __init__(self,
                 outer_nc,
                 inner_nc,
                 submodule=None,
                 outermost=False,
                 innermost=False,
                 norm_layer=nn.BatchNorm2d,
                 use_dropout=False):
        """
        :param outer_nc: channels entering (and leaving) this block.
        :param inner_nc: channels after the downsampling conv.
        :param submodule: nested inner block (None for innermost).
        :param outermost: this is the top-level block (no skip concat, Tanh out).
        :param innermost: this is the bottleneck block (no submodule).
        :param norm_layer: normalization constructor.
        :param use_dropout: append Dropout(0.5) after the block's layers.
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm has no learnable affine bias, so convs need their own.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        dConv = nn.Conv2d(outer_nc,
                          inner_nc,
                          kernel_size=4,
                          stride=2,
                          padding=1,
                          bias=use_bias)
        dRelu = nn.LeakyReLU(0.2, True)
        dNorm = norm_layer(inner_nc)
        uRelu = nn.ReLU(True)
        uNorm = norm_layer(outer_nc)
        if outermost:
            # Top level: submodule returns 2*inner_nc channels (skip concat).
            uConv = nn.ConvTranspose2d(inner_nc * 2,
                                       outer_nc,
                                       kernel_size=4,
                                       stride=2,
                                       padding=1)
            down = [dConv]
            up = [uRelu, uConv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            # Bottleneck: no submodule, upsample straight back to outer_nc.
            uConv = nn.ConvTranspose2d(inner_nc,
                                       outer_nc,
                                       kernel_size=4,
                                       stride=2,
                                       padding=1,
                                       bias=use_bias)
            down = [dRelu, dConv]
            up = [uRelu, uConv, uNorm]
            model = down + up
        else:
            uConv = nn.ConvTranspose2d(inner_nc * 2,
                                       outer_nc,
                                       kernel_size=4,
                                       stride=2,
                                       padding=1,
                                       bias=use_bias)
            down = [dRelu, dConv, dNorm]
            up = [uRelu, uConv, uNorm]
            model = down + [submodule] + up
        model += [nn.Dropout(0.5)] if use_dropout else []
        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:
            # Skip connection: concatenate input with output along channels.
            return torch.cat([self.model(x), x], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator: stride-2 conv/norm/LeakyReLU stages, one
    stride-1 stage, then a 1-channel conv producing a patch score map,
    optionally squashed by a Sigmoid.
    """

    def __init__(self,
                 input_nc,
                 ndf=64,
                 n_layers=3,
                 norm_layer=nn.BatchNorm2d,
                 use_sigmoid=False,
                 gpu_ids=[],
                 use_parallel=True):
        """
        :param input_nc: number of input channels.
        :param ndf: base number of filters.
        :param n_layers: number of conv stages (first stage included).
        :param norm_layer: normalization constructor (class or functools.partial).
        :param use_sigmoid: append a Sigmoid to map scores into (0, 1).
        :param gpu_ids: GPUs used by data_parallel in forward().
        :param use_parallel: allow data_parallel on CUDA inputs.
        """
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        self.use_parallel = use_parallel
        # InstanceNorm has no learnable bias, so the convs need their own.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        kw = 4
        padw = int(np.ceil((kw - 1) / 2))
        layers = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True),
        ]
        # Stride-2 stages: width doubles each stage, capped at 8*ndf.
        mult = 1
        for n in range(1, n_layers):
            prev, mult = mult, min(2 ** n, 8)
            layers += [
                nn.Conv2d(ndf * prev,
                          ndf * mult,
                          kernel_size=kw,
                          stride=2,
                          padding=padw,
                          bias=use_bias),
                norm_layer(ndf * mult),
                nn.LeakyReLU(0.2, True),
            ]
        # One stride-1 stage before the output conv.
        prev, mult = mult, min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * prev,
                      ndf * mult,
                      kernel_size=kw,
                      stride=1,
                      padding=padw,
                      bias=use_bias),
            norm_layer(ndf * mult),
            nn.LeakyReLU(0.2, True),
        ]
        # Single-channel patch score map.
        layers.append(nn.Conv2d(ndf * mult, 1, kernel_size=kw, stride=1, padding=padw))
        if use_sigmoid:
            layers.append(nn.Sigmoid())
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Score `input`; uses data_parallel on CUDA inputs when configured."""
        if len(self.gpu_ids) and isinstance(
                input.data, torch.cuda.FloatTensor) and self.use_parallel:
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
| [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.parallel.data_parallel",
"numpy.ceil",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.ReflectionPad2d",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"functools.partial",
"torch.nn.ReplicationPad2d",
... | [((739, 785), 'functools.partial', 'functools.partial', (['nn.BatchNorm2d'], {'affine': '(True)'}), '(nn.BatchNorm2d, affine=True)\n', (756, 785), False, 'import functools\n'), ((1527, 1552), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1550, 1552), False, 'import torch\n'), ((4480, 4505), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4503, 4505), False, 'import torch\n'), ((10402, 10423), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (10415, 10423), True, 'import torch.nn as nn\n'), ((12238, 12260), 'torch.nn.Sequential', 'nn.Sequential', (['*blocks'], {}), '(*blocks)\n', (12251, 12260), True, 'import torch.nn as nn\n'), ((15338, 15353), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (15351, 15353), False, 'import pdb\n'), ((18169, 18184), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (18182, 18184), False, 'import pdb\n'), ((18417, 18502), 'torch.nn.Conv2d', 'nn.Conv2d', (['outer_nc', 'inner_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias\n )\n', (18426, 18502), True, 'import torch.nn as nn\n'), ((18644, 18667), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (18656, 18667), True, 'import torch.nn as nn\n'), ((18721, 18734), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (18728, 18734), True, 'import torch.nn as nn\n'), ((20917, 20938), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (20930, 20938), True, 'import torch.nn as nn\n'), ((23460, 23484), 'torch.nn.Sequential', 'nn.Sequential', (['*sequence'], {}), '(*sequence)\n', (23473, 23484), True, 'import torch.nn as nn\n'), ((841, 917), 'functools.partial', 'functools.partial', (['nn.InstanceNorm2d'], {'affine': '(False)', 'track_running_stats': '(True)'}), '(nn.InstanceNorm2d, affine=False, track_running_stats=True)\n', (858, 917), False, 
'import functools\n'), ((7472, 7493), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(3)'], {}), '(3)\n', (7490, 7493), True, 'import torch.nn as nn\n'), ((7507, 7572), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_nc', 'ngf'], {'kernel_size': '(7)', 'padding': '(0)', 'bias': 'use_bias'}), '(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias)\n', (7516, 7572), True, 'import torch.nn as nn\n'), ((7615, 7628), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (7622, 7628), True, 'import torch.nn as nn\n'), ((8015, 8084), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(64, 128, kernel_size=3, stride=2, padding=1, bias=use_bias)\n', (8024, 8084), True, 'import torch.nn as nn\n'), ((8237, 8250), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8244, 8250), True, 'import torch.nn as nn\n'), ((8264, 8334), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(128, 256, kernel_size=3, stride=2, padding=1, bias=use_bias)\n', (8273, 8334), True, 'import torch.nn as nn\n'), ((8487, 8500), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8494, 8500), True, 'import torch.nn as nn\n'), ((9527, 9628), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(128)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)', 'bias': 'use_bias'}), '(256, 128, kernel_size=3, stride=2, padding=1,\n output_padding=1, bias=use_bias)\n', (9545, 9628), True, 'import torch.nn as nn\n'), ((9853, 9866), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (9860, 9866), True, 'import torch.nn as nn\n'), ((9880, 9980), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(64)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)', 'bias': 'use_bias'}), '(128, 64, kernel_size=3, stride=2, padding=1,\n output_padding=1, 
bias=use_bias)\n', (9898, 9980), True, 'import torch.nn as nn\n'), ((10204, 10217), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (10211, 10217), True, 'import torch.nn as nn\n'), ((10261, 10282), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(3)'], {}), '(3)\n', (10279, 10282), True, 'import torch.nn as nn\n'), ((10296, 10346), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', 'output_nc'], {'kernel_size': '(7)', 'padding': '(0)'}), '(64, output_nc, kernel_size=7, padding=0)\n', (10305, 10346), True, 'import torch.nn as nn\n'), ((10360, 10369), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (10367, 10369), True, 'import torch.nn as nn\n'), ((10591, 10649), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'input', 'self.gpu_ids'], {}), '(self.model, input, self.gpu_ids)\n', (10616, 10649), True, 'import torch.nn as nn\n'), ((10794, 10836), 'torch.clamp', 'torch.clamp', (['(input + output)'], {'min': '(-1)', 'max': '(1)'}), '(input + output, min=-1, max=1)\n', (10805, 10836), False, 'import torch\n'), ((17388, 17446), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'input', 'self.gpu_ids'], {}), '(self.model, input, self.gpu_ids)\n', (17413, 17446), True, 'import torch.nn as nn\n'), ((17589, 17623), 'torch.clamp', 'torch.clamp', (['output'], {'min': '(-1)', 'max': '(1)'}), '(output, min=-1, max=1)\n', (17600, 17623), False, 'import torch\n'), ((18815, 18893), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)\n', (18833, 18893), True, 'import torch.nn as nn\n'), ((22170, 22191), 'numpy.ceil', 'np.ceil', (['((kw - 1) / 2)'], {}), '((kw - 1) / 2)\n', (22177, 22191), True, 'import numpy as np\n'), ((22226, 22290), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_nc', 'ndf'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw'}), '(input_nc, ndf, 
kernel_size=kw, stride=2, padding=padw)\n', (22235, 22290), True, 'import torch.nn as nn\n'), ((22304, 22327), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (22316, 22327), True, 'import torch.nn as nn\n'), ((22965, 23068), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult_prev)', '(ndf * nf_mult)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw', 'bias': 'use_bias'}), '(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1,\n padding=padw, bias=use_bias)\n', (22974, 23068), True, 'import torch.nn as nn\n'), ((23227, 23250), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (23239, 23250), True, 'import torch.nn as nn\n'), ((23296, 23363), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult)', '(1)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw'}), '(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)\n', (23305, 23363), True, 'import torch.nn as nn\n'), ((23655, 23713), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'input', 'self.gpu_ids'], {}), '(self.model, input, self.gpu_ids)\n', (23680, 23713), True, 'import torch.nn as nn\n'), ((11439, 11460), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (11457, 11460), True, 'import torch.nn as nn\n'), ((11478, 11527), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'bias': 'use_bias'}), '(dim, dim, kernel_size=3, bias=use_bias)\n', (11487, 11527), True, 'import torch.nn as nn\n'), ((11586, 11608), 'torch.nn.ReplicationPad2d', 'nn.ReplicationPad2d', (['(1)'], {}), '(1)\n', (11605, 11608), True, 'import torch.nn as nn\n'), ((11626, 11675), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'bias': 'use_bias'}), '(dim, dim, kernel_size=3, bias=use_bias)\n', (11635, 11675), True, 'import torch.nn as nn\n'), ((11724, 11784), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': 
'use_bias'}), '(dim, dim, kernel_size=3, padding=1, bias=use_bias)\n', (11733, 11784), True, 'import torch.nn as nn\n'), ((19115, 19124), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (19122, 19124), True, 'import torch.nn as nn\n'), ((19555, 19648), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['inner_nc', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1,\n bias=use_bias)\n', (19573, 19648), True, 'import torch.nn as nn\n'), ((20258, 20356), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(inner_nc * 2)', 'outer_nc'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding\n =1, bias=use_bias)\n', (20276, 20356), True, 'import torch.nn as nn\n'), ((22533, 22636), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult_prev)', '(ndf * nf_mult)'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw', 'bias': 'use_bias'}), '(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2,\n padding=padw, bias=use_bias)\n', (22542, 22636), True, 'import torch.nn as nn\n'), ((22823, 22846), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (22835, 22846), True, 'import torch.nn as nn\n'), ((23424, 23436), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (23434, 23436), True, 'import torch.nn as nn\n'), ((11924, 11939), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (11934, 11939), True, 'import torch.nn as nn\n'), ((20704, 20719), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (20714, 20719), True, 'import torch.nn as nn\n'), ((11893, 11906), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (11900, 11906), True, 'import torch.nn as nn\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 16:12:34 2019
@author: bosmanjw
"""
import httpInterface
import sys
sys.path.append('../')
sys.path.append('../shamir_ss')
from shamir_ss.reconstruct_value import reconstruct_value
import config
import asyncio
import json
class Combiner(object):
    """Collects secret shares per request id and, once enough have arrived,
    reconstructs the secret and forwards the result."""

    def __init__(self, SSScheme, responseUrl):
        self.requestMap = {}
        self.SSScheme = SSScheme
        self.requestor = httpInterface.Requestor(responseUrl)
        self.listener = httpInterface.Listener(self.handle_request)
        self.loop = asyncio.get_event_loop()

    def ensure_key_exists(self, key):
        """Create an empty share list for `key` if none exists yet."""
        self.requestMap.setdefault(key, [])

    def has_sufficient_data(self, requestId, share):
        """True once at least `share['r']` shares were collected for this request."""
        collected = self.requestMap[requestId]
        return len(collected) >= share['r']

    def request(self, requestId, share):
        """Register one incoming share; reconstruct and forward when enough arrived."""
        print(type(share))
        self.ensure_key_exists(requestId)
        self.requestMap[requestId].append(share)
        if not self.has_sufficient_data(requestId, share):
            return
        print('Sufficient shares collected!')
        shares = self.requestMap[requestId]
        # TODO (Joost): queue task in worker pool
        value = reconstruct_value(shares, SSScheme=self.SSScheme)
        data = {"id": requestId, "result": json.dumps(value)}
        print('Combining completed!', data)
        asyncio.ensure_future(self.requestor.send_request(data))

    def handle_request(self, data):
        """HTTP callback: unpack the request body and process the contained share."""
        requestId = data['id']
        share = data['share']
        return self.request(requestId, share)

    def start(self):
        """Begin listening for incoming share submissions."""
        self.listener.start()
if __name__ == "__main__":
    # Stand up a combiner configured from config.py and start listening
    # for incoming share submissions.
    combiner = Combiner(config.SSScheme, config.phoneInterfaceUrl)
    combiner.start()
| [
"httpInterface.Requestor",
"json.dumps",
"httpInterface.Listener",
"asyncio.get_event_loop",
"sys.path.append",
"shamir_ss.reconstruct_value.reconstruct_value"
] | [((129, 151), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (144, 151), False, 'import sys\n'), ((153, 184), 'sys.path.append', 'sys.path.append', (['"""../shamir_ss"""'], {}), "('../shamir_ss')\n", (168, 184), False, 'import sys\n'), ((453, 489), 'httpInterface.Requestor', 'httpInterface.Requestor', (['responseUrl'], {}), '(responseUrl)\n', (476, 489), False, 'import httpInterface\n'), ((515, 558), 'httpInterface.Listener', 'httpInterface.Listener', (['self.handle_request'], {}), '(self.handle_request)\n', (537, 558), False, 'import httpInterface\n'), ((580, 604), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (602, 604), False, 'import asyncio\n'), ((1344, 1393), 'shamir_ss.reconstruct_value.reconstruct_value', 'reconstruct_value', (['shares'], {'SSScheme': 'self.SSScheme'}), '(shares, SSScheme=self.SSScheme)\n', (1361, 1393), False, 'from shamir_ss.reconstruct_value import reconstruct_value\n'), ((1442, 1459), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (1452, 1459), False, 'import json\n')] |
"""Main object for getting the PECO outage counter data."""
from __future__ import annotations
from typing import Any
import aiohttp
from .const import *
from pydantic import BaseModel
class PecoOutageApi:
"""Main object for getting the PECO outage counter data."""
    def __init__(self) -> None:
        """Initialize the PECO outage counter object."""
        # All functionality is exposed through static methods; no instance
        # state is required.
        pass
    @staticmethod
    async def get_outage_count(county: str, websession: aiohttp.ClientSession | None=None) -> OutageResults:
        """Get the outage count for the given county.

        Fetches the metadata document at API_URL to discover the current
        report id, then downloads that report and extracts the per-county
        figures.

        :param county: county name; must be one of COUNTY_LIST.
        :param websession: optional shared aiohttp session; a temporary one
            is created when omitted.
        :raises InvalidCountyError: if the county is unknown.
        :raises HttpError: on a non-200 response (internal-session branch only).
        :raises BadJSONError: if the payload lacks the expected keys.
        :returns: OutageResults for the county (all zeros when the county
            does not appear in the report).
        """
        if county not in COUNTY_LIST:
            raise InvalidCountyError(f"{county} is not a valid county")
        # First request: metadata that points at the current report.
        if websession is not None:
            async with websession.get(API_URL) as r:
                data = await r.json()
        else:
            async with aiohttp.ClientSession() as session:
                async with session.get(API_URL) as r:
                    data = await r.json()
                # NOTE(review): only this internally-created-session branch
                # checks the HTTP status; the caller-supplied-session branch
                # above does not — confirm whether that is intentional.
                if r.status != 200:
                    raise HttpError("Error getting PECO outage counter data")
        try:
            id_that_has_the_report: str = data["data"]["interval_generation_data"]
        except KeyError as err:
            raise BadJSONError("Error getting PECO outage counter data") from err
        report_url = REPORT_URL.format(id_that_has_the_report)
        # Second request: the actual outage report.
        if websession is not None:
            async with websession.get(report_url) as r:
                data = await r.json()
        else:
            async with aiohttp.ClientSession() as session:
                async with session.get(report_url) as r:
                    data = await r.json()
                if r.status != 200:
                    raise HttpError("Error getting PECO outage counter data")
        try:
            areas: list[dict[str, Any]] = data["file_data"]["areas"]
        except KeyError as err:
            raise BadJSONError("Bad JSON returned from PECO outage counter") from err
        # Default result: all zeros if the county never appears in the report.
        outage_result: OutageResults = OutageResults(customers_out=0, percent_customers_out=0, outage_count=0, customers_served=0)
        for area in areas:
            if area["name"] == county:
                customers_out = area["cust_a"]["val"]
                percent_customers_out = area["percent_cust_a"]["val"]
                outage_count = area["n_out"]
                customers_served = area["cust_s"]
                outage_result = OutageResults(
                    customers_out=customers_out,
                    percent_customers_out=percent_customers_out,
                    outage_count=outage_count,
                    customers_served=customers_served
                )
        return outage_result
    @staticmethod
    async def get_outage_totals(websession: aiohttp.ClientSession | None=None) -> OutageResults:
        """Get the outage totals for the given county and mode.

        Same two-step fetch as get_outage_count, but returns the report's
        service-wide "totals" section instead of a single county.

        :param websession: optional shared aiohttp session; a temporary one
            is created when omitted.
        :raises HttpError: on a non-200 response (internal-session branch only).
        :raises BadJSONError: if the payload lacks the expected keys.
        :returns: OutageResults built from the report totals.
        """
        # First request: metadata that points at the current report.
        if websession is not None:
            async with websession.get(API_URL) as r:
                data = await r.json()
        else:
            async with aiohttp.ClientSession() as session:
                async with session.get(API_URL) as r:
                    data = await r.json()
                # NOTE(review): status is only checked on this branch, not
                # when the caller supplies a session — confirm intent.
                if r.status != 200:
                    raise HttpError("Error getting PECO outage counter data")
        try:
            id_that_has_the_report: str = data["data"]["interval_generation_data"]
        except KeyError as err:
            raise BadJSONError("Error getting PECO outage counter data") from err
        report_url = REPORT_URL.format(id_that_has_the_report)
        # Second request: the actual outage report.
        if websession is not None:
            async with websession.get(report_url) as r:
                data = await r.json()
        else:
            async with aiohttp.ClientSession() as session:
                async with session.get(report_url) as r:
                    data = await r.json()
                if r.status != 200:
                    raise HttpError("Error getting PECO outage counter data")
        try:
            totals = data["file_data"]["totals"]
        except KeyError as err:
            raise BadJSONError("Bad JSON returned from PECO outage counter") from err
        return OutageResults(
            customers_out=totals["cust_a"]["val"],
            percent_customers_out=totals["percent_cust_a"]["val"],
            outage_count=totals["n_out"],
            customers_served=totals["cust_s"]
        )
    @staticmethod
    async def meter_check(phone_number: str, websession: aiohttp.ClientSession | None=None) -> bool:
        """Check if power is being delivered to the house.

        Three-step flow: look up the account by phone number, run a meter
        precheck, then ping the smart meter and return its ping result.
        The responses are parsed with content_type='text/html' — presumably
        the endpoint mislabels its JSON payload; confirm against the API.

        :param phone_number: 10-digit account phone number.
        :param websession: optional shared aiohttp session; temporary ones
            are created when omitted.
        :raises ValueError: if the phone number is not 10 numeric digits.
        :raises HttpError: if any step reports success != True.
        :raises IncompatibleMeterError: if the meter is not a smart meter.
        :raises UnresponsiveMeterError: if the precheck says the meter
            cannot be pinged.
        :returns: True if the meter ping indicates power is delivered.
        """
        if len(phone_number) != 10:
            raise ValueError("Phone number must be 10 digits")
        if not phone_number.isdigit():
            raise ValueError("Phone number must be numeric")
        # Step 1: resolve the account (auid + account number) by phone number.
        data1: dict[str, Any]
        if websession is not None:
            async with websession.post(QUERY_URL, json={"phone": phone_number}) as response:
                data1 = await response.json(content_type='text/html')
        else:
            async with aiohttp.ClientSession() as session:
                async with session.post(QUERY_URL, json={"phone": phone_number}) as response:
                    data1 = await response.json(content_type='text/html')
        if data1["success"] != True:
            raise HttpError("Error checking meter")
        if data1["data"][0]["smartMeterStatus"] == False:
            raise IncompatibleMeterError("Meter is not compatible with smart meter checking")
        auid = data1["data"][0]["auid"]
        acc_number = data1["data"][0]["accountNumber"]
        # Step 2: precheck that the meter can be pinged at all.
        data2: dict[str, Any]
        if websession is not None:
            async with websession.post(PRECHECK_URL, json={"auid": auid, "accountNumber": acc_number, "phone": phone_number}) as response:
                data2 = await response.json(content_type='text/html')
        else:
            async with aiohttp.ClientSession() as session:
                async with session.post(PRECHECK_URL, json={"auid": auid, "accountNumber": acc_number, "phone": phone_number}) as response:
                    data2 = await response.json(content_type='text/html')
        if data2["success"] != True:
            raise HttpError("Error checking meter")
        if data2["data"]["meterPing"] == False:
            raise UnresponsiveMeterError("Meter is not responding")
        # Step 3: actually ping the meter and report its result.
        ping_result: bool
        if websession is not None:
            async with websession.post(PING_URL, json={"auid": auid, "accountNumber": acc_number}) as response:
                data3 = await response.json(content_type='text/html')
                if data3["success"] != True:
                    raise HttpError("Error checking meter")
                ping_result = bool(data3["data"]["meterInfo"]["pingResult"])
        else:
            async with aiohttp.ClientSession() as session:
                async with session.post(PING_URL, json={"auid": auid, "accountNumber": acc_number}) as response:
                    data3 = await response.json(content_type='text/html')
                    if data3["success"] != True:
                        raise HttpError("Error checking meter")
                    ping_result = bool(data3["data"]["meterInfo"]["pingResult"])
        return ping_result
@staticmethod
async def get_map_alerts(websession: aiohttp.ClientSession | None=None) -> AlertResults:
"""Get the alerts that show on the outage map."""
if websession is not None:
async with websession.get(API_URL) as r:
data = await r.json()
else:
async with aiohttp.ClientSession() as session:
async with session.get(API_URL) as r:
data = await r.json()
if r.status != 200:
raise HttpError("Error getting PECO outage counter data")
try:
alert_deployment_id: str = data["controlCenter"]["alertDeploymentId"]
except KeyError as err:
raise BadJSONError("Error getting PECO outage counter data") from err
alerts_url = ALERTS_URL.format(alert_deployment_id)
if websession is not None:
async with websession.get(alerts_url) as r:
data1 = await r.json()
else:
async with aiohttp.ClientSession() as session:
async with session.get(alerts_url) as r:
data1 = await r.json()
if r.status != 200:
raise HttpError("Error getting PECO outage counter data")
try:
alert = data1["_embedded"]["deployedAlertResourceList"][0]["data"][0] # There is always only one alert
except KeyError as err:
# I am making the assumption that there are no alerts. I have never seen the response when there are no alerts.
# This API is undocumented. If anyone finds out the response when there are no alerts, please open an issue.
return AlertResults(
alert_content="",
alert_title="",
)
if alert["bannerTitle"] == "Using the Outage Map": # junk data
return AlertResults(
alert_content="",
alert_title="",
)
parsed_content = TAG_RE.sub('', alert["content"].replace("<br />", "\n\n"))
return AlertResults(
alert_content=parsed_content,
alert_title=alert["bannerTitle"],
)
class OutageResults(BaseModel):
    """Parsed outage-counter totals returned by the PECO API."""
    customers_out: int  # customers currently without power ("cust_a")
    percent_customers_out: int  # percentage of customers out ("percent_cust_a")
    outage_count: int  # number of distinct outages ("n_out")
    customers_served: int  # total customers served ("cust_s")
class AlertResults(BaseModel):
    """Alert shown on the outage map; both fields are "" when no alert."""
    alert_content: str  # alert body with HTML tags stripped
    alert_title: str  # the alert's banner title
class InvalidCountyError(ValueError):
    """Raised when the county is invalid."""
class HttpError(Exception):
    """Raised when a response has a non-200 status or reports failure."""
class BadJSONError(Exception):
    """Raised when the JSON is invalid or lacks an expected key."""
class MeterError(Exception):
    """Generic meter error; base class for meter-specific failures."""
class IncompatibleMeterError(MeterError):
    """Raised when the meter is not compatible with the API."""
class UnresponsiveMeterError(MeterError):
    """Raised when the meter is not responding."""
| [
"aiohttp.ClientSession"
] | [((839, 862), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (860, 862), False, 'import aiohttp\n'), ((1527, 1550), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1548, 1550), False, 'import aiohttp\n'), ((3050, 3073), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (3071, 3073), False, 'import aiohttp\n'), ((3730, 3753), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (3751, 3753), False, 'import aiohttp\n'), ((5068, 5091), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (5089, 5091), False, 'import aiohttp\n'), ((5939, 5962), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (5960, 5962), False, 'import aiohttp\n'), ((6883, 6906), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (6904, 6906), False, 'import aiohttp\n'), ((7673, 7696), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (7694, 7696), False, 'import aiohttp\n'), ((8366, 8389), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (8387, 8389), False, 'import aiohttp\n')] |
# PCA-based image retrieval baseline for the Shopee product-matching data:
# fit a 100-component PCA base on training image features, then label each
# test image by its nearest neighbours (reports top-1 / top-5 accuracy).
# Assumes mydataset_PCA yields per-image feature rows — confirm in dataset.py.
import cv2
import numpy as np
import torch
from matplotlib import pyplot as plt
from dataset import mydataset_PCA
from torch.utils.data import DataLoader
from tqdm import tqdm
import pickle
import sklearn.decomposition as dc
import argparse
parser = argparse.ArgumentParser(description='Process some integers.')
# NOTE(review): argparse type=bool treats any non-empty string as True;
# consider a proper str2bool converter.
parser.add_argument(
    '--aug', help='if processing data augmentation or not', required=True, default=False ,type=bool)
args = parser.parse_args()
image_dir = "shopee-product-matching/train_images"
text_path = "shopee-product-matching/train.csv"
epoch_num = 1 # number of passes over the full dataset (not used below)
batch_size = 1 # intended batch size (DataLoader below hard-codes 1)
# Load the dataset; original note: take 10% as a mini dataset, only test the easy mode
train_dataset = mydataset_PCA(image_dir=image_dir,text_path = text_path, is_train=True, is_augmentation=args.aug)
test_dataset = mydataset_PCA(image_dir=image_dir,text_path = text_path, is_train=False)
feas_train = [] # create the dataset feature base in low dimension
labels_train = []
train_loader = DataLoader(dataset = train_dataset,batch_size = 1,shuffle = True)
pca_estimator = dc.PCA(n_components=100)
# for image,text in train_loader:  # iterate over every batch
img_train = []
for batch_data in tqdm(train_loader):
    image,text=batch_data
    # img = np.squeeze(image.numpy())
    img = image.numpy()
    # Drop the leading batch dimension.
    img = img[0,...]
    # only using the
    # img_train.append(img.reshape(1,-1))
    img_train.append(img)
    img_num = img.shape[0]
    # labels_train.append(text[0])
    # Repeat the label once per row contributed by this sample.
    labels_train.extend([text[0] for _ in range(img_num)])
# do the PCA: centre the data and project it into 100 dimensions
labels_train = np.array(labels_train)
img_train = np.concatenate(img_train,axis=0)
img_mean = np.mean(img_train, axis=0, keepdims=True)
img_train = img_train - img_mean
trainned_base = pca_estimator.fit_transform(img_train)
components_ = pca_estimator.components_
# do the test: nearest-neighbour search in PCA space (squared L2 distance)
test_loader = DataLoader(dataset = test_dataset,batch_size = 1,shuffle = True)
acc5 = 0
acc1 = 0
for batch_data in tqdm(test_loader):
    image,text=batch_data
    # Process the image data (original comment said "extract SIFT features",
    # but this script actually uses PCA-projected features).
    # img_test = np.squeeze(image.numpy())
    img_test = image.numpy()
    # img_c = img.reshape(1,-1)
    img_c = img_test[0,...]
    img_c = img_c - img_mean
    img_feature = pca_estimator.transform(img_c)
    distance_s = np.sum((img_feature - trainned_base) ** 2, axis=-1)
    idx_sort = np.argsort(distance_s)
    idx_top5 = idx_sort[:5]
    pred_label = labels_train[idx_top5]
    if text[0] in pred_label: #TODO: text need to be further index
        acc5 = acc5 + 1
    if text[0] == pred_label[0]:
        acc1 = acc1 + 1
# err_rate = err/len(test_dataset)
# acc = 1-err_rate
acc_rate5 = acc5 / len(test_dataset)
acc_rate1 = acc1 / len(test_dataset)
print('----------------------------')
# print(f"err = {err_rate:.4f}")
print(f"acc1 = {acc_rate1:.4f}")
print(f"acc5 = {acc_rate5:.4f}")
| [
"numpy.mean",
"argparse.ArgumentParser",
"sklearn.decomposition.PCA",
"tqdm.tqdm",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"dataset.mydataset_PCA"
] | [((251, 312), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (274, 312), False, 'import argparse\n'), ((688, 788), 'dataset.mydataset_PCA', 'mydataset_PCA', ([], {'image_dir': 'image_dir', 'text_path': 'text_path', 'is_train': '(True)', 'is_augmentation': 'args.aug'}), '(image_dir=image_dir, text_path=text_path, is_train=True,\n is_augmentation=args.aug)\n', (701, 788), False, 'from dataset import mydataset_PCA\n'), ((801, 872), 'dataset.mydataset_PCA', 'mydataset_PCA', ([], {'image_dir': 'image_dir', 'text_path': 'text_path', 'is_train': '(False)'}), '(image_dir=image_dir, text_path=text_path, is_train=False)\n', (814, 872), False, 'from dataset import mydataset_PCA\n'), ((975, 1036), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': '(1)', 'shuffle': '(True)'}), '(dataset=train_dataset, batch_size=1, shuffle=True)\n', (985, 1036), False, 'from torch.utils.data import DataLoader\n'), ((1058, 1082), 'sklearn.decomposition.PCA', 'dc.PCA', ([], {'n_components': '(100)'}), '(n_components=100)\n', (1064, 1082), True, 'import sklearn.decomposition as dc\n'), ((1159, 1177), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (1163, 1177), False, 'from tqdm import tqdm\n'), ((1528, 1550), 'numpy.array', 'np.array', (['labels_train'], {}), '(labels_train)\n', (1536, 1550), True, 'import numpy as np\n'), ((1563, 1596), 'numpy.concatenate', 'np.concatenate', (['img_train'], {'axis': '(0)'}), '(img_train, axis=0)\n', (1577, 1596), True, 'import numpy as np\n'), ((1607, 1648), 'numpy.mean', 'np.mean', (['img_train'], {'axis': '(0)', 'keepdims': '(True)'}), '(img_train, axis=0, keepdims=True)\n', (1614, 1648), True, 'import numpy as np\n'), ((1805, 1865), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': '(1)', 'shuffle': '(True)'}), '(dataset=test_dataset, batch_size=1, 
shuffle=True)\n', (1815, 1865), False, 'from torch.utils.data import DataLoader\n'), ((1907, 1924), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (1911, 1924), False, 'from tqdm import tqdm\n'), ((2200, 2251), 'numpy.sum', 'np.sum', (['((img_feature - trainned_base) ** 2)'], {'axis': '(-1)'}), '((img_feature - trainned_base) ** 2, axis=-1)\n', (2206, 2251), True, 'import numpy as np\n'), ((2267, 2289), 'numpy.argsort', 'np.argsort', (['distance_s'], {}), '(distance_s)\n', (2277, 2289), True, 'import numpy as np\n')] |
import environ
# django-environ settings helper: DEBUG is read from the environment,
# cast to bool, and defaults to True when unset (disable in production).
env = environ.Env(
    DEBUG=(bool, True)
)
| [
"environ.Env"
] | [((22, 53), 'environ.Env', 'environ.Env', ([], {'DEBUG': '(bool, True)'}), '(DEBUG=(bool, True))\n', (33, 53), False, 'import environ\n')] |
#coding: utf-8
import requests
import AdvancedHTMLParser
import json
import datetime
import html
from unidecode import unidecode
# Placeholder, will be replaced by reference to main cfg object
# This is only to satisfy builtin vs code verifier
# Attribute access on None raises immediately; the bare except swallows it.
# Net effect: the name 'cfg' exists (as None) so static analysis is
# satisfied until the real config object is injected by the caller.
try:
    cfg = None
    cfg.teachermap_filename = None
except:
    pass
print("SEMI-LEGACY OVERRIDES PARSER!!!!!!!!!")
def search_for_overrides():
    """Scrape the school substitutions index page and parse every linked
    HTML substitution document.

    :returns: dict of parsed substitutions keyed by date (filled in by
        parse_text), or False when the index page cannot be fetched.
    """
    r = requests.get("http://www.zseil.edu.pl/zastepstwa/")
    r.encoding = "UTF-8"
    if r.status_code != 200:
        return False
    listparser = AdvancedHTMLParser.AdvancedHTMLParser()
    listparser.parseStr(r.text)
    totalOutput = {}
    # Narrow parsing to the middle panel that holds the list of links.
    panel = listparser.getElementById("panel_srodkowy_szerszy").getHTML()
    listparser = AdvancedHTMLParser.AdvancedHTMLParser()
    listparser.parseStr(panel)
    for li in listparser.getElementsByTagName("a"):
        url = "http://www.zseil.edu.pl/zastepstwa/{}".format(li.href)
        url = url.replace("\\", "/")
        z = requests.get(url)
        z.encoding = "UTF-8"
        # BUG FIX: previously this checked r.status_code (the already
        # validated index response) instead of the per-link response z,
        # so failed downloads were never detected.
        if z.status_code != 200:
            exit(z.status_code)
        if url.endswith(".html"):
            print("Zastepstwo w htmlu, parsuje! ({})".format(url))
            # The date is embedded in the URL path; used as a fallback
            # when the document header has no parsable date.
            date_fallback = url.split("-")
            parse_text(totalOutput, z.text, date_fallback)
    return totalOutput
def parse_text(o, text, date_fallback=None):
    """Parse one substitutions HTML document and merge results into *o*.

    :param o: accumulator dict, keyed by each table's max date.
    :param text: raw HTML of a substitutions page.
    :param date_fallback: URL-derived date parts used when the document
        header does not contain a parsable date.
    """
    text_parser = AdvancedHTMLParser.AdvancedHTMLParser()
    text_parser.parseStr(text)
    for table in text_parser.getElementsByTagName("table"):
        parse_table(o, table.getChildren(), text_parser, date_fallback)
        break # only the first table is parsed; do NOT parse subsequent tables
def parse_table(o, table, html_all=None, date_fallback=None):
    """Parse one substitutions table into a nested dict and merge it into *o*.

    Output shape per day: output[weekday][lesson][unit] -> list of dicts
    describing a single substitution; '_min_date'/'_max_date' record the
    date range covered. The result is stored in o under max(cdays).

    :param o: accumulator dict shared across tables/documents.
    :param table: children of an HTML <table> element.
    :param html_all: parser for the whole document (used to read <h1> dates).
    :param date_fallback: URL-derived date parts used when no date parses.
    """
    output = dict()
    cday = ""
    cdays = []
    for i,row in enumerate(table.getElementsByTagName("tr")):
        if len(row.getChildren()) == 1: # header row (the one with the date)
            # NOTE(review): indexes children by the enumerate counter i,
            # which only works when the header is the first row — confirm.
            day = row.getChildren()[i].innerText
            if day.find("Dzie") != -1: # date lives in a <th>
                print("<th>")
                day = day.split(":")[1]
                day = day.split("(")[0]
                day = day.strip()
            elif html_all != None: # date lives in an <h1>
                day_ok = False
                try:
                    print("<h1> - a")
                    day = html_all.getElementsByTagName("h1")[0].innerText
                    day = day.split(": ")[1]
                    day = day.split(" (")[0]
                    day = day.strip()
                    # NOTE(review): parses cday (previous day, "" at first)
                    # as a sanity check; a failure triggers the fallback.
                    temp_fix_check = datetime.datetime.strptime(cday, "%d.%m.%Y").date().weekday() + 1
                    day_ok = True
                except:
                    print("Fallback, bo ktos edytowal recznie html -.-")
                if not day_ok:
                    try:
                        print("<h1> - b")
                        day = html_all.getElementsByTagName("h1")[1].innerText
                        day = day.split(": ")[1]
                        day = day.split(" (")[0]
                        day = day.strip()
                        temp_fix_check = datetime.datetime.strptime(cday, "%d.%m.%Y").date().weekday() + 1
                    except:
                        print("Fallback, bo ktos edytowal recznie html -.-")
                        # Build DD.MM.YYYY from the URL parts.
                        day = "{}.{}.{}".format(date_fallback[2],date_fallback[1],date_fallback[0].split("/")[-1])
            else:
                print("Fail, nie znam tego formatu zastepstw")
                return
            print("Zastepstwa na dzien {}".format(day))
            cday = day
            cdays.append(day)
        elif len(row.getChildren().getElementsByTagName("th")) == 0: # not a header: a substitution row with names
            lesson = row.getChildren()[0].innerText.replace("\n","")
            oldTeacher = unidecode(row.getChildren()[1].innerText.replace("\n",""))
            if row.getChildren()[2].innerText.find("IND*") != -1:
                # individual tuition entry (IND*)
                unit = row.getChildren()[2].innerText[row.getChildren()[2].innerText.find("IND*"):].replace("\n","")
                unit = unit[4:]
                group = -1
            elif len(row.getChildren()[2].innerText.split("|")) == 2:
                unit = row.getChildren()[2].innerText.split("|")[0].strip()
                group = row.getChildren()[2].innerText.split("|")[1].strip()
                # for a specific group
            else:
                # for the whole class
                unit = row.getChildren()[2].innerText.strip()
                group = -1
            subject = row.getChildren()[3].innerText.strip()
            classroom = row.getChildren()[4].innerText.strip()
            newTeacher = unidecode(row.getChildren()[5].innerText.strip())
            comments = row.getChildren()[6].innerText.strip()
            oldTeacherShort = unidecode(find_teacher_shortcut(oldTeacher))
            newTeacherShort = find_teacher_shortcut(newTeacher)
            # Try to extract a plain group name from known prefixes.
            if group != -1:
                if group.find("Grupa-") != -1:
                    guessedGroup = group.split("Grupa-")[1]
                elif group.find("r_") != -1:
                    guessedGroup = group.split("r_")[1]
                else:
                    guessedGroup = -1
            else:
                guessedGroup = -1
            # "Sent home" / "free period" entries mean no replacement teacher.
            if newTeacher.find("Uczniowie zwolnieni do domu") != -1 or newTeacher.find("Okienko dla uczniów") != -1 or newTeacher.find("Uczniowie przychodz") != -1: # TODO: handle the "students arrive later" case
                newTeacher = -1
            #print("[ Zastepstwo ]")
            #print("Godzina: {}".format(lesson))
            #print("Za nauczyciela: {} ({})".format(oldTeacher, oldTeacherShort))
            #print("Klasa: {}".format(unit))
            #print("Grupa: {}".format(group))
            #print("Nowy przedmiot: {}".format(subject))
            #print("Sala: {}".format(classroom))
            #print("Nowy nauczyciel: {} ({})".format(newTeacher, newTeacherShort))
            #print("Uwagi: {}".format(comments))
            #print()
            # Key the entry by ISO-ish weekday number (Monday == 1).
            d = datetime.datetime.strptime(cday, "%d.%m.%Y").date().weekday() + 1
            if d not in output:
                output[d] = dict()
                output[d]['day'] = cday
            if lesson not in output[d]:
                output[d][lesson] = dict()
            if unit not in output[d][lesson]:
                output[d][lesson][unit] = []
            temp = dict()
            temp['subject'] = subject
            temp['s'] = classroom
            temp['oldTeacherLong'] = oldTeacher
            temp['newTeacherLong'] = newTeacher
            temp['oldTeacherShort'] = oldTeacherShort
            temp['newTeacherShort'] = newTeacherShort
            if group != -1:
                temp['guessedGroup'] = guessedGroup
            temp['comments'] = comments
            output[d][lesson][unit].append(temp)
    output['_min_date'] = min(cdays)
    output['_max_date'] = max(cdays)
    if max(cdays) in o:
        o[max(cdays)].update(output)
    else:
        o[max(cdays)] = output
    return o
def find_teacher_shortcut(name):
    """Look up the shortcut (key) for a teacher's full name in the
    teacher-map JSON file.

    The match is case-insensitive on ASCII-folded names (via unidecode);
    HTML entities in the input are unescaped first.

    :param name: teacher display name, possibly containing HTML entities.
    :returns: the matching shortcut key, or "-1" when not found.
    """
    name = unidecode(html.unescape(name))
    # Use a context manager so the file is closed on every path; the
    # original leaked the handle when a match caused an early return.
    with open(cfg.teachermap_filename, "r") as tm_f:
        teachermap = json.load(tm_f)
    for key in teachermap:
        if teachermap[key].lower().find(name.lower()) != -1:
            return key
    return "-1"
def generate():
    """Public entry point: fetch and parse all substitution overrides."""
    return search_for_overrides()
#generate()
#with open("zastepstwa.html","r", encoding="UTF-8") as inputData:
# totalOutput = {}
# date_fallback = "11.22.3333"
# parse_text(totalOutput, inputData.read(), date_fallback)
# #parseZastepstwa(inputData.read())
#inputData.close() | [
"datetime.datetime.strptime",
"AdvancedHTMLParser.AdvancedHTMLParser",
"html.unescape",
"requests.get"
] | [((390, 441), 'requests.get', 'requests.get', (['"""http://www.zseil.edu.pl/zastepstwa/"""'], {}), "('http://www.zseil.edu.pl/zastepstwa/')\n", (402, 441), False, 'import requests\n'), ((519, 558), 'AdvancedHTMLParser.AdvancedHTMLParser', 'AdvancedHTMLParser.AdvancedHTMLParser', ([], {}), '()\n', (556, 558), False, 'import AdvancedHTMLParser\n'), ((693, 732), 'AdvancedHTMLParser.AdvancedHTMLParser', 'AdvancedHTMLParser.AdvancedHTMLParser', ([], {}), '()\n', (730, 732), False, 'import AdvancedHTMLParser\n'), ((1258, 1297), 'AdvancedHTMLParser.AdvancedHTMLParser', 'AdvancedHTMLParser.AdvancedHTMLParser', ([], {}), '()\n', (1295, 1297), False, 'import AdvancedHTMLParser\n'), ((912, 929), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (924, 929), False, 'import requests\n'), ((5798, 5817), 'html.unescape', 'html.unescape', (['name'], {}), '(name)\n', (5811, 5817), False, 'import html\n'), ((4928, 4972), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['cday', '"""%d.%m.%Y"""'], {}), "(cday, '%d.%m.%Y')\n", (4954, 4972), False, 'import datetime\n'), ((2137, 2181), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['cday', '"""%d.%m.%Y"""'], {}), "(cday, '%d.%m.%Y')\n", (2163, 2181), False, 'import datetime\n'), ((2520, 2564), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['cday', '"""%d.%m.%Y"""'], {}), "(cday, '%d.%m.%Y')\n", (2546, 2564), False, 'import datetime\n')] |
#import sqlite3
from flask import (Blueprint, flash, g, redirect,
render_template, request, session, url_for)
import functools
from setting import Config
import hashlib
# Blueprint mounted under /web; all routes below are relative to it.
bp = Blueprint('login', __name__, url_prefix='/web')
@bp.route('/login', methods=('GET', 'POST'))
def login():
    """Render the login form (GET) and check credentials (POST).

    On success, stores sha256(username + sha256(password)) in the session
    as 'user_id'; login_required recomputes the same digest from Config.
    """
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        error = None
        # NOTE(review): plaintext comparison against configured credentials;
        # consider hmac.compare_digest to avoid timing side channels.
        if username != Config.USERNAME or password != Config.PASSWORD:
            error = '错误的用户名或密码 Incorrect username or password.'
        if error is None:
            session.clear()
            hash_session = username + \
                hashlib.sha256(password.encode("utf8")).hexdigest()
            hash_session = hashlib.sha256(
                hash_session.encode("utf8")).hexdigest()
            session['user_id'] = hash_session
            return redirect(url_for('index.index'))
        flash(error)
    return render_template('web/login.html')
@bp.route('/logout')
def logout():
    """Log the user out by clearing the session, then go to the index."""
    session.clear()
    flash('成功登出 Successfully log out.')
    return redirect(url_for('index.index'))
def login_required(view):
    """Decorator: require a valid session digest before calling *view*.

    Recomputes sha256(Config.USERNAME + sha256(Config.PASSWORD)) on every
    request and compares it to the session's 'user_id'; a mismatch
    redirects to the login page.
    """
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        x = session.get('user_id')
        hash_session = Config.USERNAME + \
            hashlib.sha256(Config.PASSWORD.encode("utf8")).hexdigest()
        hash_session = hashlib.sha256(hash_session.encode("utf8")).hexdigest()
        if x != hash_session:
            return redirect(url_for('login.login'))
        # Expose the authenticated identity to downstream handlers.
        g.user = {'user_id': x, 'username': Config.USERNAME}
        return view(**kwargs)
    return wrapped_view
| [
"flask.render_template",
"flask.flash",
"flask.session.get",
"functools.wraps",
"flask.url_for",
"flask.session.clear",
"flask.Blueprint",
"setting.Config.PASSWORD.encode"
] | [((194, 241), 'flask.Blueprint', 'Blueprint', (['"""login"""', '__name__'], {'url_prefix': '"""/web"""'}), "('login', __name__, url_prefix='/web')\n", (203, 241), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, session, url_for\n'), ((984, 1017), 'flask.render_template', 'render_template', (['"""web/login.html"""'], {}), "('web/login.html')\n", (999, 1017), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, session, url_for\n'), ((1068, 1083), 'flask.session.clear', 'session.clear', ([], {}), '()\n', (1081, 1083), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, session, url_for\n'), ((1088, 1123), 'flask.flash', 'flash', (['"""成功登出 Successfully log out."""'], {}), "('成功登出 Successfully log out.')\n", (1093, 1123), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, session, url_for\n'), ((1219, 1240), 'functools.wraps', 'functools.wraps', (['view'], {}), '(view)\n', (1234, 1240), False, 'import functools\n'), ((959, 971), 'flask.flash', 'flash', (['error'], {}), '(error)\n', (964, 971), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, session, url_for\n'), ((1144, 1166), 'flask.url_for', 'url_for', (['"""index.index"""'], {}), "('index.index')\n", (1151, 1166), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, session, url_for\n'), ((1285, 1307), 'flask.session.get', 'session.get', (['"""user_id"""'], {}), "('user_id')\n", (1296, 1307), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, session, url_for\n'), ((628, 643), 'flask.session.clear', 'session.clear', ([], {}), '()\n', (641, 643), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, session, url_for\n'), ((926, 948), 'flask.url_for', 'url_for', (['"""index.index"""'], {}), "('index.index')\n", (933, 948), False, 'from flask import Blueprint, 
flash, g, redirect, render_template, request, session, url_for\n'), ((1561, 1583), 'flask.url_for', 'url_for', (['"""login.login"""'], {}), "('login.login')\n", (1568, 1583), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, session, url_for\n'), ((1379, 1409), 'setting.Config.PASSWORD.encode', 'Config.PASSWORD.encode', (['"""utf8"""'], {}), "('utf8')\n", (1401, 1409), False, 'from setting import Config\n')] |
from pfwrapper import PyPatternFinder
from collections import namedtuple
Pattern = namedtuple(
    'Pattern',
    ['base', 'sens', 'pBase', 'pAll',
     'pDX', 'pD_X', 'pX',
     'pDY', 'pD_Y', 'pY',
     'pDXY', 'pD_XY', 'pXY'],
)

DivergentPattern = namedtuple(
    'DivergentPattern',
    ['base', 'sens', 'pDY', 'pD_Y', 'pDXY', 'pD_XY', 'score'],
)


class PatternFinder:
    """Thin convenience wrapper around the native PyPatternFinder engine.

    Raw engine results are plain dicts; each is mapped onto a
    DivergentPattern tuple. After every query, ``num_visited`` holds the
    node count reported by the engine (-1 before any query).
    """

    def __init__(self, root_params, leaf_params,
                 target_value, sensitive_var_ids):
        self.pattern_finder = PyPatternFinder(root_params, leaf_params,
                                              target_value, sensitive_var_ids)
        self.num_visited = -1

    def convert_pattern(self, p):
        """Map one raw engine result dict onto a DivergentPattern tuple."""
        return DivergentPattern(
            base=p['base'],
            sens=p['sens'],
            pDY=p['pDY'],
            pD_Y=p['pNotDY'],
            pDXY=p['pDXY'],
            pD_XY=p['pNotDXY'],
            score=p['kld'],
        )

    def find_any_divergent(self, threshold, num_patterns):
        """Divergent-pattern search with the engine's extra True flag."""
        raw, self.num_visited = self.pattern_finder.get_divergent_patterns(
            threshold, num_patterns, True)
        return [self.convert_pattern(entry) for entry in raw]

    def find_any_discriminating(self, threshold, num_patterns):
        """Discriminating-pattern search with the engine's extra True flag."""
        raw, self.num_visited = self.pattern_finder.get_discriminating_patterns(
            threshold, num_patterns, True)
        return [self.convert_pattern(entry) for entry in raw]

    def get_divergent_patterns(self, threshold, num_patterns):
        """Full divergent-pattern search."""
        raw, self.num_visited = self.pattern_finder.get_divergent_patterns(
            threshold, num_patterns)
        return [self.convert_pattern(entry) for entry in raw]

    def get_discriminating_patterns(self, threshold, num_patterns):
        """Full discriminating-pattern search."""
        raw, self.num_visited = self.pattern_finder.get_discriminating_patterns(
            threshold, num_patterns)
        return [self.convert_pattern(entry) for entry in raw]
if __name__ == '__main__':
    # Smoke test: tiny hand-built tree parameters exercising both searches.
    root_params = [0.1, 0.9]
    leaf_params = [[0.3, 0.7, 0.1, 0.9],
                   [0.4, 0.6, 0.5, 0.5],
                   [0.2, 0.8, 0.3, 0.7],
                   [0.15, 0.85, 0.25, 0.75]]
    target_value = 1
    sensitive_var_ids = [0, 2]
    threshold = 0.1
    num_patterns = 3
    pf = PatternFinder(root_params, leaf_params,
                       target_value, sensitive_var_ids)
    dis_patterns = pf.get_discriminating_patterns(threshold, num_patterns)
    print(dis_patterns)
    print('visited nodes: %d\n'%pf.num_visited)
    div_patterns = pf.get_divergent_patterns(threshold, num_patterns)
    print(div_patterns)
    print('visited nodes: %d\n'%pf.num_visited)
| [
"pfwrapper.PyPatternFinder",
"collections.namedtuple"
] | [((84, 210), 'collections.namedtuple', 'namedtuple', (['"""Pattern"""', "['base', 'sens', 'pBase', 'pAll', 'pDX', 'pD_X', 'pX', 'pDY', 'pD_Y', 'pY',\n 'pDXY', 'pD_XY', 'pXY']"], {}), "('Pattern', ['base', 'sens', 'pBase', 'pAll', 'pDX', 'pD_X', 'pX',\n 'pDY', 'pD_Y', 'pY', 'pDXY', 'pD_XY', 'pXY'])\n", (94, 210), False, 'from collections import namedtuple\n'), ((328, 421), 'collections.namedtuple', 'namedtuple', (['"""DivergentPattern"""', "['base', 'sens', 'pDY', 'pD_Y', 'pDXY', 'pD_XY', 'score']"], {}), "('DivergentPattern', ['base', 'sens', 'pDY', 'pD_Y', 'pDXY',\n 'pD_XY', 'score'])\n", (338, 421), False, 'from collections import namedtuple\n'), ((690, 764), 'pfwrapper.PyPatternFinder', 'PyPatternFinder', (['root_params', 'leaf_params', 'target_value', 'sensitive_var_ids'], {}), '(root_params, leaf_params, target_value, sensitive_var_ids)\n', (705, 764), False, 'from pfwrapper import PyPatternFinder\n')] |
#!/usr/bin/env python
import gtk, sys, os
# This phone's own number (placeholder value).
mynumber="5555555555"
# Intended to hold the number captured via the "Add Call#" button.
calnum=0
# Intended to hold the number captured via the "Add Fake#" button.
fakenum=0
def press(widget):
    """Keypad handler: append the pressed button's label to the entry."""
    current = entry.get_text()
    entry.set_text(current + widget.get_label())
def send_press(widget):
    """SEND-button handler: report the number currently entered."""
    number = entry.get_text()
    print("Dialing: " + number)
def add_call(widget):
    """Capture the entered number as the call number and clear the entry.

    BUG FIX: the original assigned to a local variable (misspelled
    'callnum'), so the module-level 'calnum' was never updated.
    """
    global calnum
    calnum = entry.get_text()
    entry.set_text("")
def add_fake(widget):
    """Capture the entered number as the fake number and clear the entry.

    BUG FIX: the original assignment created a local 'fakenum' that
    shadowed the module-level variable, which was never updated.
    """
    global fakenum
    fakenum = entry.get_text()
    entry.set_text("")
# Build the dialer window: a number entry on top, a 3-wide keypad grid,
# a row with the Add Call#/Add Fake# buttons, and a SEND button.
win = gtk.Window()
win.connect('destroy', lambda w: gtk.main_quit())
box = gtk.VBox()
win.add(box)
entry = gtk.Entry()
box.pack_start(entry, False)
table = gtk.Table(2,2, gtk.TRUE)
# Keypad labels laid out row by row, three per row.
a = [1,2,3,4,5,6,7,8,9,"#",0,"*"]
x = 0
y = 0
for i in a:
    button = gtk.Button(str(i))
    button.connect("clicked", press)
    table.attach(button, x, x+1, y, y+1)
    x+=1
    # Wrap to the next row after three columns.
    if x > 2:
        x=0
        y+=1
box.pack_start(table)
box2 = gtk.HBox()
box.pack_start(box2)
call_button = gtk.Button("Add Call#")
call_button.connect("clicked", add_call)
box2.pack_start(call_button)
fakebutton = gtk.Button("Add Fake#")
fakebutton.connect("clicked", add_fake)
box2.pack_start(fakebutton)
send = gtk.Button("SEND")
send.connect("clicked", send_press)
box.pack_start(send)
win.show_all()
gtk.main()
"gtk.Table",
"gtk.Entry",
"gtk.Button",
"gtk.Window",
"gtk.main_quit",
"gtk.VBox",
"gtk.HBox",
"gtk.main"
] | [((396, 408), 'gtk.Window', 'gtk.Window', ([], {}), '()\n', (406, 408), False, 'import gtk, sys, os\n'), ((466, 476), 'gtk.VBox', 'gtk.VBox', ([], {}), '()\n', (474, 476), False, 'import gtk, sys, os\n'), ((499, 510), 'gtk.Entry', 'gtk.Entry', ([], {}), '()\n', (508, 510), False, 'import gtk, sys, os\n'), ((549, 574), 'gtk.Table', 'gtk.Table', (['(2)', '(2)', 'gtk.TRUE'], {}), '(2, 2, gtk.TRUE)\n', (558, 574), False, 'import gtk, sys, os\n'), ((797, 807), 'gtk.HBox', 'gtk.HBox', ([], {}), '()\n', (805, 807), False, 'import gtk, sys, os\n'), ((843, 866), 'gtk.Button', 'gtk.Button', (['"""Add Call#"""'], {}), "('Add Call#')\n", (853, 866), False, 'import gtk, sys, os\n'), ((951, 974), 'gtk.Button', 'gtk.Button', (['"""Add Fake#"""'], {}), "('Add Fake#')\n", (961, 974), False, 'import gtk, sys, os\n'), ((1051, 1069), 'gtk.Button', 'gtk.Button', (['"""SEND"""'], {}), "('SEND')\n", (1061, 1069), False, 'import gtk, sys, os\n'), ((1144, 1154), 'gtk.main', 'gtk.main', ([], {}), '()\n', (1152, 1154), False, 'import gtk, sys, os\n'), ((442, 457), 'gtk.main_quit', 'gtk.main_quit', ([], {}), '()\n', (455, 457), False, 'import gtk, sys, os\n')] |
from simple_rl.mdp.oomdp.OOMDPStateClass import OOMDPState
class TrenchOOMDPState(OOMDPState):
    ''' Class for Trench World States '''
    def __init__(self, objects):
        # objects: OO-MDP object dict with keys "agent", "block", "lava".
        OOMDPState.__init__(self, objects=objects)
    def get_agent_x(self):
        # x coordinate of the (single) agent object.
        return self.objects["agent"][0]["x"]
    def get_agent_y(self):
        # y coordinate of the (single) agent object.
        return self.objects["agent"][0]["y"]
    def __hash__(self):
        # Concatenates agent attributes, then block positions, then lava
        # positions into one digit string ("00" separators) and converts
        # it to an int.
        # NOTE(review): "dest_x" is concatenated twice — looks like a
        # copy/paste slip for "dest_y"/"dest_x"; harmless for hashing but
        # confirm before changing, since equality below compares hashes.
        state_hash = str(self.get_agent_x()) + str(self.get_agent_y()) + str(self.objects["agent"][0]["dx"] + 1)\
                     + str(self.objects["agent"][0]["dy"] + 1) + str(self.objects["agent"][0]["dest_x"])\
                     + str(self.objects["agent"][0]["dest_x"]) + str(self.objects["agent"][0]["dest_y"]) + \
                     str(self.objects["agent"][0]["has_block"]) + "00"
        for b in self.objects["block"]:
            state_hash += str(b["x"]) + str(b["y"])
        state_hash += "00"
        for l in self.objects["lava"]:
            state_hash += str(l["x"]) + str(l["y"])
        return int(state_hash)
    def __eq__(self, other_trench_state):
        # NOTE(review): equality delegated to hash equality — distinct
        # states could compare equal on a hash collision, and the other
        # operand must implement the same hashing scheme.
        return hash(self) == hash(other_trench_state)
| [
"simple_rl.mdp.oomdp.OOMDPStateClass.OOMDPState.__init__"
] | [((180, 222), 'simple_rl.mdp.oomdp.OOMDPStateClass.OOMDPState.__init__', 'OOMDPState.__init__', (['self'], {'objects': 'objects'}), '(self, objects=objects)\n', (199, 222), False, 'from simple_rl.mdp.oomdp.OOMDPStateClass import OOMDPState\n')] |
import json

# Load the raw building list and reshape it into {id, name: {value}} records.
# BUG FIX: the loop iterated over 'buildlings' (undefined name) instead of
# 'buildings', which raised NameError; the file handle is now also closed
# deterministically via a context manager.
with open('buildings.json', 'r') as file:
    buildings = json.load(file)['data']

output = []
for building in buildings:
    temp = {}
    temp["id"] = building["number"]
    temp["name"] = {}
    temp["name"]["value"] = building["name"]
    output.append(temp)
"json.load"
] | [((60, 75), 'json.load', 'json.load', (['file'], {}), '(file)\n', (69, 75), False, 'import json\n')] |
import src.library as s_lib
# s_lib.update_team_table()
# s_lib.test_func()
# s_lib.update_result()
# Date range handed to update_period_result; presumably the season's
# start and end dates — confirm against src.library.
st = '2018-10-29'
end = '2019-05-23'
s_lib.update_period_result(st, end)
| [
"src.library.update_period_result"
] | [((140, 175), 'src.library.update_period_result', 's_lib.update_period_result', (['st', 'end'], {}), '(st, end)\n', (166, 175), True, 'import src.library as s_lib\n')] |
#!/usr/bin/env python
import os
import sys
import argparse
import requests
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import messagebird
# CLI tool: fetch one purchased MessageBird phone number and print its details.
parser = argparse.ArgumentParser()
parser.add_argument('--accessKey', help='Access key for MessageBird API.', type=str, required=True)
parser.add_argument('--phoneNumber', help='Phone number.', type=str, required=True)
args = vars(parser.parse_args())
try:
    # Create a MessageBird client with the specified accessKey.
    client = messagebird.Client(args['accessKey'])
    # Fetch the details of one purchased phone number.
    item = client.purchased_number(args['phoneNumber'])
    # Print the object information.
    print('\nThe following information was returned as a %s object:\n' % item.__class__)
    if item is not None:
        print(' {')
        print('  number      : %s' % item.number)
        print('  country     : %s' % item.country)
        print('  region      : %s' % item.region)
        print('  locality    : %s' % item.locality)
        print('  features    : %s' % item.features)
        print('  tags        : %s' % item.tags)
        print('  type        : %s' % item.type)
        print('  status      : %s' % item.status)
        print(' },')
    else:
        print(' With an empty response.')
except messagebird.client.ErrorException as e:
    # API-level errors carry a list of structured error entries.
    print('\nAn error occurred while fetching all purchased phone numbers:\n')
    for error in e.errors:
        print('  code        : %d' % error.code)
        print('  description : %s' % error.description)
        print('  parameter   : %s' % error.parameter)
        print('  type        : %s' % error.__class__)
except requests.exceptions.HTTPError as e:
    # Transport-level HTTP failures: dump request and response details.
    print('\nAn HTTP exception occurred while fetching all purchased phone numbers:')
    print(' ', e)
    print(' Http request body: ', e.request.body)
    print(' Http response status: ', e.response.status_code)
    print(' Http response body: ', e.response.content.decode())
except Exception as e:
    # Catch-all so the CLI exits with a readable message.
    print('\nAn ', e.__class__, ' exception occurred while :')
    print(e)
| [
"messagebird.Client",
"os.path.dirname",
"argparse.ArgumentParser"
] | [((168, 193), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (191, 193), False, 'import argparse\n'), ((494, 531), 'messagebird.Client', 'messagebird.Client', (["args['accessKey']"], {}), "(args['accessKey'])\n", (512, 531), False, 'import messagebird\n'), ((105, 130), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (120, 130), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django_redis import get_redis_connection
from uuid import uuid4
logger = logging.getLogger(__name__)  # module logger (not referenced by the classes below)
class LatestViewedStudies(object):
    """Interfaces with Redis to keep a per-user list of the latest viewed
    studies, most recent first, capped at *n* entries."""

    def __init__(self, user, n=5, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._end = n - 1  # inclusive end index for Redis range/trim calls
        self._redis = get_redis_connection(settings.EDD_LATEST_CACHE)
        self._user = user

    def __iter__(self):
        """Iterate the stored study primary keys as text, newest first."""
        raw_entries = self._redis.lrange(self._key(), 0, self._end)
        return map(self._decode, iter(raw_entries))

    def _key(self):
        # Redis key namespaced by module, class, and username.
        return '{module}.{klass}:{user}'.format(
            module=__name__,
            klass=type(self).__name__,
            user=self._user.username,
        )

    def _decode(self, value):
        # Redis returns bytes; decode back to text.
        return value.decode('utf-8')

    def remove_study(self, study):
        """Remove every occurrence of *study* from the user's list."""
        if study:
            self._redis.lrem(self._key(), 0, study.pk)

    def viewed_study(self, study):
        """Record *study* as most recently viewed, de-duplicated and capped."""
        if not study:
            return
        key = self._key()
        # Drop any previous occurrence so the list stays duplicate-free.
        self._redis.lrem(key, 0, study.pk)
        # Most recent entry goes to the front.
        self._redis.lpush(key, study.pk)
        # Cap the list at the configured length.
        self._redis.ltrim(key, 0, self._end)
class ScratchStorage(object):
    """ Interfaces with Redis to keep scratch storage """

    def __init__(self, *args, **kwargs):
        super(ScratchStorage, self).__init__(*args, **kwargs)
        self._redis = get_redis_connection(settings.EDD_LATEST_CACHE)

    def _key(self, name=None):
        # A random UUID is generated when the caller does not supply a name.
        if name is None:
            name = uuid4()
        return '%(module)s.%(klass)s:%(name)s' % {
            'module': __name__,
            'klass': self.__class__.__name__,
            'name': name,
        }

    def delete(self, key):
        """Remove the stored value at *key*."""
        self._redis.delete(key)

    def load(self, key):
        """Return the raw bytes stored at *key*, or None when absent."""
        return self._redis.get(key)

    def save(self, data, name=None, expires=None):
        """Store *data* under a namespaced key and return that key.

        The entry expires after *expires* seconds (default: one day); the
        write uses NX so an existing key is not overwritten.
        """
        if expires is None:
            expires = 60 * 60 * 24
        key = self._key(name)
        self._redis.set(key, data, nx=True, ex=expires)
        return key
| [
"logging.getLogger",
"django_redis.get_redis_connection",
"uuid.uuid4"
] | [((154, 181), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (171, 181), False, 'import logging\n'), ((461, 508), 'django_redis.get_redis_connection', 'get_redis_connection', (['settings.EDD_LATEST_CACHE'], {}), '(settings.EDD_LATEST_CACHE)\n', (481, 508), False, 'from django_redis import get_redis_connection\n'), ((1611, 1658), 'django_redis.get_redis_connection', 'get_redis_connection', (['settings.EDD_LATEST_CACHE'], {}), '(settings.EDD_LATEST_CACHE)\n', (1631, 1658), False, 'from django_redis import get_redis_connection\n'), ((1840, 1847), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1845, 1847), False, 'from uuid import uuid4\n')] |
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import itertools
import os
import sys
import warnings
try:
    unichr  # Python 2 ships unichr as a builtin
except NameError:
    unichr = chr  # Python 3: chr already yields unicode characters

TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_DIR = os.path.normpath(os.path.join(TOOLS_DIR, '..'))
C_SOURCE_FILE = os.path.join(PROJECT_DIR, 'jerry-core/lit/lit-unicode-conversions.inc.h')


def parse_unicode_sequence(raw_data):
    """
    Parse unicode sequence from raw data.

    :param raw_data: Space-separated hex code points (without 0x prefix).
    :return: The parsed unicode sequence as a string.
    """
    # Empty tokens (from leading/trailing/double spaces) are skipped.
    characters = [unichr(int(token, 16)) for token in raw_data.split(' ') if token]
    return ''.join(characters)
def read_case_mappings(unicode_data_file, special_casing_file):
    """
    Read the corresponding unicode values of lower and upper case letters and store these in tables.

    :param unicode_data_file: Contains the default case mappings (one-to-one mappings).
    :param special_casing_file: Contains additional informative case mappings that are either not one-to-one
                                or which are context-sensitive.
    :return: Lower and upper case mappings.
    """
    lower_case_mapping = CaseMapping()
    upper_case_mapping = CaseMapping()

    # Pass 1: one-to-one mappings from UnicodeData.txt
    with open(unicode_data_file) as unicode_data:
        for fields in csv.reader(unicode_data, delimiter=';'):
            code_point = int(fields[0], 16)

            # Restrict to the Basic Multilingual Plane, excluding ASCII
            if not (128 <= code_point < 0x10000):
                continue

            uppercase_field = fields[12]
            lowercase_field = fields[13]

            if uppercase_field:
                upper_case_mapping.add(code_point, parse_unicode_sequence(uppercase_field))
            if lowercase_field:
                lower_case_mapping.add(code_point, parse_unicode_sequence(lowercase_field))

    # Pass 2: override with entries from SpecialCasing.txt
    with open(special_casing_file) as special_casing:
        for fields in csv.reader(special_casing, delimiter=';'):
            # Skip comment sections and empty lines
            if not fields or fields[0].startswith('#'):
                continue

            # Blank out any field that carries a trailing '#' comment
            fields = ['' if field.find('#') >= 0 else field for field in fields]

            code_point = int(fields[0], 16)
            condition_list = fields[4]

            # Skip supplementary planes, ascii chars, and conditional mappings
            if code_point >= 0x10000 or code_point < 128 or condition_list:
                continue

            lower_case_mapping.add(code_point, parse_unicode_sequence(fields[1]))
            upper_case_mapping.add(code_point, parse_unicode_sequence(fields[3]))

    return lower_case_mapping, upper_case_mapping
class CaseMapping(dict):
    """Class defines an informative, default mapping.

    NOTE(review): the dict base class is unused -- the actual mapping lives
    in the private ``_conversion_table`` attribute.
    """

    def __init__(self):
        """Initialize the case mapping table."""
        self._conversion_table = {}

    def add(self, letter_id, mapped_value):
        """
        Add mapped value of the unicode letter.

        :param letter_id: An integer, representing the unicode code point of the character.
        :param mapped_value: Corresponding character of the case type.
        """
        self._conversion_table[letter_id] = mapped_value

    def remove(self, letter_id):
        """
        Remove mapping from the conversion table.

        :param letter_id: An integer, representing the unicode code point of the character.
        """
        del self._conversion_table[letter_id]

    def get_value(self, letter_id):
        """
        Get the mapped value of the given unicode character.

        :param letter_id: An integer, representing the unicode code point of the character.
        :return: The mapped value of the character, or None when absent.
        """
        if self.contains(letter_id):
            return self._conversion_table[letter_id]
        return None

    def get_conversion_distance(self, letter_id):
        """
        Calculate the distance between the unicode character and its mapped value
        (only needs and works with one-to-one mappings).

        :param letter_id: An integer, representing the unicode code point of the character.
        :return: The conversion distance, or None for missing/multi-char mappings.
        """
        mapped_value = self.get_value(letter_id)
        if mapped_value and len(mapped_value) == 1:
            return ord(mapped_value) - letter_id
        return None

    def is_bidirectional_conversion(self, letter_id, other_case_mapping):
        """
        Check that two unicode values are also a mapping value of each other.

        :param letter_id: An integer, representing the unicode code point of the character.
        :param other_case_mapping: Comparable case mapping table which possibly contains
                                   the return direction of the conversion.
        :return: True, if it's a reversible conversion, false otherwise.
        """
        if not self.contains(letter_id):
            return False

        # Check one-to-one mapping
        mapped_value = self.get_value(letter_id)
        if len(mapped_value) > 1:
            return False

        # Check two way conversions
        mapped_value_id = ord(mapped_value)
        if other_case_mapping.get_value(mapped_value_id) != unichr(letter_id):
            return False

        return True

    def contains(self, letter_id):
        """
        Check that a unicode character is in the conversion table.

        :param letter_id: An integer, representing the unicode code point of the character.
        :return: True, if it contains the character, false otherwise.
        """
        if letter_id in self._conversion_table:
            return True
        return False

    def get_table(self):
        """Return the underlying {code point: mapped string} dict."""
        return self._conversion_table

    def extract_ranges(self, other_case_mapping=None):
        """
        Extract ranges from case mappings
        (the second param is optional, if it's not empty, a range will contain bidirectional conversions only).

        Extracted entries are REMOVED from this table (and from
        other_case_mapping when given).

        :param other_case_mapping: Comparable case mapping table which contains the return direction of the conversion.
        :return: A table with the start points and their mapped value, and another table with the lengths of the ranges.
        """
        in_range = False
        range_position = -1
        ranges = []
        range_lengths = []

        for letter_id in sorted(self._conversion_table.keys()):
            prev_letter_id = letter_id - 1

            # One-way conversions
            if other_case_mapping is None:
                if len(self.get_value(letter_id)) > 1:
                    in_range = False
                    continue

                if not self.contains(prev_letter_id) or len(self.get_value(prev_letter_id)) > 1:
                    in_range = False
                    continue

            # Two way conversions
            else:
                if not self.is_bidirectional_conversion(letter_id, other_case_mapping):
                    in_range = False
                    continue

                if not self.is_bidirectional_conversion(prev_letter_id, other_case_mapping):
                    in_range = False
                    continue

            conv_distance = self.get_conversion_distance(letter_id)
            prev_conv_distance = self.get_conversion_distance(prev_letter_id)

            # A range requires consecutive code points with the same distance.
            if (conv_distance != prev_conv_distance):
                in_range = False
                continue

            if in_range:
                range_lengths[range_position] += 1
            else:
                in_range = True
                range_position += 1

                # Add the start point of the range and its mapped value
                ranges.extend([prev_letter_id, ord(self.get_value(prev_letter_id))])
                range_lengths.append(2)

        # Remove all ranges from the case mapping table.
        index = 0
        while index != len(ranges):
            range_length = range_lengths[index // 2]
            for incr in range(range_length):
                self.remove(ranges[index] + incr)
                if other_case_mapping is not None:
                    other_case_mapping.remove(ranges[index + 1] + incr)

            index += 2

        return ranges, range_lengths

    def extract_character_pair_ranges(self, other_case_mapping):
        """
        Extract two or more character pairs from the case mapping tables.

        Extracted entries are REMOVED from both tables.

        :param other_case_mapping: Comparable case mapping table which contains the return direction of the conversion.
        :return: A table with the start points, and another table with the lengths of the ranges.
        """
        start_points = []
        lengths = []
        in_range = False
        element_counter = -1

        for letter_id in sorted(self._conversion_table.keys()):
            # Only extract character pairs
            if not self.is_bidirectional_conversion(letter_id, other_case_mapping):
                in_range = False
                continue

            if self.get_value(letter_id) == unichr(letter_id + 1):
                prev_letter_id = letter_id - 2

                if not self.is_bidirectional_conversion(prev_letter_id, other_case_mapping):
                    in_range = False

                if in_range:
                    lengths[element_counter] += 2
                else:
                    element_counter += 1
                    start_points.append(letter_id)
                    lengths.append(2)
                    in_range = True

            else:
                in_range = False

        # Remove all found case mappings from the conversion tables after the scanning method
        idx = 0
        while idx != len(start_points):
            letter_id = start_points[idx]
            conv_length = lengths[idx]

            for incr in range(0, conv_length, 2):
                self.remove(letter_id + incr)
                other_case_mapping.remove(letter_id + 1 + incr)

            idx += 1

        return start_points, lengths

    def extract_character_pairs(self, other_case_mapping):
        """
        Extract character pairs. Check that two unicode values are also a mapping value of each other.

        Extracted entries are REMOVED from both tables.

        :param other_case_mapping: Comparable case mapping table which contains the return direction of the conversion.
        :return: A table with character pairs.
        """
        character_pairs = []

        for letter_id in sorted(self._conversion_table.keys()):
            if self.is_bidirectional_conversion(letter_id, other_case_mapping):
                mapped_value = self.get_value(letter_id)
                character_pairs.extend([letter_id, ord(mapped_value)])

                # Remove character pairs from case mapping tables
                self.remove(letter_id)
                other_case_mapping.remove(ord(mapped_value))

        return character_pairs

    def extract_special_ranges(self):
        """
        Extract special ranges. It contains those ranges of one-to-two mappings where the second character
        of the mapped values are equal and the other characters follow each other.

        eg.: \\u1f80 and \\u1f81 will be in one range because their upper-case values are \\u1f08\\u0399 and \\u1f09\\u0399

        Extracted entries are REMOVED from this table.

        :return: A table with the start points and their mapped values, and a table with the lengths of the ranges.
        """
        special_ranges = []
        special_range_lengths = []

        range_position = -1
        # BUG FIX: in_range was previously assigned only inside the loop, so the
        # first code point passing every check raised UnboundLocalError at the
        # `if in_range:` test below.  Initialize it, mirroring extract_ranges().
        in_range = False

        for letter_id in sorted(self._conversion_table.keys()):
            mapped_value = self.get_value(letter_id)

            if len(mapped_value) != 2:
                continue

            prev_letter_id = letter_id - 1

            if not self.contains(prev_letter_id):
                in_range = False
                continue

            prev_mapped_value = self.get_value(prev_letter_id)

            if len(prev_mapped_value) != 2:
                continue

            if prev_mapped_value[1] != mapped_value[1]:
                continue

            if (ord(prev_mapped_value[0]) - prev_letter_id) != (ord(mapped_value[0]) - letter_id):
                in_range = False
                continue

            if in_range:
                special_range_lengths[range_position] += 1
            else:
                range_position += 1
                in_range = True
                special_ranges.extend([prev_letter_id, ord(prev_mapped_value[0]), ord(prev_mapped_value[1])])
                special_range_lengths.append(1)

        # Remove special ranges from the conversion table
        idx = 0
        while idx != len(special_ranges):
            range_length = special_range_lengths[idx // 3]
            for incr in range(range_length):
                self.remove(special_ranges[idx] + incr)

            idx += 3

        return special_ranges, special_range_lengths

    def extract_conversions(self):
        """
        Extract conversions. It provides the full (or remaining) case mappings from the table.
        The counter table contains the information of how many one-to-one, one-to-two or one-to-three mappings
        exist successively in the conversion table.

        Extracted entries are REMOVED from this table (it ends up empty unless
        a mapping is longer than three characters).

        :return: A table with conversions, and a table with counters.
        """
        unicodes = [[], [], []]

        # 1 to 1 byte
        for letter_id in sorted(self._conversion_table.keys()):
            mapped_value = self.get_value(letter_id)

            if len(mapped_value) != 1:
                continue

            unicodes[0].extend([letter_id, ord(mapped_value)])
            self.remove(letter_id)

        # 1 to 2 bytes
        for letter_id in sorted(self._conversion_table.keys()):
            mapped_value = self.get_value(letter_id)

            if len(mapped_value) != 2:
                continue

            unicodes[1].extend([letter_id, ord(mapped_value[0]), ord(mapped_value[1])])
            self.remove(letter_id)

        # 1 to 3 bytes
        for letter_id in sorted(self._conversion_table.keys()):
            mapped_value = self.get_value(letter_id)

            if len(mapped_value) != 3:
                continue

            unicodes[2].extend([letter_id, ord(mapped_value[0]), ord(mapped_value[1]), ord(mapped_value[2])])
            self.remove(letter_id)

        # Entry counts per mapping width (each 1-to-1 entry spans 2 ints, etc.)
        unicode_lengths = [int(len(unicodes[0]) / 2), int(len(unicodes[1]) / 3), int(len(unicodes[2]) / 4)]

        return list(itertools.chain.from_iterable(unicodes)), unicode_lengths
def regroup(l, n):
    """Split *l* into consecutive chunks of at most *n* elements."""
    chunks = []
    for start in range(0, len(l), n):
        chunks.append(l[start:start + n])
    return chunks
def hex_format(ch):
    """Format a character or integer code point as a 0x-prefixed, 4-digit hex literal."""
    code_point = ord(ch) if isinstance(ch, str) else ch
    return "0x%04x" % code_point
def format_code(code, indent):
    """Render *code* as comma-separated hex literals, 10 per line, indented by *indent* spaces."""
    # convert all characters to hex format
    hex_values = ", ".join(map(hex_format, code))
    pad = ' ' * indent
    # 10 entries per line: each rendered entry ("0x1234, ") is 8 characters wide
    return "\n".join(pad + chunk.strip() for chunk in regroup(hex_values, 10 * 8))
def create_c_format_table(type_name, array_name, table, description=""):
    """Render one C array definition (with an optional leading comment) for the generated source."""
    template = """{DESC}
static const {TYPE} jerry_{NAME}[] JERRY_CONST_DATA =
{{
{TABLE}
}};
"""
    return template.format(DESC=description, TYPE=type_name, NAME=array_name,
                           TABLE=format_code(table, 1))
def copy_tables_to_c_source(gen_tables, c_source):
    """Write every generated conversion table into the target C source file, in a fixed order."""
    header = """/* Copyright JS Foundation and other contributors, http://js.foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is automatically generated by the {SCRIPT} script. Do not edit!
 */
""".format(SCRIPT=os.path.basename(__file__))

    character_case_ranges = gen_tables.get_character_case_ranges()
    character_pair_ranges = gen_tables.get_character_pair_ranges()
    character_pairs = gen_tables.get_character_pairs()
    upper_case_special_ranges = gen_tables.get_upper_case_special_ranges()
    lower_case_ranges = gen_tables.get_lower_case_ranges()
    lower_case_conversions = gen_tables.get_lower_case_conversions()
    upper_case_conversions = gen_tables.get_upper_case_conversions()

    # (C type, array name, data, description) for each emitted table,
    # in the exact order the generated file expects.
    table_specs = [
        ('uint16_t', 'character_case_ranges', character_case_ranges[0],
         "/* Contains start points of character case ranges (these are bidirectional conversions). */"),
        ('uint8_t', 'character_case_range_lengths', character_case_ranges[1],
         "/* Interval lengths of start points in `character_case_ranges` table. */"),
        ('uint16_t', 'character_pair_ranges', character_pair_ranges[0],
         "/* Contains the start points of bidirectional conversion ranges. */"),
        ('uint8_t', 'character_pair_range_lengths', character_pair_ranges[1],
         "/* Interval lengths of start points in `character_pair_ranges` table. */"),
        ('uint16_t', 'character_pairs', character_pairs,
         "/* Contains lower/upper case bidirectional conversion pairs. */"),
        ('uint16_t', 'upper_case_special_ranges', upper_case_special_ranges[0],
         """/* Contains start points of one-to-two uppercase ranges where the second character
 * is always the same.
 */"""),
        ('uint8_t', 'upper_case_special_range_lengths', upper_case_special_ranges[1],
         "/* Interval lengths for start points in `upper_case_special_ranges` table. */"),
        ('uint16_t', 'lower_case_ranges', lower_case_ranges[0],
         "/* Contains start points of lowercase ranges. */"),
        ('uint8_t', 'lower_case_range_lengths', lower_case_ranges[1],
         "/* Interval lengths for start points in `lower_case_ranges` table. */"),
        ('uint16_t', 'lower_case_conversions', lower_case_conversions[0],
         "/* The remaining lowercase conversions. The lowercase variant can be one-to-three character long. */"),
        ('uint8_t', 'lower_case_conversion_counters', lower_case_conversions[1],
         "/* Number of one-to-one, one-to-two, and one-to-three lowercase conversions. */"),
        ('uint16_t', 'upper_case_conversions', upper_case_conversions[0],
         "/* The remaining uppercase conversions. The uppercase variant can be one-to-three character long. */"),
        ('uint8_t', 'upper_case_conversion_counters', upper_case_conversions[1],
         "/* Number of one-to-one, one-to-two, and one-to-three lowercase conversions. */"),
    ]

    data = [header]
    for type_name, array_name, table, description in table_specs:
        data.append(create_c_format_table(type_name, array_name, table, description))

    with open(c_source, 'w') as generated_source:
        generated_source.write(''.join(data))
class GenTables(object):
    """Class defines an informative, default generated tables."""
    def __init__(self, lower_case_table, upper_case_table):
        """
        Generate the extracted tables from the given case mapping tables.

        Each extract_* call below REMOVES the entries it consumed from the
        mapping tables, so the order of these statements is significant:
        later extractions only operate on what earlier ones left behind.

        :param lower_case_table: Lower-case mappings.
        :param upper_case_table: Upper-case mappings.
        """
        # Bidirectional conversions first (these consume entries from both tables).
        self._character_case_ranges = lower_case_table.extract_ranges(upper_case_table)
        self._character_pair_ranges = lower_case_table.extract_character_pair_ranges(upper_case_table)
        self._character_pairs = lower_case_table.extract_character_pairs(upper_case_table)
        # One-to-two uppercase mappings sharing the same trailing character.
        self._upper_case_special_ranges = upper_case_table.extract_special_ranges()
        # One-way lowercase ranges, then whatever is left over in each table.
        self._lower_case_ranges = lower_case_table.extract_ranges()
        self._lower_case_conversions = lower_case_table.extract_conversions()
        self._upper_case_conversions = upper_case_table.extract_conversions()
        # Anything still present slipped through every extraction pass above.
        if lower_case_table.get_table():
            warnings.warn('Not all elements extracted from the lowercase conversion table!')
        if upper_case_table.get_table():
            warnings.warn('Not all elements extracted from the uppercase conversion table!')
    def get_character_case_ranges(self):
        return self._character_case_ranges
    def get_character_pair_ranges(self):
        return self._character_pair_ranges
    def get_character_pairs(self):
        return self._character_pairs
    def get_upper_case_special_ranges(self):
        return self._upper_case_special_ranges
    def get_lower_case_ranges(self):
        return self._lower_case_ranges
    def get_lower_case_conversions(self):
        return self._lower_case_conversions
    def get_upper_case_conversions(self):
        return self._upper_case_conversions
def main():
    """Parse command-line arguments, build the conversion tables, and emit the C source."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--unicode-data',
                        metavar='FILE',
                        action='store',
                        required=True,
                        help='specify the unicode data file')
    parser.add_argument('--special-casing',
                        metavar='FILE',
                        action='store',
                        required=True,
                        help='specify the special casing file')
    parser.add_argument('--c-source',
                        metavar='FILE',
                        action='store',
                        default=C_SOURCE_FILE,
                        help='specify the output c source (default: %(default)s)')
    script_args = parser.parse_args()

    # Bail out early when either input file is absent or unreadable.
    for input_file in (script_args.unicode_data, script_args.special_casing):
        if not os.path.isfile(input_file) or not os.access(input_file, os.R_OK):
            print('The %s file is missing or not readable!' % input_file)
            sys.exit(1)

    lower_case_table, upper_case_table = read_case_mappings(script_args.unicode_data,
                                                           script_args.special_casing)
    gen_tables = GenTables(lower_case_table, upper_case_table)
    copy_tables_to_c_source(gen_tables, script_args.c_source)


if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"os.access",
"os.path.join",
"os.path.isfile",
"itertools.chain.from_iterable",
"os.path.basename",
"sys.exit",
"os.path.abspath",
"warnings.warn",
"csv.reader"
] | [((907, 980), 'os.path.join', 'os.path.join', (['PROJECT_DIR', '"""jerry-core/lit/lit-unicode-conversions.inc.h"""'], {}), "(PROJECT_DIR, 'jerry-core/lit/lit-unicode-conversions.inc.h')\n", (919, 980), False, 'import os\n'), ((802, 827), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (817, 827), False, 'import os\n'), ((860, 889), 'os.path.join', 'os.path.join', (['TOOLS_DIR', '""".."""'], {}), "(TOOLS_DIR, '..')\n", (872, 889), False, 'import os\n'), ((23739, 23764), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (23762, 23764), False, 'import argparse\n'), ((2134, 2173), 'csv.reader', 'csv.reader', (['unicode_data'], {'delimiter': '""";"""'}), "(unicode_data, delimiter=';')\n", (2144, 2173), False, 'import csv\n'), ((2855, 2896), 'csv.reader', 'csv.reader', (['special_casing'], {'delimiter': '""";"""'}), "(special_casing, delimiter=';')\n", (2865, 2896), False, 'import csv\n'), ((24703, 24714), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (24711, 24714), False, 'import sys\n'), ((24919, 24930), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (24927, 24930), False, 'import sys\n'), ((17233, 17259), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (17249, 17259), False, 'import os\n'), ((22910, 22995), 'warnings.warn', 'warnings.warn', (['"""Not all elements extracted from the lowercase conversion table!"""'], {}), "('Not all elements extracted from the lowercase conversion table!'\n )\n", (22923, 22995), False, 'import warnings\n'), ((23044, 23129), 'warnings.warn', 'warnings.warn', (['"""Not all elements extracted from the uppercase conversion table!"""'], {}), "('Not all elements extracted from the uppercase conversion table!'\n )\n", (23057, 23129), False, 'import warnings\n'), ((24517, 24557), 'os.path.isfile', 'os.path.isfile', (['script_args.unicode_data'], {}), '(script_args.unicode_data)\n', (24531, 24557), False, 'import os\n'), ((24565, 24609), 'os.access', 'os.access', 
(['script_args.unicode_data', 'os.R_OK'], {}), '(script_args.unicode_data, os.R_OK)\n', (24574, 24609), False, 'import os\n'), ((24727, 24769), 'os.path.isfile', 'os.path.isfile', (['script_args.special_casing'], {}), '(script_args.special_casing)\n', (24741, 24769), False, 'import os\n'), ((24777, 24823), 'os.access', 'os.access', (['script_args.special_casing', 'os.R_OK'], {}), '(script_args.special_casing, os.R_OK)\n', (24786, 24823), False, 'import os\n'), ((15621, 15660), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['unicodes'], {}), '(unicodes)\n', (15650, 15660), False, 'import itertools\n')] |
# Solution of;
# Project Euler Problem 465: Polar polygons
# https://projecteuler.net/problem=465
#
# The kernel of a polygon is defined by the set of points from which the
# entire polygon's boundary is visible. We define a polar polygon as a polygon
# for which the origin is strictly contained inside its kernel. For this
# problem, a polygon can have collinear consecutive vertices. However, a
# polygon still cannot have self-intersection and cannot have zero area. For
# example, only the first of the following is a polar polygon (the kernels of
# the second, third, and fourth do not strictly contain the origin, and the
# fifth does not have a kernel at all):Notice that the first polygon has three
# consecutive collinear vertices. Let P(n) be the number of polar polygons
# such that the vertices (x, y) have integer coordinates whose absolute values
# are not greater than n. Note that polygons should be counted as different if
# they have different set of edges, even if they enclose the same area. For
# example, the polygon with vertices [(0,0),(0,3),(1,1),(3,0)] is distinct
# from the polygon with vertices [(0,0),(0,3),(1,1),(3,0),(1,0)]. For example,
# P(1) = 131, P(2) = 1648531, P(3) = 1099461296175 and P(343) mod 1 000 000
# 007 = 937293740. Find P(713) mod 1 000 000 007.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    # Placeholder: no actual solution for problem 465 is implemented yet.
    pass
if __name__ == '__main__':
    n = 1000
    i = 10000
    prob_id = 465
    # timed.caller is a project helper -- presumably it benchmarks dummy(n)
    # over i iterations and reports the result for the given problem id.
    timed.caller(dummy, n, i, prob_id)
| [
"timed.caller"
] | [((1483, 1517), 'timed.caller', 'timed.caller', (['dummy', 'n', 'i', 'prob_id'], {}), '(dummy, n, i, prob_id)\n', (1495, 1517), False, 'import timed\n')] |
#!/usr/bin/python
"""
GFS.py - GFS GRIB2 class
"""
# Stock modules
import sys
import os
import re
import logging
import traceback
import datetime
import collections
import operator
# Local modules
import error_codes
import utils
from dataset import dataset
LOG = logging.getLogger(__name__)
# gfs.t18z.pgrb2.1p00.f000.20120818
regexp="^gfs\\.t(?P<hour>\\d{2})z\\.pgrb2\\.1p00\\.f(?P<fhour>\\d{3})\\.(?P<runDTG>\\d{8})$"
schema={
"$schema": "http://json-schema.org/draft-04/schema#",
"type":"object",
"properties": {
"hour":{ "type":"string" },
"fhour":{ "type":"string" },
"runDTG":{ "type":"string" },
}
}
class gfs(dataset):
    """GFS 1.00-degree GRIB2 dataset (filenames like gfs.t18z.pgrb2.1p00.f000.20120818)."""
    def __init__(self,filepath):
        # The dataset base class presumably validates the filename against
        # self.regexp/self.schema and fills self.properties with the named
        # capture groups -- confirm in dataset.py.
        self.regexp=regexp
        self.schema=schema
        super(gfs,self).__init__(filepath)
        # Convert captured cycle hour and forecast hour strings to integers.
        self.properties['hour']=int(self.properties['hour'])
        self.properties['fhour']=int(self.properties['fhour'])
        # Combine run date (YYYYMMDD) and cycle hour into a datetime.
        # NOTE(review): the int hour is not zero-padded here; %H tolerates
        # 1-digit hours so this still parses, but "{:02d}" would be safer.
        DTS="{}{}".format(self.properties['runDTG'],self.properties['hour'])
        self.properties['runDTG']=datetime.datetime.strptime(DTS,"%Y%m%d%H")
def latestGFS(config,metadata,dataname):
    """
    Return a single-element list holding the metadata object with the most
    recent run date-time group (runDTG) for the given dataset name.

    Note: the first element is popped from metadata[dataname], so the
    caller's list is mutated.

    :param config: Unused; kept for call-compatibility with sibling filters.
    :param metadata: Mapping of dataset name to a list of property objects.
    :param dataname: Key into metadata identifying the dataset to filter.
    """
    try:
        latestGFS=metadata[dataname].pop(0)
        # Renamed loop variable: the original `gfs` shadowed the gfs class.
        for candidate in metadata[dataname]:
            if candidate.get('runDTG') > latestGFS.get('runDTG'):
                latestGFS=candidate
    except Exception:
        # BUG FIX: the message previously had no placeholder, so
        # .format(dataname) was a silent no-op; also narrowed the bare except.
        msg="Problem filtering {} object list".format(dataname)
        utils.error(LOG,msg,error_codes.EX_DATAERR)
    return([latestGFS])
| [
"logging.getLogger",
"utils.error",
"datetime.datetime.strptime"
] | [((267, 294), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (284, 294), False, 'import logging\n'), ((1042, 1085), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['DTS', '"""%Y%m%d%H"""'], {}), "(DTS, '%Y%m%d%H')\n", (1068, 1085), False, 'import datetime\n'), ((1432, 1477), 'utils.error', 'utils.error', (['LOG', 'msg', 'error_codes.EX_DATAERR'], {}), '(LOG, msg, error_codes.EX_DATAERR)\n', (1443, 1477), False, 'import utils\n')] |
from __future__ import unicode_literals
import frappe
from frappe.utils import date_diff, nowdate, add_days, format_datetime
from datetime import timedelta, datetime
def expire_doc_mail_notify():
    # stuff to do every 10 minutes
    today = nowdate()
    # Only documents that have not expired yet are candidates for a reminder.
    employee_documents = frappe.db.get_list('Employee Documents',
                                            fields=['parent', 'doc_name', 'reminder_days', 'expire_date'],
                                            filters={"expire_date": (">", today)})
    for doc in employee_documents:
        # The reminder fires exactly `reminder_days` days before expiry.
        notify_on = doc.get('expire_date') - timedelta(days=doc.get('reminder_days'))
        if notify_on != datetime.today().date():
            continue
        employee = frappe.get_doc("Employee", {"name": doc.get('parent')}).as_dict()
        # Collect every non-empty employee address ...
        recipients = [address for address in (employee.prefered_email,
                                              employee.company_email,
                                              employee.personal_email) if address]
        # ... plus the address of every company.
        for company in frappe.db.get_all("Company"):
            company = frappe.get_doc("Company", company.name)
            recipients.append(company.email)
        if recipients:
            frappe.sendmail(recipients=recipients, subject=frappe._('Document Will Expire Soon!'),
                            message='Your document {0} will expire soon\n Please Renew asap \n Kind Regards'.format(
                                doc.get('doc_name')))
| [
"frappe.db.get_all",
"frappe._",
"frappe.utils.nowdate",
"frappe.get_doc",
"datetime.datetime.today",
"frappe.db.get_list"
] | [((245, 254), 'frappe.utils.nowdate', 'nowdate', ([], {}), '()\n', (252, 254), False, 'from frappe.utils import date_diff, nowdate, add_days, format_datetime\n'), ((280, 426), 'frappe.db.get_list', 'frappe.db.get_list', (['"""Employee Documents"""'], {'fields': "['parent', 'doc_name', 'reminder_days', 'expire_date']", 'filters': "{'expire_date': ('>', today)}"}), "('Employee Documents', fields=['parent', 'doc_name',\n 'reminder_days', 'expire_date'], filters={'expire_date': ('>', today)})\n", (298, 426), False, 'import frappe\n'), ((1157, 1185), 'frappe.db.get_all', 'frappe.db.get_all', (['"""Company"""'], {}), "('Company')\n", (1174, 1185), False, 'import frappe\n'), ((1213, 1252), 'frappe.get_doc', 'frappe.get_doc', (['"""Company"""', 'company.name'], {}), "('Company', company.name)\n", (1227, 1252), False, 'import frappe\n'), ((669, 685), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (683, 685), False, 'from datetime import timedelta, datetime\n'), ((1392, 1430), 'frappe._', 'frappe._', (['"""Document Will Expire Soon!"""'], {}), "('Document Will Expire Soon!')\n", (1400, 1430), False, 'import frappe\n')] |
from flask import Flask
from flask import request
from flask import Response
from handling import Handler
import json

app = Flask(__name__)

# Index name and port for the backing store -- port 9200 suggests
# Elasticsearch, but confirm in handling.py.
INDEX_NAME = 'contacts'
PORT = 9200

# NOTE(review): wipe_index=True drops any existing contact data at startup;
# verify this is intended outside of development/testing.
handler = Handler(INDEX_NAME, port = PORT, wipe_index = True)
@app.route('/contact', methods=['GET','POST'])
def contact_without_name():
    """Create a contact (POST) or list matching contacts (GET)."""
    def json_response(body, code):
        # All responses share the JSON mimetype; only body and status vary.
        return Response(body, status=code, mimetype='application/json')

    if request.method == 'POST':
        if handler.create_contact(request.json):
            return json_response('{"result": "created"}', 201)
        return json_response('{"result": "failure"}', 400)

    res = handler.list_contacts(request.args)
    if res:
        return json_response('{"data":' + json.dumps(res) + '}', 200)
    return json_response('{"result": "failure"}', 400)
@app.route('/contact/<name>', methods=['GET', 'PUT', 'DELETE'])
def contact_with_name(name):
    """Fetch (GET), update (PUT) or delete (DELETE) a single named contact."""
    def json_response(body, code):
        # All responses share the JSON mimetype; only body and status vary.
        return Response(body, status=code, mimetype='application/json')

    if request.method == 'GET':
        res = handler.list_a_contact(name)
        if res:
            return json_response('{"data":' + json.dumps(res) + '}', 200)
        return json_response('{"result": "failure"}', 400)

    if request.method == 'PUT':
        if handler.update_contact(request.json):
            return json_response('{"result": "updated"}', 200)
        return json_response('{"result": "failure"}', 400)

    if handler.delete_contact(name):
        return json_response('{"result": "deleted"}', 200)
    return json_response('{"result": "failure"}', 400)
| [
"json.dumps",
"flask.Response",
"handling.Handler",
"flask.Flask"
] | [((125, 140), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (130, 140), False, 'from flask import Flask\n'), ((189, 236), 'handling.Handler', 'Handler', (['INDEX_NAME'], {'port': 'PORT', 'wipe_index': '(True)'}), '(INDEX_NAME, port=PORT, wipe_index=True)\n', (196, 236), False, 'from handling import Handler\n'), ((418, 492), 'flask.Response', 'Response', (['"""{"result": "created"}"""'], {'status': '(201)', 'mimetype': '"""application/json"""'}), '(\'{"result": "created"}\', status=201, mimetype=\'application/json\')\n', (426, 492), False, 'from flask import Response\n'), ((586, 660), 'flask.Response', 'Response', (['"""{"result": "failure"}"""'], {'status': '(400)', 'mimetype': '"""application/json"""'}), '(\'{"result": "failure"}\', status=400, mimetype=\'application/json\')\n', (594, 660), False, 'from flask import Response\n'), ((995, 1069), 'flask.Response', 'Response', (['"""{"result": "failure"}"""'], {'status': '(400)', 'mimetype': '"""application/json"""'}), '(\'{"result": "failure"}\', status=400, mimetype=\'application/json\')\n', (1003, 1069), False, 'from flask import Response\n'), ((1513, 1587), 'flask.Response', 'Response', (['"""{"result": "failure"}"""'], {'status': '(400)', 'mimetype': '"""application/json"""'}), '(\'{"result": "failure"}\', status=400, mimetype=\'application/json\')\n', (1521, 1587), False, 'from flask import Response\n'), ((1750, 1824), 'flask.Response', 'Response', (['"""{"result": "updated"}"""'], {'status': '(200)', 'mimetype': '"""application/json"""'}), '(\'{"result": "updated"}\', status=200, mimetype=\'application/json\')\n', (1758, 1824), False, 'from flask import Response\n'), ((1918, 1992), 'flask.Response', 'Response', (['"""{"result": "failure"}"""'], {'status': '(400)', 'mimetype': '"""application/json"""'}), '(\'{"result": "failure"}\', status=400, mimetype=\'application/json\')\n', (1926, 1992), False, 'from flask import Response\n'), ((2123, 2197), 'flask.Response', 'Response', (['"""{"result": 
"deleted"}"""'], {'status': '(200)', 'mimetype': '"""application/json"""'}), '(\'{"result": "deleted"}\', status=200, mimetype=\'application/json\')\n', (2131, 2197), False, 'from flask import Response\n'), ((2291, 2365), 'flask.Response', 'Response', (['"""{"result": "failure"}"""'], {'status': '(400)', 'mimetype': '"""application/json"""'}), '(\'{"result": "failure"}\', status=400, mimetype=\'application/json\')\n', (2299, 2365), False, 'from flask import Response\n'), ((838, 853), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (848, 853), False, 'import json\n'), ((1356, 1371), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (1366, 1371), False, 'import json\n')] |
from node import Node
class Stack:
    """Singly linked LIFO stack built from ``Node`` objects."""

    def __init__(self):
        # Top of the stack; None when empty.
        self.head = None

    def __str__(self):
        """Render the stack top-to-bottom as a list literal."""
        # Renamed from `list`, which shadowed the builtin.
        items = []
        node = self.head
        while node:
            items.append(node.get_item())
            node = node.get_next()
        return str(items)

    def is_empty(self):
        """Return True when the stack holds no items."""
        return not self.head

    def push(self, item):
        """Place *item* on top of the stack."""
        if not self.head:
            self.head = Node(item)
        else:
            self.head = Node(item, self.head)

    def pop(self):
        """Remove and return the top item.

        Raises:
            EmptyStackException: if the stack is empty.
        """
        if not self.head:
            raise EmptyStackException('Cannot pop from an empty stack')
        item = self.head.get_item()
        # get_next() returns None for the last node, so one assignment
        # covers both the single-item and multi-item cases.
        self.head = self.head.get_next()
        return item

    def peek(self):
        """Return the top item without removing it.

        Raises:
            EmptyStackException: if the stack is empty.
        """
        if not self.head:
            raise EmptyStackException('Cannot peek from an empty stack')
        return self.head.get_item()

    def size(self):
        """Return the number of items by walking the chain."""
        count = 0
        node = self.head
        while node:
            count += 1
            node = node.get_next()
        return count
class EmptyStackException(Exception):
    # Raised by Stack.pop() / Stack.peek() when the stack is empty.
    pass
| [
"node.Node"
] | [((324, 334), 'node.Node', 'Node', (['item'], {}), '(item)\n', (328, 334), False, 'from node import Node\n'), ((359, 380), 'node.Node', 'Node', (['item', 'self.head'], {}), '(item, self.head)\n', (363, 380), False, 'from node import Node\n')] |
import logging
import logging.handlers
"""
Returns a logging handler with a default format
"""
def get_logging_handler(log_filename):
handler = logging.handlers.RotatingFileHandler(log_filename, maxBytes=2*1024*1024, backupCount=5)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
return handler
| [
"logging.Formatter",
"logging.handlers.RotatingFileHandler"
] | [((155, 250), 'logging.handlers.RotatingFileHandler', 'logging.handlers.RotatingFileHandler', (['log_filename'], {'maxBytes': '(2 * 1024 * 1024)', 'backupCount': '(5)'}), '(log_filename, maxBytes=2 * 1024 * 1024,\n backupCount=5)\n', (191, 250), False, 'import logging\n'), ((259, 332), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(name)s - %(message)s')\n", (276, 332), False, 'import logging\n')] |
from queue import Queue
from typing import TYPE_CHECKING
from galaxy_crawler.crawl import Crawler
from galaxy_crawler.filters import DefaultFilter
from galaxy_crawler.filters.v1 import V1FilterEnum
from galaxy_crawler.models.engine import EngineType
from galaxy_crawler.models.dependeny_resolver import DependencyResolver
from galaxy_crawler.parser import ResponseParser
from galaxy_crawler.queries.v1 import V1QueryBuilder, V1QueryOrder
from galaxy_crawler.store import JsonDataStore, RDBStore
from galaxy_crawler.utils import mkdir
if TYPE_CHECKING:
from typing import List, Type
from galaxy_crawler.repositories import ResponseDataStore, RDBStorage
from galaxy_crawler.queries import QueryBuilder, QueryOrder
from galaxy_crawler.filters import Filter
from galaxy_crawler.constants import Target
from galaxy_command.app.config import Config
class AppComponent(object):
    """Factory wiring together the crawler's collaborating objects.

    Every ``get_*`` method builds one collaborator from ``self.config``
    so construction logic stays in a single place.
    """

    def __init__(self, config: 'Config'):
        self.config = config
        # Shared queue through which the crawler hands raw JSON
        # responses to the parser.
        self.json_queue = Queue()

    def get_response_data_stores(self) -> 'List[ResponseDataStore]':
        """Build one data store per configured output format.

        :raises ValueError: if no supported output format is configured.
        """
        output_dir = self.config.output_dir
        mkdir(output_dir)
        stores = [JsonDataStore(output_dir)
                  for store_format in self.config.output_format
                  if store_format == "json"]
        if not stores:
            # Previously an `assert`, which is stripped under `python -O`.
            raise ValueError("No data format specified")
        return stores

    def get_query_builder(self) -> 'QueryBuilder':
        """Return the query builder for the v1 Galaxy API."""
        return V1QueryBuilder()

    def get_crawler(self) -> 'Crawler':
        """Assemble the crawler with its targets, queries and queue."""
        return Crawler(
            targets=self.get_targets(),
            query_builder=self.get_query_builder(),
            order=self.get_query_order(),
            json_queue=self.json_queue,
            wait_interval=self.config.interval,
            retry=self.config.retry,
        )

    def get_parser(self) -> 'ResponseParser':
        """Assemble the response parser feeding the data stores."""
        return ResponseParser(
            json_queue=self.json_queue,
            data_stores=self.get_response_data_stores(),
            filters=self.get_filters(),
        )

    def get_query_order(self) -> 'QueryOrder':
        """Translate the configured order name into a ``V1QueryOrder``.

        :raises ValueError: if the configured order is not supported.
        """
        order_by = self.config.order_by
        if order_by not in V1QueryOrder.choices():
            raise ValueError(f"Order type '{order_by}' is not supported.")
        return V1QueryOrder[order_by.upper()]

    def get_filters(self) -> 'List[Filter]':
        """Build filters from config; fall back to the default filter."""
        filters = self.config.filters
        if not filters:
            return [DefaultFilter()]
        return [V1FilterEnum.by_expr(f) for f in filters]

    def get_targets(self) -> 'List[Target]':
        return self.config.targets

    def get_engine(self):
        """Create a DB engine from the storage URL, or the environment
        when no URL is configured."""
        url = self.config.storage
        if url is None:
            return EngineType.from_env_var().get_engine()
        et = EngineType.from_url(url)
        return et.get_engine(url)

    def get_rdb_store_class(self) -> 'Type[RDBStorage]':
        return RDBStore

    def get_rdb_store(self) -> 'RDBStorage':
        storage_cls = self.get_rdb_store_class()
        return storage_cls(self.get_engine())

    def get_dependency_resolver(self) -> 'DependencyResolver':
        """Resolver sharing the query builder and crawl interval."""
        return DependencyResolver(self.get_query_builder(), int(self.config.interval))
| [
"galaxy_crawler.filters.DefaultFilter",
"galaxy_crawler.queries.v1.V1QueryBuilder",
"galaxy_crawler.models.engine.EngineType.from_url",
"galaxy_crawler.models.engine.EngineType.from_env_var",
"galaxy_crawler.utils.mkdir",
"galaxy_crawler.queries.v1.V1QueryOrder.choices",
"galaxy_crawler.filters.v1.V1Fil... | [((997, 1004), 'queue.Queue', 'Queue', ([], {}), '()\n', (1002, 1004), False, 'from queue import Queue\n'), ((1151, 1168), 'galaxy_crawler.utils.mkdir', 'mkdir', (['output_dir'], {}), '(output_dir)\n', (1156, 1168), False, 'from galaxy_crawler.utils import mkdir\n'), ((1469, 1485), 'galaxy_crawler.queries.v1.V1QueryBuilder', 'V1QueryBuilder', ([], {}), '()\n', (1483, 1485), False, 'from galaxy_crawler.queries.v1 import V1QueryBuilder, V1QueryOrder\n'), ((2785, 2809), 'galaxy_crawler.models.engine.EngineType.from_url', 'EngineType.from_url', (['url'], {}), '(url)\n', (2804, 2809), False, 'from galaxy_crawler.models.engine import EngineType\n'), ((2160, 2182), 'galaxy_crawler.queries.v1.V1QueryOrder.choices', 'V1QueryOrder.choices', ([], {}), '()\n', (2180, 2182), False, 'from galaxy_crawler.queries.v1 import V1QueryBuilder, V1QueryOrder\n'), ((2506, 2529), 'galaxy_crawler.filters.v1.V1FilterEnum.by_expr', 'V1FilterEnum.by_expr', (['f'], {}), '(f)\n', (2526, 2529), False, 'from galaxy_crawler.filters.v1 import V1FilterEnum\n'), ((2473, 2488), 'galaxy_crawler.filters.DefaultFilter', 'DefaultFilter', ([], {}), '()\n', (2486, 2488), False, 'from galaxy_crawler.filters import DefaultFilter\n'), ((1293, 1318), 'galaxy_crawler.store.JsonDataStore', 'JsonDataStore', (['output_dir'], {}), '(output_dir)\n', (1306, 1318), False, 'from galaxy_crawler.store import JsonDataStore, RDBStore\n'), ((2733, 2758), 'galaxy_crawler.models.engine.EngineType.from_env_var', 'EngineType.from_env_var', ([], {}), '()\n', (2756, 2758), False, 'from galaxy_crawler.models.engine import EngineType\n')] |
import ipywidgets as widgets
import ipywidgets
from traitlets import Unicode
import traitlets
from traittypes import Array
import logging
import numpy as np
from .serialize import array_cube_png_serialization, array_serialization
from .transferfunction import *
import warnings
logger = logging.getLogger("ipyvolume")
_last_volume_renderer = None
@widgets.register('ipyvolume.Scatter')
class Scatter(widgets.DOMWidget):
    """Widget model for 3d scatter data, synchronised with the JS front end."""

    # Names binding this model to its JavaScript view/model classes.
    _view_name = Unicode('ScatterView').tag(sync=True)
    _view_module = Unicode('ipyvolume').tag(sync=True)
    _model_name = Unicode('ScatterModel').tag(sync=True)
    _model_module = Unicode('ipyvolume').tag(sync=True)
    # Point coordinates.
    x = Array(default_value=None).tag(sync=True, **array_serialization)
    y = Array(default_value=None).tag(sync=True, **array_serialization)
    z = Array(default_value=None).tag(sync=True, **array_serialization)
    # Optional per-point vector components — presumably for quiver-style
    # rendering; TODO confirm against the JS view.
    vx = Array(default_value=None,allow_none=True).tag(sync=True, **array_serialization)
    vy = Array(default_value=None,allow_none=True).tag(sync=True, **array_serialization)
    vz = Array(default_value=None,allow_none=True).tag(sync=True, **array_serialization)
    # Selection state, drawn with the *_selected size/colour below.
    selected = Array(default_value=None,allow_none=True).tag(sync=True, **array_serialization)
    size = traitlets.Float(0.01).tag(sync=True)
    size_selected = traitlets.Float(0.02).tag(sync=True)
    color = traitlets.Unicode(default_value="red").tag(sync=True)
    color_selected = traitlets.Unicode(default_value="white").tag(sync=True)
    # Marker geometry used by the renderer.
    geo = traitlets.Unicode('diamond').tag(sync=True)
# Default visual style (dark background, colour-coded axes); used as the
# default for VolumeRendererThree.style below.
default_style = dict()
default_style["figure.facecolor"] = "black"
default_style["xaxis.color"] = "red"
default_style["yaxis.color"] = "green"
default_style["zaxis.color"] = "blue"
default_style["axes.color"] = "grey"
@widgets.register('ipyvolume.VolumeRendererThree')
class VolumeRendererThree(widgets.DOMWidget):
    """Widget class representing a volume (rendering) using three.js"""
    # Names binding this model to its JavaScript view/model classes.
    _view_name = Unicode('VolumeRendererThreeView').tag(sync=True)
    _view_module = Unicode('ipyvolume').tag(sync=True)
    _model_name = Unicode('VolumeRendererThreeModel').tag(sync=True)
    _model_module = Unicode('ipyvolume').tag(sync=True)
    # 3d array to render, serialised to the client as PNG-encoded cube data.
    data = Array(default_value=None, allow_none=True).tag(sync=True, **array_cube_png_serialization)
    data_min = traitlets.CFloat().tag(sync=True)
    data_max = traitlets.CFloat().tag(sync=True)
    # Transfer function mapping data values to colour/opacity.
    tf = traitlets.Instance(TransferFunction, allow_none=True).tag(sync=True, **ipywidgets.widget_serialization)
    angle1 = traitlets.Float(0.1).tag(sync=True)
    angle2 = traitlets.Float(0.2).tag(sync=True)
    # Scatter layers drawn in the same scene.
    scatters = traitlets.List(traitlets.Instance(Scatter), [], allow_none=False).tag(sync=True, **ipywidgets.widget_serialization)
    animation = traitlets.Float(1000.0).tag(sync=True)
    # Lighting parameters (consumed by the shader on the JS side).
    ambient_coefficient = traitlets.Float(0.5).tag(sync=True)
    diffuse_coefficient = traitlets.Float(0.8).tag(sync=True)
    specular_coefficient = traitlets.Float(0.5).tag(sync=True)
    specular_exponent = traitlets.Float(5).tag(sync=True)
    stereo = traitlets.Bool(False).tag(sync=True)
    fullscreen = traitlets.Bool(False).tag(sync=True)
    # Canvas size in pixels; `downscale` renders at reduced resolution
    # and upscales, trading quality for speed.
    width = traitlets.CInt(500).tag(sync=True)
    height = traitlets.CInt(400).tag(sync=True)
    downscale = traitlets.CInt(1).tag(sync=True)
    show = traitlets.Unicode("Volume").tag(sync=True) # for debugging
    # Axis limits and labels.
    xlim = traitlets.List(traitlets.CFloat, default_value=[0, 1], minlen=2, maxlen=2).tag(sync=True)
    ylim = traitlets.List(traitlets.CFloat, default_value=[0, 1], minlen=2, maxlen=2).tag(sync=True)
    zlim = traitlets.List(traitlets.CFloat, default_value=[0, 1], minlen=2, maxlen=2).tag(sync=True)
    xlabel = traitlets.Unicode("x").tag(sync=True)
    ylabel = traitlets.Unicode("y").tag(sync=True)
    zlabel = traitlets.Unicode("z").tag(sync=True)
    style = traitlets.Dict(default_value=default_style).tag(sync=True)
#xlim = traitlets.Tuple(traitlets.CFloat(0), traitlets.CFloat(1)).tag(sync=True)
#y#lim = traitlets.Tuple(traitlets.CFloat(0), traitlets.CFloat(1)).tag(sync=True)
#zlim = traitlets.Tuple(traitlets.CFloat(0), traitlets.CFloat(1)).tag(sync=True)
def _volume_widets(v, lighting=False):
    """Wrap volume renderer *v* with its control widgets.

    :param v: a ``VolumeRendererThree`` instance
    :param lighting: when True, expose sliders for the lighting
        coefficients; when False, force flat (ambient-only) shading
    :return: an ``ipywidgets.VBox`` holding the transfer-function
        control, the renderer, and the extra controls
    """
    import ipywidgets
    if lighting:
        ambient_coefficient = ipywidgets.FloatSlider(min=0, max=1, step=0.001, value=v.ambient_coefficient, description="ambient")
        diffuse_coefficient = ipywidgets.FloatSlider(min=0, max=1, step=0.001, value=v.diffuse_coefficient, description="diffuse")
        specular_coefficient = ipywidgets.FloatSlider(min=0, max=1, step=0.001, value=v.specular_coefficient, description="specular")
        specular_exponent = ipywidgets.FloatSlider(min=0, max=10, step=0.001, value=v.specular_exponent, description="specular exp")
        # Keep the sliders and the renderer's traits in sync client-side.
        ipywidgets.jslink((v, 'ambient_coefficient'), (ambient_coefficient, 'value'))
        ipywidgets.jslink((v, 'diffuse_coefficient'), (diffuse_coefficient, 'value'))
        ipywidgets.jslink((v, 'specular_coefficient'), (specular_coefficient, 'value'))
        ipywidgets.jslink((v, 'specular_exponent'), (specular_exponent, 'value'))
        widgets_bottom = [ipywidgets.HBox([ambient_coefficient, diffuse_coefficient]),
                          ipywidgets.HBox([specular_coefficient, specular_exponent])]
    else:
        widgets_bottom = []
        # Without lighting controls, force flat shading.
        v.ambient_coefficient = 1
        v.diffuse_coefficient = 0
        v.specular_coefficient = 0
    # The original wrapped this in a dead `if 1:` block; it always runs.
    stereo = widgets.ToggleButton(value=v.stereo, description='stereo', icon='eye')
    # NOTE(review): initialised from v.stereo, not v.fullscreen — looks
    # unintended; confirm before changing.
    fullscreen = widgets.ToggleButton(value=v.stereo, description='fullscreen', icon='arrows-alt')
    ipywidgets.jslink((v, 'stereo'), (stereo, 'value'))
    ipywidgets.jslink((v, 'fullscreen'), (fullscreen, 'value'))
    widgets_bottom += [ipywidgets.HBox([stereo, fullscreen])]
    return ipywidgets.VBox(
        [v.tf.control(), v,
         ] + widgets_bottom
    )
def volshow(*args, **kwargs):
    """Deprecated alias for :func:`quickvolshow`.

    Emits a ``DeprecationWarning`` and forwards all arguments unchanged.
    """
    warnings.warn(
        "Please use ipyvolume.quickvol or use the ipyvolume.pylab interface",
        DeprecationWarning,
        stacklevel=2,
    )
    return quickvolshow(*args, **kwargs)
def quickquiver(x, y, z, u, v, w, **kwargs):
    """Draw a quiver plot in a fresh pylab figure and return its container."""
    import ipyvolume.pylab as plab
    plab.figure()
    plab.quiver(x, y, z, u, v, w, **kwargs)
    return plab.current.container
def quickscatter(x, y, z, **kwargs):
    """Draw a scatter plot in a fresh pylab figure and return its container."""
    import ipyvolume.pylab as plab
    plab.figure()
    plab.scatter(x, y, z, **kwargs)
    return plab.current.container
def quickvolshow(data, lighting=False, data_min=None, data_max=None, tf=None, stereo=False,
                 width=400, height=500,
                 ambient_coefficient=0.5, diffuse_coefficient=0.8,
                 specular_coefficient=0.5, specular_exponent=5,
                 downscale=1,
                 level=[0.1, 0.5, 0.9], opacity=[0.01, 0.05, 0.1], level_width=0.1, **kwargs):
    """
    Visualize a 3d array using volume rendering

    :param data: 3d numpy array
    :param lighting: boolean, to use lighting or not, if set to false, lighting parameters will be overriden
    :param data_min: minimum value to consider for data, if None, computed using np.nanmin
    :param data_max: maximum value to consider for data, if None, computed using np.nanmax
    :param tf: transfer function (see ipyvolume.transfer_function, or use the argument below)
    :param stereo: stereo view for virtual reality (cardboard and similar VR head mount)
    :param width: width of rendering surface
    :param height: height of rendering surface
    :param ambient_coefficient: lighting parameter
    :param diffuse_coefficient: lighting parameter
    :param specular_coefficient: lighting parameter
    :param specular_exponent: lighting parameter
    :param downscale: downscale the rendering for better performance, for instance when set to 2, a 512x512 canvas will show a 256x256 rendering upscaled, but it will render twice as fast.
    :param level: level(s) for the where the opacity in the volume peaks, maximum sequence of length 3
    :param opacity: opacity(ies) for each level, scalar or sequence of max length 3
    :param level_width: width of the (gaussian) bumps where the opacity peaks, scalar or sequence of max length 3
    :param kwargs: extra argument passed to Volume and default transfer function
    :return:
    """
    # NOTE: the mutable list defaults for level/opacity are only read and
    # rebound below, never mutated in place, so they are safe here.
    if tf is None:  # TODO: should this just call the pylab interface?
        tf_kwargs = {}
        # level, opacity and widths can be scalars; wrap them into lists.
        # The bare `except:` clauses previously swallowed *every* error;
        # only "not indexable" / "empty" are expected here.
        try:
            level[0]
        except (TypeError, IndexError):
            level = [level]
        try:
            opacity[0]
        except (TypeError, IndexError):
            opacity = [opacity] * 3
        try:
            level_width[0]
        except (TypeError, IndexError):
            level_width = [level_width] * 3
        # clip off lists to a common length
        min_length = min(len(level), len(level_width), len(opacity))
        level = list(level[:min_length])
        opacity = list(opacity[:min_length])
        level_width = list(level_width[:min_length])
        # pad with zeros so the transfer function always gets three levels
        while len(level) < 3:
            level.append(0)
        while len(opacity) < 3:
            opacity.append(0)
        while len(level_width) < 3:
            level_width.append(0)
        for i in range(1, 4):
            tf_kwargs["level" + str(i)] = level[i - 1]
            tf_kwargs["opacity" + str(i)] = opacity[i - 1]
            tf_kwargs["width" + str(i)] = level_width[i - 1]
        tf = TransferFunctionWidgetJs3(**tf_kwargs)
    if data_min is None:
        data_min = np.nanmin(data)
    if data_max is None:
        data_max = np.nanmax(data)
    v = VolumeRendererThree(data=data, data_min=data_min, data_max=data_max, stereo=stereo,
                            width=width, height=height,
                            ambient_coefficient=ambient_coefficient,
                            diffuse_coefficient=diffuse_coefficient,
                            specular_coefficient=specular_coefficient,
                            specular_exponent=specular_exponent,
                            tf=tf, **kwargs)
    box = _volume_widets(v, lighting=lighting)
    return box
def scatter(x, y, z, color=(1,0,0), s=0.01):
    # NOTE(review): `_last_figure` is never assigned anywhere in this
    # module (only `_last_volume_renderer` exists), so the read below
    # will raise NameError on first use — confirm the intended global.
    global _last_figure;
    fig = _last_figure
    if fig is None:
        fig = volshow(None)
    # NOTE(review): volshow() returns an ipywidgets.VBox; attaching
    # `scatter` and reading `fig.volume` look inconsistent with that —
    # verify against callers before relying on this function.
    fig.scatter = Scatter(x=x, y=y, z=z, color=color, size=s)
    fig.volume.scatter = fig.scatter
    return fig
| [
"logging.getLogger",
"traitlets.Instance",
"traitlets.List",
"ipywidgets.FloatSlider",
"numpy.nanmin",
"ipywidgets.HBox",
"traittypes.Array",
"ipywidgets.register",
"numpy.nanmax",
"warnings.warn",
"ipyvolume.pylab.figure",
"traitlets.Unicode",
"traitlets.CInt",
"ipywidgets.jslink",
"tra... | [((289, 319), 'logging.getLogger', 'logging.getLogger', (['"""ipyvolume"""'], {}), "('ipyvolume')\n", (306, 319), False, 'import logging\n'), ((352, 389), 'ipywidgets.register', 'widgets.register', (['"""ipyvolume.Scatter"""'], {}), "('ipyvolume.Scatter')\n", (368, 389), True, 'import ipywidgets as widgets\n'), ((1748, 1797), 'ipywidgets.register', 'widgets.register', (['"""ipyvolume.VolumeRendererThree"""'], {}), "('ipyvolume.VolumeRendererThree')\n", (1764, 1797), True, 'import ipywidgets as widgets\n'), ((6410, 6536), 'warnings.warn', 'warnings.warn', (['"""Please use ipyvolume.quickvol or use the ipyvolume.pylab interface"""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n 'Please use ipyvolume.quickvol or use the ipyvolume.pylab interface',\n DeprecationWarning, stacklevel=2)\n", (6423, 6536), False, 'import warnings\n'), ((6652, 6663), 'ipyvolume.pylab.figure', 'p3.figure', ([], {}), '()\n', (6661, 6663), True, 'import ipyvolume.pylab as p3\n'), ((6668, 6705), 'ipyvolume.pylab.quiver', 'p3.quiver', (['x', 'y', 'z', 'u', 'v', 'w'], {}), '(x, y, z, u, v, w, **kwargs)\n', (6677, 6705), True, 'import ipyvolume.pylab as p3\n'), ((6813, 6824), 'ipyvolume.pylab.figure', 'p3.figure', ([], {}), '()\n', (6822, 6824), True, 'import ipyvolume.pylab as p3\n'), ((6829, 6858), 'ipyvolume.pylab.scatter', 'p3.scatter', (['x', 'y', 'z'], {}), '(x, y, z, **kwargs)\n', (6839, 6858), True, 'import ipyvolume.pylab as p3\n'), ((4526, 4631), 'ipywidgets.FloatSlider', 'ipywidgets.FloatSlider', ([], {'min': '(0)', 'max': '(1)', 'step': '(0.001)', 'value': 'v.ambient_coefficient', 'description': '"""ambient"""'}), "(min=0, max=1, step=0.001, value=v.\n ambient_coefficient, description='ambient')\n", (4548, 4631), False, 'import ipywidgets\n'), ((4657, 4762), 'ipywidgets.FloatSlider', 'ipywidgets.FloatSlider', ([], {'min': '(0)', 'max': '(1)', 'step': '(0.001)', 'value': 'v.diffuse_coefficient', 'description': '"""diffuse"""'}), "(min=0, max=1, step=0.001, value=v.\n 
diffuse_coefficient, description='diffuse')\n", (4679, 4762), False, 'import ipywidgets\n'), ((4789, 4896), 'ipywidgets.FloatSlider', 'ipywidgets.FloatSlider', ([], {'min': '(0)', 'max': '(1)', 'step': '(0.001)', 'value': 'v.specular_coefficient', 'description': '"""specular"""'}), "(min=0, max=1, step=0.001, value=v.\n specular_coefficient, description='specular')\n", (4811, 4896), False, 'import ipywidgets\n'), ((4920, 5028), 'ipywidgets.FloatSlider', 'ipywidgets.FloatSlider', ([], {'min': '(0)', 'max': '(10)', 'step': '(0.001)', 'value': 'v.specular_exponent', 'description': '"""specular exp"""'}), "(min=0, max=10, step=0.001, value=v.specular_exponent,\n description='specular exp')\n", (4942, 5028), False, 'import ipywidgets\n'), ((5132, 5209), 'ipywidgets.jslink', 'ipywidgets.jslink', (["(v, 'ambient_coefficient')", "(ambient_coefficient, 'value')"], {}), "((v, 'ambient_coefficient'), (ambient_coefficient, 'value'))\n", (5149, 5209), False, 'import ipywidgets\n'), ((5218, 5295), 'ipywidgets.jslink', 'ipywidgets.jslink', (["(v, 'diffuse_coefficient')", "(diffuse_coefficient, 'value')"], {}), "((v, 'diffuse_coefficient'), (diffuse_coefficient, 'value'))\n", (5235, 5295), False, 'import ipywidgets\n'), ((5304, 5383), 'ipywidgets.jslink', 'ipywidgets.jslink', (["(v, 'specular_coefficient')", "(specular_coefficient, 'value')"], {}), "((v, 'specular_coefficient'), (specular_coefficient, 'value'))\n", (5321, 5383), False, 'import ipywidgets\n'), ((5392, 5465), 'ipywidgets.jslink', 'ipywidgets.jslink', (["(v, 'specular_exponent')", "(specular_exponent, 'value')"], {}), "((v, 'specular_exponent'), (specular_exponent, 'value'))\n", (5409, 5465), False, 'import ipywidgets\n'), ((5791, 5861), 'ipywidgets.ToggleButton', 'widgets.ToggleButton', ([], {'value': 'v.stereo', 'description': '"""stereo"""', 'icon': '"""eye"""'}), "(value=v.stereo, description='stereo', icon='eye')\n", (5811, 5861), True, 'import ipywidgets as widgets\n'), ((5883, 5969), 'ipywidgets.ToggleButton', 
'widgets.ToggleButton', ([], {'value': 'v.stereo', 'description': '"""fullscreen"""', 'icon': '"""arrows-alt"""'}), "(value=v.stereo, description='fullscreen', icon=\n 'arrows-alt')\n", (5903, 5969), True, 'import ipywidgets as widgets\n'), ((5973, 6024), 'ipywidgets.jslink', 'ipywidgets.jslink', (["(v, 'stereo')", "(stereo, 'value')"], {}), "((v, 'stereo'), (stereo, 'value'))\n", (5990, 6024), False, 'import ipywidgets\n'), ((6033, 6092), 'ipywidgets.jslink', 'ipywidgets.jslink', (["(v, 'fullscreen')", "(fullscreen, 'value')"], {}), "((v, 'fullscreen'), (fullscreen, 'value'))\n", (6050, 6092), False, 'import ipywidgets\n'), ((9903, 9918), 'numpy.nanmin', 'np.nanmin', (['data'], {}), '(data)\n', (9912, 9918), True, 'import numpy as np\n'), ((9963, 9978), 'numpy.nanmax', 'np.nanmax', (['data'], {}), '(data)\n', (9972, 9978), True, 'import numpy as np\n'), ((441, 463), 'traitlets.Unicode', 'Unicode', (['"""ScatterView"""'], {}), "('ScatterView')\n", (448, 463), False, 'from traitlets import Unicode\n'), ((498, 518), 'traitlets.Unicode', 'Unicode', (['"""ipyvolume"""'], {}), "('ipyvolume')\n", (505, 518), False, 'from traitlets import Unicode\n'), ((552, 575), 'traitlets.Unicode', 'Unicode', (['"""ScatterModel"""'], {}), "('ScatterModel')\n", (559, 575), False, 'from traitlets import Unicode\n'), ((611, 631), 'traitlets.Unicode', 'Unicode', (['"""ipyvolume"""'], {}), "('ipyvolume')\n", (618, 631), False, 'from traitlets import Unicode\n'), ((655, 680), 'traittypes.Array', 'Array', ([], {'default_value': 'None'}), '(default_value=None)\n', (660, 680), False, 'from traittypes import Array\n'), ((727, 752), 'traittypes.Array', 'Array', ([], {'default_value': 'None'}), '(default_value=None)\n', (732, 752), False, 'from traittypes import Array\n'), ((799, 824), 'traittypes.Array', 'Array', ([], {'default_value': 'None'}), '(default_value=None)\n', (804, 824), False, 'from traittypes import Array\n'), ((872, 914), 'traittypes.Array', 'Array', ([], {'default_value': 'None', 
'allow_none': '(True)'}), '(default_value=None, allow_none=True)\n', (877, 914), False, 'from traittypes import Array\n'), ((961, 1003), 'traittypes.Array', 'Array', ([], {'default_value': 'None', 'allow_none': '(True)'}), '(default_value=None, allow_none=True)\n', (966, 1003), False, 'from traittypes import Array\n'), ((1050, 1092), 'traittypes.Array', 'Array', ([], {'default_value': 'None', 'allow_none': '(True)'}), '(default_value=None, allow_none=True)\n', (1055, 1092), False, 'from traittypes import Array\n'), ((1145, 1187), 'traittypes.Array', 'Array', ([], {'default_value': 'None', 'allow_none': '(True)'}), '(default_value=None, allow_none=True)\n', (1150, 1187), False, 'from traittypes import Array\n'), ((1236, 1257), 'traitlets.Float', 'traitlets.Float', (['(0.01)'], {}), '(0.01)\n', (1251, 1257), False, 'import traitlets\n'), ((1293, 1314), 'traitlets.Float', 'traitlets.Float', (['(0.02)'], {}), '(0.02)\n', (1308, 1314), False, 'import traitlets\n'), ((1342, 1380), 'traitlets.Unicode', 'traitlets.Unicode', ([], {'default_value': '"""red"""'}), "(default_value='red')\n", (1359, 1380), False, 'import traitlets\n'), ((1417, 1457), 'traitlets.Unicode', 'traitlets.Unicode', ([], {'default_value': '"""white"""'}), "(default_value='white')\n", (1434, 1457), False, 'import traitlets\n'), ((1483, 1511), 'traitlets.Unicode', 'traitlets.Unicode', (['"""diamond"""'], {}), "('diamond')\n", (1500, 1511), False, 'import traitlets\n'), ((1933, 1967), 'traitlets.Unicode', 'Unicode', (['"""VolumeRendererThreeView"""'], {}), "('VolumeRendererThreeView')\n", (1940, 1967), False, 'from traitlets import Unicode\n'), ((2002, 2022), 'traitlets.Unicode', 'Unicode', (['"""ipyvolume"""'], {}), "('ipyvolume')\n", (2009, 2022), False, 'from traitlets import Unicode\n'), ((2056, 2091), 'traitlets.Unicode', 'Unicode', (['"""VolumeRendererThreeModel"""'], {}), "('VolumeRendererThreeModel')\n", (2063, 2091), False, 'from traitlets import Unicode\n'), ((2127, 2147), 'traitlets.Unicode', 
'Unicode', (['"""ipyvolume"""'], {}), "('ipyvolume')\n", (2134, 2147), False, 'from traitlets import Unicode\n'), ((2175, 2217), 'traittypes.Array', 'Array', ([], {'default_value': 'None', 'allow_none': '(True)'}), '(default_value=None, allow_none=True)\n', (2180, 2217), False, 'from traittypes import Array\n'), ((2280, 2298), 'traitlets.CFloat', 'traitlets.CFloat', ([], {}), '()\n', (2296, 2298), False, 'import traitlets\n'), ((2329, 2347), 'traitlets.CFloat', 'traitlets.CFloat', ([], {}), '()\n', (2345, 2347), False, 'import traitlets\n'), ((2372, 2425), 'traitlets.Instance', 'traitlets.Instance', (['TransferFunction'], {'allow_none': '(True)'}), '(TransferFunction, allow_none=True)\n', (2390, 2425), False, 'import traitlets\n'), ((2489, 2509), 'traitlets.Float', 'traitlets.Float', (['(0.1)'], {}), '(0.1)\n', (2504, 2509), False, 'import traitlets\n'), ((2538, 2558), 'traitlets.Float', 'traitlets.Float', (['(0.2)'], {}), '(0.2)\n', (2553, 2558), False, 'import traitlets\n'), ((2723, 2746), 'traitlets.Float', 'traitlets.Float', (['(1000.0)'], {}), '(1000.0)\n', (2738, 2746), False, 'import traitlets\n'), ((2789, 2809), 'traitlets.Float', 'traitlets.Float', (['(0.5)'], {}), '(0.5)\n', (2804, 2809), False, 'import traitlets\n'), ((2851, 2871), 'traitlets.Float', 'traitlets.Float', (['(0.8)'], {}), '(0.8)\n', (2866, 2871), False, 'import traitlets\n'), ((2914, 2934), 'traitlets.Float', 'traitlets.Float', (['(0.5)'], {}), '(0.5)\n', (2929, 2934), False, 'import traitlets\n'), ((2974, 2992), 'traitlets.Float', 'traitlets.Float', (['(5)'], {}), '(5)\n', (2989, 2992), False, 'import traitlets\n'), ((3021, 3042), 'traitlets.Bool', 'traitlets.Bool', (['(False)'], {}), '(False)\n', (3035, 3042), False, 'import traitlets\n'), ((3075, 3096), 'traitlets.Bool', 'traitlets.Bool', (['(False)'], {}), '(False)\n', (3089, 3096), False, 'import traitlets\n'), ((3125, 3144), 'traitlets.CInt', 'traitlets.CInt', (['(500)'], {}), '(500)\n', (3139, 3144), False, 'import traitlets\n'), 
((3173, 3192), 'traitlets.CInt', 'traitlets.CInt', (['(400)'], {}), '(400)\n', (3187, 3192), False, 'import traitlets\n'), ((3224, 3241), 'traitlets.CInt', 'traitlets.CInt', (['(1)'], {}), '(1)\n', (3238, 3241), False, 'import traitlets\n'), ((3268, 3295), 'traitlets.Unicode', 'traitlets.Unicode', (['"""Volume"""'], {}), "('Volume')\n", (3285, 3295), False, 'import traitlets\n'), ((3339, 3413), 'traitlets.List', 'traitlets.List', (['traitlets.CFloat'], {'default_value': '[0, 1]', 'minlen': '(2)', 'maxlen': '(2)'}), '(traitlets.CFloat, default_value=[0, 1], minlen=2, maxlen=2)\n', (3353, 3413), False, 'import traitlets\n'), ((3440, 3514), 'traitlets.List', 'traitlets.List', (['traitlets.CFloat'], {'default_value': '[0, 1]', 'minlen': '(2)', 'maxlen': '(2)'}), '(traitlets.CFloat, default_value=[0, 1], minlen=2, maxlen=2)\n', (3454, 3514), False, 'import traitlets\n'), ((3541, 3615), 'traitlets.List', 'traitlets.List', (['traitlets.CFloat'], {'default_value': '[0, 1]', 'minlen': '(2)', 'maxlen': '(2)'}), '(traitlets.CFloat, default_value=[0, 1], minlen=2, maxlen=2)\n', (3555, 3615), False, 'import traitlets\n'), ((3645, 3667), 'traitlets.Unicode', 'traitlets.Unicode', (['"""x"""'], {}), "('x')\n", (3662, 3667), False, 'import traitlets\n'), ((3696, 3718), 'traitlets.Unicode', 'traitlets.Unicode', (['"""y"""'], {}), "('y')\n", (3713, 3718), False, 'import traitlets\n'), ((3747, 3769), 'traitlets.Unicode', 'traitlets.Unicode', (['"""z"""'], {}), "('z')\n", (3764, 3769), False, 'import traitlets\n'), ((3798, 3841), 'traitlets.Dict', 'traitlets.Dict', ([], {'default_value': 'default_style'}), '(default_value=default_style)\n', (3812, 3841), False, 'import traitlets\n'), ((5492, 5551), 'ipywidgets.HBox', 'ipywidgets.HBox', (['[ambient_coefficient, diffuse_coefficient]'], {}), '([ambient_coefficient, diffuse_coefficient])\n', (5507, 5551), False, 'import ipywidgets\n'), ((5562, 5620), 'ipywidgets.HBox', 'ipywidgets.HBox', (['[specular_coefficient, specular_exponent]'], {}), 
'([specular_coefficient, specular_exponent])\n', (5577, 5620), False, 'import ipywidgets\n'), ((6120, 6157), 'ipywidgets.HBox', 'ipywidgets.HBox', (['[stereo, fullscreen]'], {}), '([stereo, fullscreen])\n', (6135, 6157), False, 'import ipywidgets\n'), ((2605, 2632), 'traitlets.Instance', 'traitlets.Instance', (['Scatter'], {}), '(Scatter)\n', (2623, 2632), False, 'import traitlets\n')] |
# Copyright (c) 2015 <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import logging
import tempfile
from fine_mapping_pipeline.utils.shell import run_command_return_output, run_command
from fine_mapping_pipeline.config import __1000_genomes_sample_map__
def _load_one_thousand_genomes_sample_dict():
    """Load the 1000 genomes sample-to-population mapping.

    Each line of the map file is tab-separated with the sample name
    first and the population second::

        <SAMPLE NAME>\t<POPULATION>
        NA12839\tEUR

    (The old docstring showed the columns in the wrong order.)

    :return: dict mapping a population code to the list of sample
        names belonging to it.
    """
    one_thousand_genomes_dict = {}
    with open(__1000_genomes_sample_map__) as samples:
        for sample_l in samples:
            fields = sample_l.strip().split('\t')
            sample_name = fields[0]
            s_pop = fields[1]
            # setdefault replaces the try/except-KeyError append dance.
            one_thousand_genomes_dict.setdefault(s_pop, []).append(sample_name)
    return one_thousand_genomes_dict
def _get_samples_indices(samples, super_population):
    """Return the VCF column indices to keep for a super population.

    :param samples: sample names from the VCF ``#CHROM`` header line,
        in column order
    :param super_population: 1000 genomes super-population code (e.g. "EUR")
    :return: the nine fixed VCF columns (0-8) followed by the genotype
        columns of samples belonging to *super_population*
    """
    onekg_dict = _load_one_thousand_genomes_sample_dict()
    # Set membership is O(1) per lookup instead of scanning the list.
    super_pop_set = set(onekg_dict[super_population])
    # Genotype columns start at offset 9 in a VCF line.
    indices = [i + 9 for i, sample in enumerate(samples) if sample in super_pop_set]
    # BUG FIX: `range(0, 9) + indices` only works on Python 2; range
    # objects do not support `+` on Python 3.
    return list(range(0, 9)) + indices
def _get_sample_list(super_population):
    """Write the sample names of *super_population* to a file.

    :param super_population: 1000 genomes super-population code
    :return: path of the written file (``<POP>.tmp`` in the current
        working directory), one sample name per line
    """
    onekg_dict = _load_one_thousand_genomes_sample_dict()
    super_pop_list = onekg_dict[super_population]
    file_name = super_population + ".tmp"
    # BUG FIX: the handle was never closed; `with` guarantees the data
    # is flushed to disk before bcftools reads the file.
    with open(file_name, "w") as sup_pop_file:
        for sample in super_pop_list:
            sup_pop_file.write(sample + "\n")
    return file_name
__BCFTOOLS__COMMAND__="""
bcftools view --force-samples -m2 -M2 -S {0} {1} | bcftools filter -i "MAF > {2}"
"""
def _get_cnv_alternate():
"""
Get an alternate allele when you have a CNV
"""
return 'A', 'T'
def extract_population_from_1000_genomes(vcf, super_population="EUR", biallelic_only=True, min_maf="0.01"):
    """
    Extract a population from a VCF file using bcftools.

    bcftools restricts the output to biallelic sites and filters on MAF;
    this function then post-processes each record: duplicate positions are
    skipped, CNV/indel alleles are replaced with placeholders, and missing
    rsIDs are synthesized from chromosome/position/alleles.

    :param vcf: path to the input VCF file.
    :param super_population: 1000 genomes super population code, e.g. "EUR".
    :param biallelic_only: kept for API compatibility; bcftools already
        restricts to biallelic sites (-m2 -M2).
    :param min_maf: minimum minor allele frequency passed to bcftools.
    :returns: the filtered, rewritten VCF contents as a single string.
    """
    logging.info("Extracting {0} population from VCF".format(super_population))
    sample_list_file = _get_sample_list(super_population)
    bcftools_command = __BCFTOOLS__COMMAND__.format(sample_list_file, vcf, min_maf)
    logging.info(bcftools_command)
    output_vcf = run_command_return_output(bcftools_command, shell=True)
    last_pos = -1
    # Collect lines in a list and join once -- the original built the result
    # with repeated `+=`, which is quadratic on large VCFs.
    vcf_lines = []
    for i, line in enumerate(output_vcf.splitlines()):
        if i % 1000 == 0:
            logging.info("Processed {0} lines".format(i))
        if "#" in line:
            # Header / meta line: pass through unchanged.
            vcf_lines.append(line + '\n')
            continue
        line_split = line.split("\t")
        chrom = line_split[0]
        position = line_split[1]
        rsid = line_split[2]
        pos2 = int(position)
        # Skip records at a position we have already emitted.
        if pos2 == last_pos:
            continue
        last_pos = pos2
        reference = line_split[3]
        alternate = line_split[4]
        # Indels and copy-number variants get placeholder alleles.
        if "CN" in alternate:
            # Must be a CNV
            reference, alternate = _get_cnv_alternate()
            rsid = "rsCNV" + position
        elif len(reference) > 1 or len(alternate) > 1:
            reference, alternate = _get_cnv_alternate()
            rsid = "rsINDEL" + position
        if rsid == '.':
            # Synthesize an rsID.  `chrom` is column 0 of the record; the
            # original referenced an undefined name here (NameError).
            rsid = "rs" + chrom + ":" + position + "_" + reference + "/" + alternate
        line_split[3] = reference
        line_split[4] = alternate
        line_split[2] = rsid
        vcf_lines.append("\t".join(line_split) + "\n")
    return "".join(vcf_lines)
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest
    doctest.testmod()  # fixed: was `docetst.testmod()`, a NameError
| [
"fine_mapping_pipeline.utils.shell.run_command_return_output",
"logging.info"
] | [((3643, 3673), 'logging.info', 'logging.info', (['bcftools_command'], {}), '(bcftools_command)\n', (3655, 3673), False, 'import logging\n'), ((3708, 3763), 'fine_mapping_pipeline.utils.shell.run_command_return_output', 'run_command_return_output', (['bcftools_command'], {'shell': '(True)'}), '(bcftools_command, shell=True)\n', (3733, 3763), False, 'from fine_mapping_pipeline.utils.shell import run_command_return_output, run_command\n')] |
from src.requests.search_requests import search
from constants.search_bar_constants import GOOGLE_SEARCH_URL
def search_testcases(testcases):
    """Run each testcase through the Google search endpoint and print a summary.

    For every query, the response URL, ok flag and HTTP status code are
    printed on separate lines.
    """
    for query in testcases:
        resp = search(GOOGLE_SEARCH_URL, query)
        print(resp.url)
        print(resp.ok)
        print(resp.status_code)
if __name__ == "__main__":
    # A handful of representative queries: digits, real and misspelled words,
    # and a phrase with punctuation.
    queries = ["1", "85", "word", "wordd", "Hi there!"]
    search_testcases(queries)
| [
"src.requests.search_requests.search"
] | [((194, 229), 'src.requests.search_requests.search', 'search', (['GOOGLE_SEARCH_URL', 'testcase'], {}), '(GOOGLE_SEARCH_URL, testcase)\n', (200, 229), False, 'from src.requests.search_requests import search\n')] |
"""
``$ drafttopic fetch_text -h``
::
Fetches text for labelings using a MediaWiki API.
Usage:
fetch_text --api-host=<url> [--labelings=<path>] [--output=<path>]
[--verbose]
Options:
-h --help Show this documentation.
--api-host=<url> The hostname of a MediaWiki e.g.
"https://en.wikipedia.org"
    --labelings=<path>  Path to a file containing observations with extracted
                        labels. [default: <stdin>]
--output=<path> Path to a file to write new observations
(with text) out to. [default: <stdout>]
--verbose Prints dots and stuff to stderr
"""
import logging
import re
import sys
from concurrent.futures import ThreadPoolExecutor
from itertools import islice
import mwapi
from docopt import docopt
from mw.lib import title as mwtitle
from revscoring.utilities.util import dump_observation, read_observations
from .wikiprojects_common import WIKIPROJECT_FETCH_THREADS
# Module-level logger.
logger = logging.getLogger(__name__)
# Matches wikitext redirect pages (case-insensitive "#redirect" prefix).
REDIRECT_RE = re.compile("#redirect", re.I)
# User-Agent sent with every MediaWiki API request (contact address redacted).
DRAFTTOPIC_UA = "Drafttopic fetch_text <<EMAIL>>"
def main(argv=None):
    """CLI entry point: parse docopt arguments and run the text fetcher."""
    args = docopt(__doc__, argv=argv)

    labelings_path = args['--labelings']
    if labelings_path == '<stdin>':
        labelings = read_observations(sys.stdin)
    else:
        labelings = read_observations(open(labelings_path))

    output_path = args['--output']
    if output_path == '<stdout>':
        output = sys.stdout
    else:
        output = open(output_path, 'w')

    session = mwapi.Session(args['--api-host'], user_agent=DRAFTTOPIC_UA)
    run(labelings, output, session, args['--verbose'])
def run(labelings, output, session, verbose):
    """Fetch article text for each labeling and dump the augmented observations."""
    annotated = fetch_text(session, labelings, verbose)
    for observation in annotated:
        dump_observation(observation, output)
def fetch_text(session, labelings, verbose=False):
    """
    Fetches article text for labelings from a MediaWiki API.

    :Parameters:
        session : :class:`mwapi.Session`
            An API session to use for querying
        labelings : `iterable`(`dict`)
            A collection of labeling events to add text to
        verbose : `bool`
            Print dots and stuff

    :Returns:
        An `iterator` of labelings augmented with 'text'. Note that labelings
        of articles that aren't found will not be
        included.
    """
    _fetch_text = build_fetch_text_extractor(session)
    # Use the executor as a context manager so the worker threads are shut
    # down deterministically once the labelings are exhausted (the original
    # left the pool alive until garbage collection).
    with ThreadPoolExecutor(max_workers=WIKIPROJECT_FETCH_THREADS) as executor:
        for annotated in executor.map(_fetch_text, labelings):
            if annotated is not None:
                yield annotated
            if verbose:
                sys.stderr.write(".")
                sys.stderr.flush()
    if verbose:
        sys.stderr.write("\n")
def build_fetch_text_extractor(session):
    """Build a closure that fetches the earliest revision text for one labeling.

    The returned function takes a labeling dict with a 'talk_page_title' key,
    queries the API for the page's first ("newer") revision, and returns the
    labeling augmented with 'text', 'title' and 'rev_id'.  It returns None
    when the query yields no pages or the page is not an article.
    """
    def _fetch_text(labeling):
        result = session.get(
            action="query",
            prop="revisions",
            rvprop=["content", "ids"],
            titles=labeling['talk_page_title'],
            rvlimit=1,
            rvdir="newer",
            formatversion=2
        )
        page_documents = None
        try:
            page_documents = result['query']['pages']
        except (KeyError, IndexError):
            # `Logger.warn` is a deprecated alias; use `warning`.
            logger.warning("No results returned.")
            return None
        for page_doc in page_documents:
            try:
                rev_doc = page_doc['revisions'][0]
                text = rev_doc['content']
                if is_article(text):
                    title = mwtitle.normalize(page_doc['title'])
                    labeling['text'] = text
                    labeling['title'] = title
                    labeling['rev_id'] = rev_doc['revid']
                    return labeling
                else:
                    # Not an article (too short or a redirect): flag it.
                    sys.stderr.write("?")
                    sys.stderr.write(page_doc['title'])
                    sys.stderr.flush()
            except (KeyError, IndexError):
                # TODO: warn
                return None
    return _fetch_text
def is_article(text):
    """Return True when *text* looks like article wikitext.

    A page counts as an article when its text is present, at least 50
    characters long, and does not start with a redirect marker.
    """
    if text is None:
        return False
    if len(text) < 50:
        return False
    return not REDIRECT_RE.match(text)
def chunkify(iterable, size):
    """Yield successive tuples of at most *size* items from *iterable*.

    The final chunk may be shorter than *size*.  The input is converted to
    an iterator exactly once: the original applied ``islice`` directly to
    the argument, which loops forever on re-iterable inputs such as lists
    (each pass restarts from the beginning).
    """
    iterator = iter(iterable)
    while True:
        chunk = tuple(islice(iterator, size))
        if not chunk:
            break
        yield chunk
| [
"logging.getLogger",
"itertools.islice",
"re.compile",
"concurrent.futures.ThreadPoolExecutor",
"sys.stderr.flush",
"revscoring.utilities.util.dump_observation",
"sys.stderr.write",
"revscoring.utilities.util.read_observations",
"mw.lib.title.normalize",
"docopt.docopt",
"mwapi.Session"
] | [((1080, 1107), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1097, 1107), False, 'import logging\n'), ((1122, 1151), 're.compile', 're.compile', (['"""#redirect"""', 're.I'], {}), "('#redirect', re.I)\n", (1132, 1151), False, 'import re\n'), ((1236, 1262), 'docopt.docopt', 'docopt', (['__doc__'], {'argv': 'argv'}), '(__doc__, argv=argv)\n', (1242, 1262), False, 'from docopt import docopt\n'), ((1567, 1626), 'mwapi.Session', 'mwapi.Session', (["args['--api-host']"], {'user_agent': 'DRAFTTOPIC_UA'}), "(args['--api-host'], user_agent=DRAFTTOPIC_UA)\n", (1580, 1626), False, 'import mwapi\n'), ((2444, 2501), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'WIKIPROJECT_FETCH_THREADS'}), '(max_workers=WIKIPROJECT_FETCH_THREADS)\n', (2462, 2501), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((1325, 1353), 'revscoring.utilities.util.read_observations', 'read_observations', (['sys.stdin'], {}), '(sys.stdin)\n', (1342, 1353), False, 'from revscoring.utilities.util import dump_observation, read_observations\n'), ((1845, 1873), 'revscoring.utilities.util.dump_observation', 'dump_observation', (['ob', 'output'], {}), '(ob, output)\n', (1861, 1873), False, 'from revscoring.utilities.util import dump_observation, read_observations\n'), ((2787, 2809), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (2803, 2809), False, 'import sys\n'), ((2710, 2731), 'sys.stderr.write', 'sys.stderr.write', (['"""."""'], {}), "('.')\n", (2726, 2731), False, 'import sys\n'), ((2744, 2762), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (2760, 2762), False, 'import sys\n'), ((4280, 4302), 'itertools.islice', 'islice', (['iterable', 'size'], {}), '(iterable, size)\n', (4286, 4302), False, 'from itertools import islice\n'), ((3571, 3607), 'mw.lib.title.normalize', 'mwtitle.normalize', (["page_doc['title']"], {}), "(page_doc['title'])\n", (3588, 3607), True, 'from mw.lib import 
title as mwtitle\n'), ((3836, 3857), 'sys.stderr.write', 'sys.stderr.write', (['"""?"""'], {}), "('?')\n", (3852, 3857), False, 'import sys\n'), ((3878, 3913), 'sys.stderr.write', 'sys.stderr.write', (["page_doc['title']"], {}), "(page_doc['title'])\n", (3894, 3913), False, 'import sys\n'), ((3934, 3952), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (3950, 3952), False, 'import sys\n')] |
import exodus
import os
from output_suppression import Suppressor
import numpy as np
def get_nodal_variable_values(filename, varname, step=1):
    """Read the nodal field *varname* at time plane *step* from an exodus file.

    Returns a numpy array with one value per node.
    """
    with Suppressor():
        exo = exodus.exodus(filename, array_type='numpy', mode='r')
    values = exo.get_node_variable_values(varname, step)
    exo.close()
    return values
def get_nodal_variable_names(filename):
    """Return the list of nodal variable names stored in the exodus file."""
    with Suppressor():
        exo = exodus.exodus(filename, array_type='numpy', mode='r')
    variable_names = exo.get_node_variable_names()
    exo.close()
    return variable_names
def get_num_elems(filename):
    """Return the total element count across all blocks of the exodus file."""
    with Suppressor():
        exo = exodus.exodus(filename, array_type='numpy', mode='r')
    element_count = exo.num_elems()
    exo.close()
    return element_count
def save_nodal_fields_transient(meshfilename, outputfilename, fieldnames, fielddata):
    """Copy the mesh and write time-dependent nodal fields to a new exodus file.

    ``fielddata`` is indexed as [timestep, field, node]; any existing output
    file is overwritten.
    """
    if os.path.isfile(outputfilename):
        os.remove(outputfilename)
    with Suppressor():
        mesh_copy = exodus.copy_mesh(meshfilename, outputfilename)
        mesh_copy.close()
        out = exodus.exodus(outputfilename, mode='a', array_type="numpy")
        exodus.add_variables(out, nodal_vars=fieldnames)
        num_steps = fielddata.shape[0]
        for field_index, field_name in enumerate(fieldnames):
            for step in range(num_steps):
                out.put_node_variable_values(field_name, step + 1,
                                             fielddata[step, field_index, :])
                out.put_time(step + 1, step)
        out.close()
def save_nodal_fields_from_structured(meshfilename, outputfilename, fieldnames, fielddata):
    """Copy the mesh and write nodal fields given on a structured (i, j) grid.

    ``fielddata`` is indexed as [timestep, field, i, j]; each mesh node is
    mapped onto grid indices from its (x, y) coordinates.  This assumes a
    uniformly spaced, axis-aligned 2D structured mesh -- TODO confirm with
    callers.  Any existing output file is overwritten.
    """
    if os.path.isfile(outputfilename): os.remove(outputfilename)
    with Suppressor():
        e = exodus.exodus(meshfilename, array_type='numpy', mode='r')
        x,y,z = e.get_coords()
        e.close()
    # Unique rounded coordinate values define the grid axes; the spacing is
    # taken from the first gap along each axis.
    ux = np.unique(x.round(decimals=6))
    uy = np.unique(y.round(decimals=6))
    dx = ux[1] - ux[0]
    dy = uy[1] - uy[0]
    # Per-node integer grid indices, rounded to the nearest grid line.
    i = np.rint((x-min(x))/dx)
    j = np.rint((y-min(y))/dy)
    with Suppressor():
        e = exodus.copy_mesh(meshfilename, outputfilename)
        e.close()
        e = exodus.exodus(outputfilename, mode='a', array_type="numpy")
        exodus.add_variables(e, nodal_vars=fieldnames)
        for vidx,name in enumerate(fieldnames):
            for ts in range(fielddata.shape[0]):
                # Fancy-index the structured data back into per-node order.
                e.put_node_variable_values(name,ts+1,fielddata[ts,vidx,i.astype(int),j.astype(int)])
                e.put_time(ts+1, ts)
        e.close()
def save_nodal_fields(meshfilename, outputfilename, fieldnames, fielddata):
    """Copy the mesh and write one static (single time plane) value set per field.

    ``fielddata`` is indexed as [field][node]; any existing output file is
    overwritten.
    """
    if os.path.isfile(outputfilename):
        os.remove(outputfilename)
    with Suppressor():
        mesh_copy = exodus.copy_mesh(meshfilename, outputfilename)
        mesh_copy.close()
        out = exodus.exodus(outputfilename, mode='a', array_type="numpy")
        exodus.add_variables(out, nodal_vars=fieldnames)
        for field_index, field_name in enumerate(fieldnames):
            out.put_node_variable_values(field_name, 1, fielddata[field_index])
        out.close()
def normalize_data(filename, outputfilename="normalized-output.e"):
    """Write a copy of *filename* with each nodal variable rescaled to [0, 1].

    Per-variable min/max are computed over all time planes, then every value
    is mapped to (v - vmin) / (vmax - vmin).  Global variable names are
    copied into the output, but their values are never written (see note at
    the end).  Any existing output file is overwritten.
    """
    if os.path.isfile(outputfilename): os.remove(outputfilename)
    # Copy Mesh
    e = exodus.copy_mesh(filename, outputfilename)
    e.close()
    exo_out = exodus.exodus(outputfilename, mode='a', array_type="numpy")
    exo_in = exodus.exodus(filename, mode='r', array_type="numpy")
    # Add Variable Names
    var_names = exo_in.get_node_variable_names()
    gvar_names = exo_in.get_global_variable_names()
    exodus.add_variables(exo_out, nodal_vars=var_names)
    exo_out.set_global_variable_number(len(gvar_names))
    for i,gvar in enumerate(gvar_names):
        exo_out.put_global_variable_name(gvar, i+1)
    # Compute each variable's min/max over all time planes.
    minmax = []
    for var in var_names:
        print(var)
        vmin = float("inf")
        vmax = -vmin
        for step in range(exo_in.num_times()):
            data = exo_in.get_node_variable_values(var, step+1)
            vmin = min(vmin, min(data))
            vmax = max(vmax, max(data))
        print((vmin,vmax))
        minmax.append((vmin,vmax))
    # Add Data: rescale every time plane of every variable.
    # NOTE(review): divides by zero when a variable is constant (vmin == vmax).
    for step in range(exo_in.num_times()):
        for i,var in enumerate(var_names):
            data = exo_in.get_node_variable_values(var, step+1)
            vmin,vmax = minmax[i]
            exo_out.put_node_variable_values(var,step+1,(data-vmin)/(vmax-vmin))
        exo_out.put_time(step+1, step)
    # Add Global Data -- NOTE(review): global variable values are never
    # actually copied; only their names were registered above.
    exo_in.close()
    exo_out.close()
def append_exodus(filenamelist, outputfilename="joined-output.e", skip_first=0, skip_last=0):
    """Concatenate the time planes of several exodus files into one output file.

    The mesh is taken from the first file; all files are assumed to share it
    -- TODO confirm.  Nodal and global variable names are the union across
    inputs.  ``skip_first`` / ``skip_last`` drop that many leading / trailing
    time planes of each input.  Any existing output file is overwritten.
    """
    if os.path.isfile(outputfilename): os.remove(outputfilename)
    with Suppressor():
        # Copy Mesh
        e = exodus.copy_mesh(filenamelist[0], outputfilename)
        e.close()
        e = exodus.exodus(outputfilename, mode='a', array_type="numpy")
        # Add Variable Names: union over all input files, deduplicated.
        var_names = []
        gvar_names = []
        for f in filenamelist:
            exo = exodus.exodus(f, mode='r', array_type="numpy")
            var_names.extend(exo.get_node_variable_names())
            gvar_names.extend(exo.get_global_variable_names())
            exo.close()
        var_names = list(set(var_names))
        gvar_names = list(set(gvar_names))
        exodus.add_variables(e, nodal_vars=var_names)
        e.set_global_variable_number(len(gvar_names))
        for i,gvar in enumerate(gvar_names):
            e.put_global_variable_name(gvar, i+1)
        # Add Variable Data: `ts` numbers output time planes consecutively
        # across all input files.
        ts = 1
        for f in filenamelist:
            exo = exodus.exodus(f, mode='r', array_type="numpy")
            for step in range(skip_first, exo.num_times()-skip_last):
                for var in exo.get_node_variable_names():
                    e.put_node_variable_values(var, ts, exo.get_node_variable_values(var, step+1))
                if len(gvar_names)>0:
                    gvar_vals = []
                    for gvar in exo.get_global_variable_names():
                        gvar_vals.append(exo.get_global_variable_values(gvar)[step])
                    e.put_all_global_variable_values(ts, gvar_vals)
                e.put_time(ts, ts-1)
                ts += 1
            exo.close()
        e.close()
def append_exodus_ss(filenamelist, outputfilename="joined-output.e", labels=None):
    """Join the *last* time plane of several exodus files into one output file.

    One output time plane is written per input file (its steady-state /
    final step).  Nodal, global, and element variable names are unioned
    across inputs.  ``labels``, when given, is a dict of extra global
    variables; ``labels[key][k]`` supplies the value for the k-th input file
    -- each list must be at least ``len(filenamelist)`` long.  Any existing
    output file is overwritten.
    """
    if os.path.isfile(outputfilename): os.remove(outputfilename)
    with Suppressor():
        # Copy Mesh
        e = exodus.copy_mesh(filenamelist[0], outputfilename)
        e.close()
        e = exodus.exodus(outputfilename, mode='a', array_type="numpy")
        # Add Variable Names: union over all inputs, deduplicated.
        nvar_names = []
        gvar_names = []
        evar_names = []
        for f in filenamelist:
            exo = exodus.exodus(f, mode='r', array_type="numpy")
            nvar_names.extend(exo.get_node_variable_names())
            gvar_names.extend(exo.get_global_variable_names())
            evar_names.extend(exo.get_element_variable_names())
            exo.close()
        if labels:
            gvar_names.extend(labels.keys())
        nvar_names = list(set(nvar_names))
        gvar_names = list(set(gvar_names))
        evar_names = list(set(evar_names))
        exodus.add_variables(e, nodal_vars=nvar_names, element_vars=evar_names)
        e.set_global_variable_number(len(gvar_names))
        for i,gvar in enumerate(gvar_names):
            e.put_global_variable_name(gvar, i+1)
        # Buffer global values per name; they are written in one pass below.
        gvar_vals = {}
        for gvar in gvar_names:
            gvar_vals[gvar] = []
        ts = 1
        for f in filenamelist:
            exo = exodus.exodus(f, mode='r', array_type="numpy")
            # Use the last time plane of each input.
            step = exo.num_times()
            e.put_time(ts, ts)
            for var in exo.get_node_variable_names():
                e.put_node_variable_values(var, ts, exo.get_node_variable_values(var, step))
            for evar in exo.get_element_variable_names():
                # TODO: only works for 1 block
                e.put_element_variable_values(1, evar, ts, exo.get_element_variable_values(1,evar,step))
            for gvar in exo.get_global_variable_names():
                val = exo.get_global_variable_value(gvar,step)
                gvar_vals[gvar].append(val)
            if labels:
                for key in labels:
                    val = labels[key][ts-1]
                    gvar_vals[key].append(val)
            ts += 1
            exo.close()
        # Flush the buffered global values, one output plane at a time.
        for ts in range(1,e.num_times()+1):
            vals=[]
            for gvar in e.get_global_variable_names():
                vals.append(gvar_vals[gvar][ts-1])
            e.put_all_global_variable_values(ts,vals)
        e.close()
def isin_sideset(filename, ssname):
    """Return a per-node indicator array for membership in side set *ssname*.

    The result has one entry per node: 1 where the node belongs to the side
    set, 0 elsewhere (node ids are 1-based in the exodus file).
    """
    with Suppressor():
        exo = exodus.exodus(filename, mode='r')
    name_to_id = dict(zip(exo.get_side_set_names(), exo.get_side_set_ids()))
    indicator = np.zeros(exo.num_nodes())
    node_list = exo.get_side_set_node_list(name_to_id[ssname])[1]
    for node_id in set(node_list):
        indicator[node_id - 1] = 1
    with Suppressor():
        exo.close()
    return indicator
def get_coords(filename):
    """Return the (x, y, z) nodal coordinate arrays for all blocks."""
    with Suppressor():
        exo = exodus.exodus(filename, array_type='numpy', mode='r')
    x_coords, y_coords, z_coords = exo.get_coords()
    exo.close()
    return x_coords, y_coords, z_coords
def get_node_id_map(filename):
    """Return the node index -> node id mapping stored in the exodus file."""
    with Suppressor():
        exo = exodus.exodus(filename, array_type='numpy', mode='r')
    id_map = exo.get_node_id_map()
    exo.close()
    return id_map
def add_global_variable(filename, name, vals):
    """Append a new global variable *name* and write *vals*, one per time plane."""
    with Suppressor():
        exo = exodus.exodus(filename, array_type='numpy', mode='a')
    existing_count = exo.get_global_variable_number()
    exo.set_global_variable_number(existing_count + 1)
    exo.put_global_variable_name(name, existing_count + 1)
    for step, value in enumerate(vals):
        exo.put_global_variable_value(name, step + 1, value)
    exo.close()
def get_num_globals(filename):
    """Return how many global variables the exodus file contains."""
    with Suppressor():
        exo = exodus.exodus(filename, array_type='numpy', mode='r')
    global_count = exo.get_global_variable_number()
    exo.close()
    return global_count
def get_all_global_variable_values(filename):
    """Return all global variable values across time.

    The result is a (num_times, num_globals) numpy array, or None when the
    file has no global variables.
    """
    with Suppressor():
        exo = exodus.exodus(filename, array_type='numpy', mode='r')
    num_globals = exo.get_global_variable_number()
    if num_globals == 0:
        return None
    num_steps = exo.num_times()
    values = np.zeros((num_steps, num_globals))
    for step in range(num_steps):
        values[step, :] = exo.get_all_global_variable_values(step + 1)
    return values
def copy_all_global_variables(filename_source, filename_dest):
    """Copy all global variables (names and per-timestep values) between exodus files.

    Values are copied for every time plane already present in the
    destination.  Does nothing when the source has no global variables.
    """
    source = exodus.exodus(filename_source, array_type='numpy', mode='r')
    try:
        n = source.get_global_variable_number()
        if n > 0:
            dest = exodus.exodus(filename_dest, array_type='numpy', mode='a')
            try:
                dest.set_global_variable_number(n)
                for i, name in enumerate(source.get_global_variable_names()):
                    dest.put_global_variable_name(name, i + 1)
                for timestep in range(dest.num_times()):
                    dest.put_all_global_variable_values(
                        timestep + 1,
                        source.get_all_global_variable_values(timestep + 1))
            finally:
                # The original never closed either file -- a handle leak.
                dest.close()
    finally:
        source.close()
def get_num_nodes(filename):
    """Return the total node count across all blocks of the exodus file."""
    with Suppressor():
        exo = exodus.exodus(filename, array_type='numpy', mode='r')
    node_count = exo.num_nodes()
    exo.close()
    return node_count
def get_times(filename):
    """Return the time values of the time planes stored in the exodus file."""
    with Suppressor():
        exo = exodus.exodus(filename, array_type='numpy', mode='r')
    time_values = exo.get_times()
    exo.close()
    return time_values
def set_times(filename, times):
    """Overwrite the time values of an existing exodus file.

    :param times: sequence whose length must equal the file's number of
        time planes.
    :raises AssertionError: when the lengths disagree.
    """
    with Suppressor():
        e = exodus.exodus(filename, array_type='numpy', mode='a')
    try:
        assert len(times) == e.num_times()
        for i, t in enumerate(times):
            e.put_time(i + 1, t)
    finally:
        # Close the file even when the length check fails -- the original
        # leaked the handle on AssertionError.
        with Suppressor():
            e.close()
| [
"exodus.copy_mesh",
"os.path.isfile",
"output_suppression.Suppressor",
"exodus.exodus",
"exodus.add_variables",
"os.remove"
] | [((1166, 1196), 'os.path.isfile', 'os.path.isfile', (['outputfilename'], {}), '(outputfilename)\n', (1180, 1196), False, 'import os\n'), ((1772, 1802), 'os.path.isfile', 'os.path.isfile', (['outputfilename'], {}), '(outputfilename)\n', (1786, 1802), False, 'import os\n'), ((2729, 2759), 'os.path.isfile', 'os.path.isfile', (['outputfilename'], {}), '(outputfilename)\n', (2743, 2759), False, 'import os\n'), ((3215, 3245), 'os.path.isfile', 'os.path.isfile', (['outputfilename'], {}), '(outputfilename)\n', (3229, 3245), False, 'import os\n'), ((3298, 3340), 'exodus.copy_mesh', 'exodus.copy_mesh', (['filename', 'outputfilename'], {}), '(filename, outputfilename)\n', (3314, 3340), False, 'import exodus\n'), ((3369, 3428), 'exodus.exodus', 'exodus.exodus', (['outputfilename'], {'mode': '"""a"""', 'array_type': '"""numpy"""'}), "(outputfilename, mode='a', array_type='numpy')\n", (3382, 3428), False, 'import exodus\n'), ((3442, 3495), 'exodus.exodus', 'exodus.exodus', (['filename'], {'mode': '"""r"""', 'array_type': '"""numpy"""'}), "(filename, mode='r', array_type='numpy')\n", (3455, 3495), False, 'import exodus\n'), ((3627, 3678), 'exodus.add_variables', 'exodus.add_variables', (['exo_out'], {'nodal_vars': 'var_names'}), '(exo_out, nodal_vars=var_names)\n', (3647, 3678), False, 'import exodus\n'), ((4717, 4747), 'os.path.isfile', 'os.path.isfile', (['outputfilename'], {}), '(outputfilename)\n', (4731, 4747), False, 'import os\n'), ((6367, 6397), 'os.path.isfile', 'os.path.isfile', (['outputfilename'], {}), '(outputfilename)\n', (6381, 6397), False, 'import os\n'), ((11041, 11101), 'exodus.exodus', 'exodus.exodus', (['filename_source'], {'array_type': '"""numpy"""', 'mode': '"""r"""'}), "(filename_source, array_type='numpy', mode='r')\n", (11054, 11101), False, 'import exodus\n'), ((258, 270), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (268, 270), False, 'from output_suppression import Suppressor\n'), ((284, 337), 'exodus.exodus', 'exodus.exodus', 
(['filename'], {'array_type': '"""numpy"""', 'mode': '"""r"""'}), "(filename, array_type='numpy', mode='r')\n", (297, 337), False, 'import exodus\n'), ((552, 564), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (562, 564), False, 'from output_suppression import Suppressor\n'), ((578, 631), 'exodus.exodus', 'exodus.exodus', (['filename'], {'array_type': '"""numpy"""', 'mode': '"""r"""'}), "(filename, array_type='numpy', mode='r')\n", (591, 631), False, 'import exodus\n'), ((821, 833), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (831, 833), False, 'from output_suppression import Suppressor\n'), ((845, 898), 'exodus.exodus', 'exodus.exodus', (['filename'], {'array_type': '"""numpy"""', 'mode': '"""r"""'}), "(filename, array_type='numpy', mode='r')\n", (858, 898), False, 'import exodus\n'), ((1198, 1223), 'os.remove', 'os.remove', (['outputfilename'], {}), '(outputfilename)\n', (1207, 1223), False, 'import os\n'), ((1233, 1245), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (1243, 1245), False, 'from output_suppression import Suppressor\n'), ((1259, 1305), 'exodus.copy_mesh', 'exodus.copy_mesh', (['meshfilename', 'outputfilename'], {}), '(meshfilename, outputfilename)\n', (1275, 1305), False, 'import exodus\n'), ((1336, 1395), 'exodus.exodus', 'exodus.exodus', (['outputfilename'], {'mode': '"""a"""', 'array_type': '"""numpy"""'}), "(outputfilename, mode='a', array_type='numpy')\n", (1349, 1395), False, 'import exodus\n'), ((1404, 1450), 'exodus.add_variables', 'exodus.add_variables', (['e'], {'nodal_vars': 'fieldnames'}), '(e, nodal_vars=fieldnames)\n', (1424, 1450), False, 'import exodus\n'), ((1804, 1829), 'os.remove', 'os.remove', (['outputfilename'], {}), '(outputfilename)\n', (1813, 1829), False, 'import os\n'), ((1840, 1852), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (1850, 1852), False, 'from output_suppression import Suppressor\n'), ((1866, 1923), 'exodus.exodus', 'exodus.exodus', 
(['meshfilename'], {'array_type': '"""numpy"""', 'mode': '"""r"""'}), "(meshfilename, array_type='numpy', mode='r')\n", (1879, 1923), False, 'import exodus\n'), ((2174, 2186), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (2184, 2186), False, 'from output_suppression import Suppressor\n'), ((2200, 2246), 'exodus.copy_mesh', 'exodus.copy_mesh', (['meshfilename', 'outputfilename'], {}), '(meshfilename, outputfilename)\n', (2216, 2246), False, 'import exodus\n'), ((2277, 2336), 'exodus.exodus', 'exodus.exodus', (['outputfilename'], {'mode': '"""a"""', 'array_type': '"""numpy"""'}), "(outputfilename, mode='a', array_type='numpy')\n", (2290, 2336), False, 'import exodus\n'), ((2345, 2391), 'exodus.add_variables', 'exodus.add_variables', (['e'], {'nodal_vars': 'fieldnames'}), '(e, nodal_vars=fieldnames)\n', (2365, 2391), False, 'import exodus\n'), ((2761, 2786), 'os.remove', 'os.remove', (['outputfilename'], {}), '(outputfilename)\n', (2770, 2786), False, 'import os\n'), ((2796, 2808), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (2806, 2808), False, 'from output_suppression import Suppressor\n'), ((2822, 2868), 'exodus.copy_mesh', 'exodus.copy_mesh', (['meshfilename', 'outputfilename'], {}), '(meshfilename, outputfilename)\n', (2838, 2868), False, 'import exodus\n'), ((2899, 2958), 'exodus.exodus', 'exodus.exodus', (['outputfilename'], {'mode': '"""a"""', 'array_type': '"""numpy"""'}), "(outputfilename, mode='a', array_type='numpy')\n", (2912, 2958), False, 'import exodus\n'), ((2967, 3013), 'exodus.add_variables', 'exodus.add_variables', (['e'], {'nodal_vars': 'fieldnames'}), '(e, nodal_vars=fieldnames)\n', (2987, 3013), False, 'import exodus\n'), ((3247, 3272), 'os.remove', 'os.remove', (['outputfilename'], {}), '(outputfilename)\n', (3256, 3272), False, 'import os\n'), ((4749, 4774), 'os.remove', 'os.remove', (['outputfilename'], {}), '(outputfilename)\n', (4758, 4774), False, 'import os\n'), ((4785, 4797), 
'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (4795, 4797), False, 'from output_suppression import Suppressor\n'), ((4831, 4880), 'exodus.copy_mesh', 'exodus.copy_mesh', (['filenamelist[0]', 'outputfilename'], {}), '(filenamelist[0], outputfilename)\n', (4847, 4880), False, 'import exodus\n'), ((4911, 4970), 'exodus.exodus', 'exodus.exodus', (['outputfilename'], {'mode': '"""a"""', 'array_type': '"""numpy"""'}), "(outputfilename, mode='a', array_type='numpy')\n", (4924, 4970), False, 'import exodus\n'), ((5383, 5428), 'exodus.add_variables', 'exodus.add_variables', (['e'], {'nodal_vars': 'var_names'}), '(e, nodal_vars=var_names)\n', (5403, 5428), False, 'import exodus\n'), ((5656, 5702), 'exodus.exodus', 'exodus.exodus', (['f'], {'mode': '"""r"""', 'array_type': '"""numpy"""'}), "(f, mode='r', array_type='numpy')\n", (5669, 5702), False, 'import exodus\n'), ((6399, 6424), 'os.remove', 'os.remove', (['outputfilename'], {}), '(outputfilename)\n', (6408, 6424), False, 'import os\n'), ((6435, 6447), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (6445, 6447), False, 'from output_suppression import Suppressor\n'), ((6481, 6530), 'exodus.copy_mesh', 'exodus.copy_mesh', (['filenamelist[0]', 'outputfilename'], {}), '(filenamelist[0], outputfilename)\n', (6497, 6530), False, 'import exodus\n'), ((6561, 6620), 'exodus.exodus', 'exodus.exodus', (['outputfilename'], {'mode': '"""a"""', 'array_type': '"""numpy"""'}), "(outputfilename, mode='a', array_type='numpy')\n", (6574, 6620), False, 'import exodus\n'), ((7233, 7304), 'exodus.add_variables', 'exodus.add_variables', (['e'], {'nodal_vars': 'nvar_names', 'element_vars': 'evar_names'}), '(e, nodal_vars=nvar_names, element_vars=evar_names)\n', (7253, 7304), False, 'import exodus\n'), ((8723, 8735), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (8733, 8735), False, 'from output_suppression import Suppressor\n'), ((8749, 8782), 'exodus.exodus', 'exodus.exodus', 
(['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (8762, 8782), False, 'import exodus\n'), ((9112, 9124), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (9122, 9124), False, 'from output_suppression import Suppressor\n'), ((9272, 9284), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (9282, 9284), False, 'from output_suppression import Suppressor\n'), ((9298, 9351), 'exodus.exodus', 'exodus.exodus', (['filename'], {'array_type': '"""numpy"""', 'mode': '"""r"""'}), "(filename, array_type='numpy', mode='r')\n", (9311, 9351), False, 'import exodus\n'), ((9543, 9555), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (9553, 9555), False, 'from output_suppression import Suppressor\n'), ((9569, 9622), 'exodus.exodus', 'exodus.exodus', (['filename'], {'array_type': '"""numpy"""', 'mode': '"""r"""'}), "(filename, array_type='numpy', mode='r')\n", (9582, 9622), False, 'import exodus\n'), ((9808, 9820), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (9818, 9820), False, 'from output_suppression import Suppressor\n'), ((9834, 9887), 'exodus.exodus', 'exodus.exodus', (['filename'], {'array_type': '"""numpy"""', 'mode': '"""a"""'}), "(filename, array_type='numpy', mode='a')\n", (9847, 9887), False, 'import exodus\n'), ((10275, 10287), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (10285, 10287), False, 'from output_suppression import Suppressor\n'), ((10301, 10354), 'exodus.exodus', 'exodus.exodus', (['filename'], {'array_type': '"""numpy"""', 'mode': '"""r"""'}), "(filename, array_type='numpy', mode='r')\n", (10314, 10354), False, 'import exodus\n'), ((10548, 10560), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (10558, 10560), False, 'from output_suppression import Suppressor\n'), ((10574, 10627), 'exodus.exodus', 'exodus.exodus', (['filename'], {'array_type': '"""numpy"""', 'mode': '"""r"""'}), "(filename, array_type='numpy', mode='r')\n", (10587, 10627), 
False, 'import exodus\n'), ((11230, 11288), 'exodus.exodus', 'exodus.exodus', (['filename_dest'], {'array_type': '"""numpy"""', 'mode': '"""a"""'}), "(filename_dest, array_type='numpy', mode='a')\n", (11243, 11288), False, 'import exodus\n'), ((11720, 11732), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (11730, 11732), False, 'from output_suppression import Suppressor\n'), ((11746, 11799), 'exodus.exodus', 'exodus.exodus', (['filename'], {'array_type': '"""numpy"""', 'mode': '"""r"""'}), "(filename, array_type='numpy', mode='r')\n", (11759, 11799), False, 'import exodus\n'), ((11981, 11993), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (11991, 11993), False, 'from output_suppression import Suppressor\n'), ((12007, 12060), 'exodus.exodus', 'exodus.exodus', (['filename'], {'array_type': '"""numpy"""', 'mode': '"""r"""'}), "(filename, array_type='numpy', mode='r')\n", (12020, 12060), False, 'import exodus\n'), ((12159, 12171), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (12169, 12171), False, 'from output_suppression import Suppressor\n'), ((12185, 12238), 'exodus.exodus', 'exodus.exodus', (['filename'], {'array_type': '"""numpy"""', 'mode': '"""a"""'}), "(filename, array_type='numpy', mode='a')\n", (12198, 12238), False, 'import exodus\n'), ((12347, 12359), 'output_suppression.Suppressor', 'Suppressor', ([], {}), '()\n', (12357, 12359), False, 'from output_suppression import Suppressor\n'), ((5097, 5143), 'exodus.exodus', 'exodus.exodus', (['f'], {'mode': '"""r"""', 'array_type': '"""numpy"""'}), "(f, mode='r', array_type='numpy')\n", (5110, 5143), False, 'import exodus\n'), ((6772, 6818), 'exodus.exodus', 'exodus.exodus', (['f'], {'mode': '"""r"""', 'array_type': '"""numpy"""'}), "(f, mode='r', array_type='numpy')\n", (6785, 6818), False, 'import exodus\n'), ((7608, 7654), 'exodus.exodus', 'exodus.exodus', (['f'], {'mode': '"""r"""', 'array_type': '"""numpy"""'}), "(f, mode='r', array_type='numpy')\n", 
(7621, 7654), False, 'import exodus\n')] |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class PreferencesProfilePeople(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    # Names of the boolean visibility flags this model exposes; each one maps
    # to an identically-named swagger attribute of type bool.
    _FLAG_NAMES = (
        'show_bio',
        'show_cv',
        'show_job',
        'show_other_identities',
        'show_professional',
        'show_pronouns',
        'show_social',
        'show_website',
    )

    def __init__(self, show_bio: bool=True, show_cv: bool=True, show_job: bool=True, show_other_identities: bool=True, show_professional: bool=True, show_pronouns: bool=True, show_social: bool=True, show_website: bool=True):  # noqa: E501
        """PreferencesProfilePeople - a model defined in Swagger

        Each argument toggles the visibility of one section of a person's
        public profile; all default to ``True`` (visible).

        :param show_bio: The show_bio of this PreferencesProfilePeople.
        :type show_bio: bool
        :param show_cv: The show_cv of this PreferencesProfilePeople.
        :type show_cv: bool
        :param show_job: The show_job of this PreferencesProfilePeople.
        :type show_job: bool
        :param show_other_identities: The show_other_identities of this PreferencesProfilePeople.
        :type show_other_identities: bool
        :param show_professional: The show_professional of this PreferencesProfilePeople.
        :type show_professional: bool
        :param show_pronouns: The show_pronouns of this PreferencesProfilePeople.
        :type show_pronouns: bool
        :param show_social: The show_social of this PreferencesProfilePeople.
        :type show_social: bool
        :param show_website: The show_website of this PreferencesProfilePeople.
        :type show_website: bool
        """
        # All flags are plain booleans and keep their python names in swagger.
        self.swagger_types = {name: bool for name in self._FLAG_NAMES}
        self.attribute_map = {name: name for name in self._FLAG_NAMES}
        values = (show_bio, show_cv, show_job, show_other_identities,
                  show_professional, show_pronouns, show_social, show_website)
        for name, value in zip(self._FLAG_NAMES, values):
            setattr(self, '_' + name, value)

    @classmethod
    def from_dict(cls, dikt) -> 'PreferencesProfilePeople':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The preferences_profile_people of this PreferencesProfilePeople.  # noqa: E501
        :rtype: PreferencesProfilePeople
        """
        return util.deserialize_model(dikt, cls)


def _visibility_property(name):
    """Build a read/write bool property backed by the private '_<name>' slot."""
    storage = '_' + name

    def _get(self) -> bool:
        return getattr(self, storage)

    def _set(self, value: bool):
        setattr(self, storage, value)

    doc = "The %s flag of this PreferencesProfilePeople." % name
    return property(_get, _set, doc=doc)


# Attach one property per visibility flag; this replaces the eight
# hand-written, structurally identical getter/setter pairs.
for _flag in PreferencesProfilePeople._FLAG_NAMES:
    setattr(PreferencesProfilePeople, _flag, _visibility_property(_flag))
del _flag
| [
"swagger_server.util.deserialize_model"
] | [((3116, 3149), 'swagger_server.util.deserialize_model', 'util.deserialize_model', (['dikt', 'cls'], {}), '(dikt, cls)\n', (3138, 3149), False, 'from swagger_server import util\n')] |
# pylint: disable=invalid-name
import os
import platform
from flask import Flask
from flask import render_template, request, send_from_directory
from flask_cors import CORS
import pandas as pd
from app.ABSA import AspectsBased
from app.scrapper import amazonScrapper
# Directory holding bundled binaries (chromedriver) and fixture files.
EXTERNALS_DIR = os.path.realpath(os.path.join(__file__, '..', 'externals'))
# Serve the compiled React build both as static assets and as templates.
app = Flask(__name__, static_folder="./templates/build/static", template_folder="./templates/build")
# Allow cross-origin requests (e.g. from the front-end dev server).
CORS(app)
# Select the chromedriver binary for the current OS at import time.
# NOTE(review): raises KeyError on unsupported platforms, and the Linux entry
# reads 'chromdriver.unix' (missing 'e') -- confirm against the shipped file.
DRIVER_PATH = {
    'Linux' : EXTERNALS_DIR + '/chromdriver.unix',
    'Darwin' : EXTERNALS_DIR + '/chromedriver.mac',
    'Windows' : EXTERNALS_DIR + '\\chromedriver.exe'
}[platform.system()]
@app.route("/test", methods=['POST'])
def test():
    """Serve the canned prediction fixture for exercising the front end."""
    fixture = 'predict.json'
    return send_from_directory(EXTERNALS_DIR, fixture)
@app.route("/predict", methods=['POST'])
def predict():
    """Scrape an Amazon product page and run aspect-based sentiment analysis.

    Expects a JSON body with 'url' and 'maxPages' keys; returns the product
    data merged with the extracted opinions.
    """
    payload = request.json
    scraper = amazonScrapper(url=payload['url'], maxpages=payload['maxPages'], driver_path=DRIVER_PATH)
    product = scraper.get_product_data()
    comments = scraper.get_reviews()['Comment']
    analyser = AspectsBased(serie=comments)
    return {**product, 'opinions': analyser.identifyOpinions()}
@app.route("/")
def render():
    """Serve the compiled front-end's entry page."""
    entry_page = 'index.html'
    return render_template(entry_page)
# Run the Flask development server when executed directly (not under WSGI).
if __name__ == "__main__":
    app.run()
| [
"flask.render_template",
"flask.send_from_directory",
"flask_cors.CORS",
"flask.Flask",
"os.path.join",
"platform.system",
"app.ABSA.AspectsBased",
"app.scrapper.amazonScrapper"
] | [((352, 451), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': '"""./templates/build/static"""', 'template_folder': '"""./templates/build"""'}), "(__name__, static_folder='./templates/build/static', template_folder=\n './templates/build')\n", (357, 451), False, 'from flask import Flask\n'), ((447, 456), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (451, 456), False, 'from flask_cors import CORS\n'), ((302, 343), 'os.path.join', 'os.path.join', (['__file__', '""".."""', '"""externals"""'], {}), "(__file__, '..', 'externals')\n", (314, 343), False, 'import os\n'), ((632, 649), 'platform.system', 'platform.system', ([], {}), '()\n', (647, 649), False, 'import platform\n'), ((713, 763), 'flask.send_from_directory', 'send_from_directory', (['EXTERNALS_DIR', '"""predict.json"""'], {}), "(EXTERNALS_DIR, 'predict.json')\n", (732, 763), False, 'from flask import render_template, request, send_from_directory\n'), ((908, 975), 'app.scrapper.amazonScrapper', 'amazonScrapper', ([], {'url': 'url', 'maxpages': 'maxPages', 'driver_path': 'DRIVER_PATH'}), '(url=url, maxpages=maxPages, driver_path=DRIVER_PATH)\n', (922, 975), False, 'from app.scrapper import amazonScrapper\n'), ((1072, 1110), 'app.ABSA.AspectsBased', 'AspectsBased', ([], {'serie': "reviews['Comment']"}), "(serie=reviews['Comment'])\n", (1084, 1110), False, 'from app.ABSA import AspectsBased\n'), ((1243, 1272), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1258, 1272), False, 'from flask import render_template, request, send_from_directory\n')] |
from abc import ABC, abstractmethod
import logging
from typing import Any
from ROAR.utilities_module.module import Module
from ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl
from collections import deque
import numpy as np
class ROARManiaPlanner(Module):
    """Turns perception output into a single lateral error for a PID controller.

    The planner follows the detected lane by default, steers toward "boost"
    patches and away from "ice" patches, and repeats its previous output when
    the lane cannot be seen.
    """

    def __init__(self, agent, **kwargs):
        """
        :param agent: owning agent; only ``agent.center_x`` is read here.
        """
        super().__init__(**kwargs)
        # Fixed: a redundant `self.logger = logging` assignment (immediately
        # overwritten) has been removed.
        self.logger = logging.getLogger(__name__)
        self.agent = agent
        # Error emitted on the previous frame; reused when no lane is visible.
        self.last_error = None
        # boundary of detecting future turn (normalised error units)
        self.turn_boundary = 0.75

    def run_in_series(self, scene) -> Any:
        """
        Return the error to PID on.

        ``scene`` holds "lane_point", "backup_lane_point" and "patches"
        entries, where each patch is a (type, point) pair with type in
        {"ice", "boost"}.

        Algorithm:
        1. Follow main lane if a patch is not present.
        2. If a patch is present and desirable, go for it and give the
           correct lat_error to the controller.
        3. After going over the patch, return to the main lane quickly.
        4. If the main lane cannot be seen, repeat the previous action.
        CAVEAT: the case of visible patches but no visible lane is not handled.
        """
        if scene["lane_point"] is not None:
            # Translate the lane point into an error for the PID.
            error = self.point_to_error(scene["lane_point"])
        else:
            error = self.last_error
        # A missing backup lane point is interpreted as an upcoming turn,
        # during which patches are ignored.
        turn_exist = scene["backup_lane_point"] is None
        self.logger.debug("turn: %s", turn_exist)
        if scene["patches"]:
            # Closest patches (smallest y offset) are handled first.
            scene["patches"].sort(key=lambda patch: patch[1][1])
            self.logger.debug("sorted patches: %s", scene["patches"])
            for patch_t, patch_point in scene["patches"]:
                if patch_t == "ice" and not turn_exist:
                    error = self.avoid(patch_point, error)
                if patch_t == "boost" and not turn_exist:
                    error = self.pursue(patch_point, error)
        self.last_error = error
        return error

    def avoid(self, point, error):
        """Push the error away from an ice patch (weight 0.4)."""
        to_patch = self.point_to_error(point)
        return error + (0.4 * (error - to_patch))

    def pursue(self, point, error):
        """Steer toward a boost patch at half strength (lane error unused)."""
        return 0.5 * self.point_to_error(point)

    def point_to_error(self, point):
        """Convert an image point (index 1 is the x pixel) into a scaled error.

        The offset is normalised by 360 px, then squared (sign preserved) so
        small deviations produce disproportionately small corrections.
        """
        # Horizontal pixel offset from the image centre.
        pixel_offset = point[1] - self.agent.center_x
        self.logger.debug("pixel_offset: %s", pixel_offset)
        # Normalize to roughly [-1, 1].
        norm_offset = pixel_offset / 360
        self.logger.debug("norm_offset: %s", norm_offset)
        # Scale so that smaller errors are less significant.
        scaled_error = np.sign(norm_offset) * (abs(norm_offset) ** 2)
        self.logger.debug("scaled_error: %s", scaled_error)
        return scaled_error

    def repeat_prev_action(self):
        """Placeholder: None tells the caller to keep its last action."""
        return None

    def run_in_threaded(self, **kwargs):
        """Threaded execution is not supported for this planner."""
        pass

    def save(self, **kwargs):
        """Nothing to persist."""
        pass
"logging.getLogger",
"numpy.sign"
] | [((406, 433), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (423, 433), False, 'import logging\n'), ((3298, 3318), 'numpy.sign', 'np.sign', (['norm_offset'], {}), '(norm_offset)\n', (3305, 3318), True, 'import numpy as np\n')] |
"""
Defines a set of document formats.
(c) 2015 Massachusetts Institute of Technology
"""
# Native
import datetime
import hashlib
import os
# LO-PHI Automation
import lophi_automation.database.utils as utils
# Separator used when composing document UIDs.
DELIMITER = '_'


def analysis_doc_uid(analyzer_name, analyzer_version, sample_uid):
    """
    Returns a string representing the UID of an analysis document for
    a particular analyzer and sample.

    :param analyzer_name: Name of analyzer.
    :type analyzer_name: str
    :param analyzer_version: Version of analyzer.
    :type analyzer_version: int
    :param sample_uid: UID for the sample under analysis.
    :type sample_uid: str
    :returns: str -- the UID of analysis document for sample
    """
    parts = ('analysis', analyzer_name, str(analyzer_version), sample_uid)
    return DELIMITER.join(parts)
def analysis_doc(analyzer_name, analyzer_version, sample_id, machine_str,
                 results_dict, exception=None):
    """
    Returns a dictionary representing an analysis document.

    (Docstring fixed: it previously described nonexistent parameters
    ``analyzer_doc_uid``/``sample_uid`` and omitted ``machine_str``.)

    :param analyzer_name: Name of analyzer generating the document.
    :type analyzer_name: str
    :param analyzer_version: Version of analyzer generating the document.
    :type analyzer_version: int
    :param sample_id: UID for the sample under analysis.
    :type sample_id: str
    :param machine_str: Identifier of the machine the analysis ran on.
    :type machine_str: str
    :param results_dict: Dictionary of analysis results.
    :type results_dict: dict
    :param exception: String representing any exception that occurred.
    :type exception: str
    :returns: dict -- analysis document
    """
    # Timestamp the document with its (UTC) creation time.
    curr_time = datetime.datetime.utcnow()
    adoc = {
        '_id': analysis_doc_uid(analyzer_name, analyzer_version, sample_id),
        'type': 'analysis',
        'time': {'year': curr_time.year,
                 'month': curr_time.month,
                 'day': curr_time.day,
                 'iso': curr_time.isoformat()
                 },
        'analyzer': {'name': analyzer_name, 'version': analyzer_version},
        'machine': machine_str,
        'results': results_dict,
        'exception': exception,
        'sample': sample_id
    }
    return adoc
def _calculate_link_hash(links):
"""
Creates a hash based on the keys of the links. The names of link
documents will collide when the source, type, and keyspace of the links
are the same.
"""
to_hash = ''.join(sorted(links.keys()))
# Hashlib takes encoded Strings, not Unicode objects
return hashlib.md5(to_hash.encode('utf-8')).hexdigest()
# Link document types this module knows how to build.
known_link_types = ['origin', 'unpacks_to']


def link_doc_id(link_source, link_type, links):
    """Return the document id for a link document.

    Raises RuntimeError when ``link_type`` is not a known type or when
    ``links`` is not a dictionary.
    """
    if link_type not in known_link_types:
        message = "%s unknown link type. Known: %s." % (link_type, str(known_link_types))
        raise RuntimeError(message)
    if not isinstance(links, dict):
        raise RuntimeError("Links are not dictionary.")
    return DELIMITER.join(['link', link_type, link_source, _calculate_link_hash(links)])
def link_doc(link_source, link_type, links):
    """
    Returns a dictionary representing an link document.

    :param link_source: Source of the links (data source, unpacker, etc).
    :type link_source: str
    :param link_type: Type of link document (origin, unpacks_to, etc).
    :type link_type: str
    :param links: Dict of links, corresponding to single provenance graph edges.
        Key/value semantics for link derivation are defined per link type.
    :type links: dict
    :returns: dict -- link document
    """
    now = datetime.datetime.utcnow()
    return {
        '_id': link_doc_id(link_source, link_type, links),
        'type': 'link',
        'time': {'year': now.year,
                 'month': now.month,
                 'day': now.day,
                 'iso': now.isoformat()
                 },
        'source': link_source,
        'link_type': link_type,
        'links': links
    }
def link_origin_doc(link_source, links, redistribution='none', zip_file=None):
    """
    Returns a dictionary representing a link origin document.

    :param link_source: Source of the links (data source, unpacker, etc).
    :type link_source: str
    :param links: Dict of links (single provenance graph edges). Key/value
        semantics for link derivation are: sample_id -> origin_file.
    :type links: dict
    :param redistribution: Redistribution policy for the origin data.
    :param zip_file: Optional zip file the origin came from; recorded if set.
    :returns: dict -- link document
    """
    doc = link_doc(link_source, 'origin', links)
    doc['redistribution'] = redistribution
    if zip_file:
        doc['zip_file'] = zip_file
    return doc
def sample_doc_id(uid):
    """Return the '_id' of the sample document for the given sample uid."""
    return DELIMITER.join(('sample', uid))
def sample_doc(file_path, file_doc_id, redistribution="none"):
    """
    Returns a dictionary representing a sample document.

    (Docstring fixed: it previously documented a nonexistent ``metadata``
    parameter and omitted ``redistribution``.)

    :param file_path: Path to the sample file.
    :type file_path: str
    :param file_doc_id: File doc id as returned by the upload file db command.
    :type file_doc_id: str
    :param redistribution: Redistribution policy for the sample.
    :type redistribution: str
    :returns: dict -- sample document
    """
    # The sample's UID is a combined hash over the file contents.
    uid = utils.calculate_combined_hash(file_path)
    doc_id = sample_doc_id(uid)
    return {'_id': doc_id,
            'uid': uid,
            'file_doc_id': file_doc_id,
            'sample': doc_id,
            'type': 'sample',
            'size': os.path.getsize(file_path),
            'md5': utils.calculate_md5(file_path),
            'sha1': utils.calculate_sha1(file_path),
            'sha256': utils.calculate_sha256(file_path),
            'sha512': utils.calculate_sha512(file_path),
            'first_uploaded': str(datetime.datetime.now()),
            'redistribution': redistribution,
            'original filename': os.path.basename(file_path)
            }
def file_doc_id(file_path):
    """Return the '_id' of the file document backing the given file."""
    sha256 = utils.calculate_sha256(file_path)
    return DELIMITER.join(['file', sha256])
| [
"lophi_automation.database.utils.calculate_sha1",
"os.path.getsize",
"lophi_automation.database.utils.calculate_combined_hash",
"datetime.datetime.utcnow",
"lophi_automation.database.utils.calculate_md5",
"datetime.datetime.now",
"lophi_automation.database.utils.calculate_sha512",
"lophi_automation.da... | [((1666, 1692), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1690, 1692), False, 'import datetime\n'), ((3687, 3713), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3711, 3713), False, 'import datetime\n'), ((5277, 5317), 'lophi_automation.database.utils.calculate_combined_hash', 'utils.calculate_combined_hash', (['file_path'], {}), '(file_path)\n', (5306, 5317), True, 'import lophi_automation.database.utils as utils\n'), ((5521, 5547), 'os.path.getsize', 'os.path.getsize', (['file_path'], {}), '(file_path)\n', (5536, 5547), False, 'import os\n'), ((5568, 5598), 'lophi_automation.database.utils.calculate_md5', 'utils.calculate_md5', (['file_path'], {}), '(file_path)\n', (5587, 5598), True, 'import lophi_automation.database.utils as utils\n'), ((5620, 5651), 'lophi_automation.database.utils.calculate_sha1', 'utils.calculate_sha1', (['file_path'], {}), '(file_path)\n', (5640, 5651), True, 'import lophi_automation.database.utils as utils\n'), ((5675, 5708), 'lophi_automation.database.utils.calculate_sha256', 'utils.calculate_sha256', (['file_path'], {}), '(file_path)\n', (5697, 5708), True, 'import lophi_automation.database.utils as utils\n'), ((5732, 5765), 'lophi_automation.database.utils.calculate_sha512', 'utils.calculate_sha512', (['file_path'], {}), '(file_path)\n', (5754, 5765), True, 'import lophi_automation.database.utils as utils\n'), ((5906, 5933), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (5922, 5933), False, 'import os\n'), ((5801, 5824), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5822, 5824), False, 'import datetime\n'), ((6013, 6046), 'lophi_automation.database.utils.calculate_sha256', 'utils.calculate_sha256', (['file_path'], {}), '(file_path)\n', (6035, 6046), True, 'import lophi_automation.database.utils as utils\n')] |
# Member.py
import discord
from discord.ext import commands
from discord.utils import get
import os
import mysql.connector
import isbnlib
from isbnlib import *
import asyncio
# Discord role associated with the book club commands.
ROLE = "Book Worm"


def initdb():
    """Open a new MySQL connection using credentials from the environment."""
    return mysql.connector.connect(
        host=os.getenv('HOST'),
        user=os.getenv('USER'),
        password=os.getenv('PASSWORD'),
        database=os.getenv('DATABASE'),
    )
class Member(commands.Cog):
    """ a class filled with all commands related to the members. """

    def __init__(self, client):
        self.client = client
        # One shared MySQL connection per cog instance.
        self.connection = initdb()

    def dbcursor(self):
        """Return a cursor, re-opening the MySQL connection if it dropped.

        Fixed: on ping failure this previously called ``self.initdb()``,
        but ``initdb`` is a module-level function, not a method, so the
        reconnect path raised AttributeError.
        """
        try:
            self.connection.ping(reconnect=True, attempts=3, delay=5)
        except mysql.connector.Error:
            self.connection = initdb()
        return self.connection.cursor()

    # View bookworm profile.
    @commands.command()
    async def profile(self, ctx):
        """Show the invoking member's book club profile as an embed."""
        mycursor = self.dbcursor()
        mycursor.execute("SET NAMES utf8mb4;")
        self.connection.commit()
        profile_sql = 'SELECT member_name, read_status, member_count, member_mention FROM GUILD_{} WHERE member_id=%s'.format(ctx.guild.id)
        val = (ctx.author.id,)
        mycursor.execute(profile_sql, val)
        current_profile = mycursor.fetchone()
        # NOTE(review): fetchone() returns None for unregistered members,
        # which would raise TypeError below -- confirm members always have a
        # row before these commands run.
        var_member_name = current_profile[0]
        var_read_status = current_profile[1]
        var_member_count = current_profile[2]
        var_member_mention = current_profile[3]
        embed = discord.Embed(colour=discord.Colour.green(), title="{}'s Profile:".format(ctx.author.display_name))
        embed.add_field(name='{}\n(📚: {})'.format(var_member_name, var_member_count), value='{}'.format(var_member_mention),
                        inline=False)
        if var_read_status == 1:
            embed.set_footer(text="Well done! You've finished the current set book for the club! 🥳")
        else:
            embed.set_footer(text="It looks like you haven't finished the current set book for the club yet... 🤔")
        thumbnail = ctx.author.avatar_url
        embed.set_thumbnail(url='{}'.format(thumbnail))
        await ctx.send(embed=embed)

    # Add to completed books only if book is set within status.
    @commands.command()
    async def bookfinished(self, ctx):
        """Mark the guild's current set book as finished by the invoker."""
        mycursor = self.dbcursor()
        mycursor.execute("SET NAMES utf8mb4;")
        self.connection.commit()
        profile_sql = 'SELECT member_mention, member_name, read_status, member_count, member_id FROM GUILD_{} WHERE member_id=%s'.format(ctx.guild.id)
        val = (ctx.author.id,)
        mycursor.execute(profile_sql, val)
        current_profile = mycursor.fetchone()
        var_member_mention = current_profile[0]
        var_member_name = current_profile[1]
        var_read_status = current_profile[2]
        var_member_count = current_profile[3]
        var_member_id = current_profile[4]
        count_check_sql = 'SELECT current_book, set_by FROM guilds WHERE guild_id=%s'
        val = (ctx.guild.id,)
        mycursor.execute(count_check_sql, val)
        result = mycursor.fetchone()
        # if book status is set and book status matches current book then increment.
        var_current_book = (str(result[0]))
        var_set_by = result[1]
        if var_current_book == 'NULL':
            await ctx.send("But there is no set book for the club...? 🤨")
        elif int(var_read_status) == 1:
            await ctx.send("You've already told me that you've finished the set book for the club! 🤪")
        else:
            # Bump the member's finished count and flag the book as read.
            var_member_count = int(var_member_count) + 1
            update_guild_sql = "UPDATE GUILD_{} SET member_count=%s, read_status='1' WHERE member_id=%s".format(ctx.guild.id)
            val = (var_member_count, ctx.author.id,)
            mycursor.execute(update_guild_sql, val)
            self.connection.commit()
            # Record the finished book (renamed from `id`, which shadowed the builtin).
            book_row_id = var_current_book + '_' + var_member_id
            update_book_sql = "INSERT INTO BOOKS_{} (book_id, member_id, book_isbn, set_by) VALUES (%s, %s, %s, %s)".format(ctx.guild.id)
            val = (book_row_id, ctx.author.id, var_current_book, var_set_by,)
            mycursor.execute(update_book_sql, val)
            self.connection.commit()
            embed = discord.Embed(colour=discord.Colour.green(), title="{}'s Profile:".format(ctx.author.display_name))
            embed.add_field(name='{}\n(📚: {})'.format(var_member_name, var_member_count), value='{}'.format(var_member_mention),
                            inline=False)
            embed.set_footer(text="Well done! You've finished the current set book for the club! 🥳")
            thumbnail = ctx.author.avatar_url
            embed.set_thumbnail(url='{}'.format(thumbnail))
            await ctx.send(embed=embed)

    # Returns list of books you've read.
    @commands.command()
    async def mybooks(self, ctx):
        """List the books the invoking member has finished in this guild."""
        mycursor = self.dbcursor()
        mycursor.execute("SET NAMES utf8mb4;")
        self.connection.commit()
        member_books_sql = 'SELECT book_isbn, set_by FROM BOOKS_{} WHERE member_id=%s'.format(ctx.guild.id)
        val = (ctx.author.id,)
        mycursor.execute(member_books_sql, val)
        results = mycursor.fetchall()
        if results:
            # NOTE(review): only the LAST row survives this loop, so the embed
            # shows a single book despite the plural title -- confirm whether a
            # field per book was intended.
            for result in results:
                book = result[0]
                var_set_by = result[1]
            var_count_books = len(results)
            embed = discord.Embed(colour=discord.Colour.green(), title="{}'s Read Books:".format(ctx.author.display_name))
            current_book = meta(str(book))
            if len(current_book['Authors']) == 0:
                embed.add_field(name='{} ({})'.format(current_book['Title'], current_book['Year']), value='No Authors Specified',
                                inline=False)
            else:
                embed.add_field(name='{} ({})'.format(current_book['Title'], current_book['Year']),
                                value=', '.join(current_book['Authors']), inline=False)
            # NOTE(review): cover_img stays unbound when the metadata has
            # neither ISBN key -- verify isbnlib always returns one of them.
            if 'ISBN-10' in current_book:
                cover_img = current_book['ISBN-10']
            elif 'ISBN-13' in current_book:
                cover_img = current_book['ISBN-13']
            if cover(cover_img):
                thumbnail = cover(cover_img)
                embed.set_thumbnail(url='{}'.format(thumbnail['thumbnail']))
            else:
                embed.set_thumbnail(url='https://raw.githubusercontent.com/Iqrahaq/BookWorm/master/img/no_book_cover.jpg')
            embed.set_footer(
                text="Set by {}.\n{} total books! 😉".format(var_set_by,
                                                           var_count_books))
            await ctx.send(embed=embed)
        else:
            embed = discord.Embed(colour=discord.Colour.green(), title="{}'s Read Books:".format(ctx.author.display_name))
            embed.add_field(name='¯\_(ツ)_/¯', value='You haven\'t read any books in this club yet!', inline=False)
            embed.set_thumbnail(url='https://raw.githubusercontent.com/Iqrahaq/BookWorm/master/img/bookworm-01.png')
            await ctx.send(embed=embed)
def setup(client):
    """Entry point used by discord.py to register this cog on the bot."""
    cog = Member(client)
    client.add_cog(cog)
| [
"discord.Colour.green",
"discord.ext.commands.command",
"os.getenv"
] | [((907, 925), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (923, 925), False, 'from discord.ext import commands\n'), ((2296, 2314), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2312, 2314), False, 'from discord.ext import commands\n'), ((4911, 4929), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (4927, 4929), False, 'from discord.ext import commands\n'), ((277, 294), 'os.getenv', 'os.getenv', (['"""HOST"""'], {}), "('HOST')\n", (286, 294), False, 'import os\n'), ((312, 329), 'os.getenv', 'os.getenv', (['"""USER"""'], {}), "('USER')\n", (321, 329), False, 'import os\n'), ((351, 372), 'os.getenv', 'os.getenv', (['"""PASSWORD"""'], {}), "('PASSWORD')\n", (360, 372), False, 'import os\n'), ((394, 415), 'os.getenv', 'os.getenv', (['"""DATABASE"""'], {}), "('DATABASE')\n", (403, 415), False, 'import os\n'), ((1571, 1593), 'discord.Colour.green', 'discord.Colour.green', ([], {}), '()\n', (1591, 1593), False, 'import discord\n'), ((6954, 6976), 'discord.Colour.green', 'discord.Colour.green', ([], {}), '()\n', (6974, 6976), False, 'import discord\n'), ((4358, 4380), 'discord.Colour.green', 'discord.Colour.green', ([], {}), '()\n', (4378, 4380), False, 'import discord\n'), ((5543, 5565), 'discord.Colour.green', 'discord.Colour.green', ([], {}), '()\n', (5563, 5565), False, 'import discord\n')] |
from django.contrib.contenttypes.models import ContentType
from nautobot.dcim.models import Site
from nautobot.extras.choices import CustomFieldTypeChoices
from nautobot.extras.jobs import Job
from nautobot.extras.models import CustomField
class TestCreateSiteWithCustomField(Job):
    """Job that creates a text custom field on Site and two example sites."""

    class Meta:
        name = "Site and Custom Field Creation"
        description = "Site with a custom field"

    def run(self, data, commit):
        """Create the custom field, bind it to Site, then create two sites."""
        site_ct = ContentType.objects.get_for_model(Site)
        custom_field = CustomField.objects.create(
            name="cf1", type=CustomFieldTypeChoices.TYPE_TEXT, default="-"
        )
        custom_field.content_types.set([site_ct])
        self.log_success(obj=custom_field, message="CustomField created successfully.")
        first_site = Site.objects.create(name="Test Site One", slug="test-site-one")
        # Populate the new custom field on the first site only.
        first_site.cf[custom_field.name] = "some-value"
        first_site.save()
        self.log_success(obj=first_site, message="Created a new site")
        second_site = Site.objects.create(name="Test Site Two", slug="test-site-two")
        self.log_success(obj=second_site, message="Created another new site")
        return "Job completed."
| [
"nautobot.dcim.models.Site.objects.create",
"django.contrib.contenttypes.models.ContentType.objects.get_for_model",
"nautobot.extras.models.CustomField.objects.create"
] | [((451, 490), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['Site'], {}), '(Site)\n', (484, 490), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((504, 599), 'nautobot.extras.models.CustomField.objects.create', 'CustomField.objects.create', ([], {'name': '"""cf1"""', 'type': 'CustomFieldTypeChoices.TYPE_TEXT', 'default': '"""-"""'}), "(name='cf1', type=CustomFieldTypeChoices.\n TYPE_TEXT, default='-')\n", (530, 599), False, 'from nautobot.extras.models import CustomField\n'), ((733, 796), 'nautobot.dcim.models.Site.objects.create', 'Site.objects.create', ([], {'name': '"""Test Site One"""', 'slug': '"""test-site-one"""'}), "(name='Test Site One', slug='test-site-one')\n", (752, 796), False, 'from nautobot.dcim.models import Site\n'), ((946, 1009), 'nautobot.dcim.models.Site.objects.create', 'Site.objects.create', ([], {'name': '"""Test Site Two"""', 'slug': '"""test-site-two"""'}), "(name='Test Site Two', slug='test-site-two')\n", (965, 1009), False, 'from nautobot.dcim.models import Site\n')] |
from collections import namedtuple
import numpy as np
import pandas as pd
import rdkit.Chem as Chem
from tqdm import tqdm
from neural_fingerprint import NFPRegressor
from neural_fingerprint.chemutils import rf_evaluation
from neural_fingerprint.models.ecfp import ECFP
# Number of ZINC rows to load and the index of the train/test split boundary.
max_val = 1000
train_idx = 800
def read_data(max_val, train_idx):
    """Load ZINC smiles and logP-SA targets, parse mols, and split train/test.

    Returns (df, train_mols, test_mols, train_y, test_y).
    """
    smiles_df = pd.read_table("./data/train.txt", header=None)
    smiles_df.columns = ["smiles"]
    target_df = pd.read_table("./data/train.logP-SA", header=None)
    target_df.columns = ["target"]
    df = pd.concat([smiles_df.iloc[:max_val, :], target_df.iloc[:max_val, :]], axis=1)
    train_y = df.target[:train_idx]
    test_y = df.target[train_idx:]
    mols = [Chem.MolFromSmiles(smi) for smi in tqdm(df.smiles)]
    return df, mols[:train_idx], mols[train_idx:], train_y, test_y
def benchmark():
    """Train a neural-fingerprint model, then evaluate NFP+MLP and NFP+RF."""
    _, train_mols, test_mols, train_y, test_y = read_data(max_val, train_idx)
    print("Neural fingerprint")
    model = NFPRegressor(hidden_dim=64, depth=2, nbits=16)
    model.fit(train_mols, train_y, epochs=10, verbose=True)
    train_pred, train_fps = model.predict(train_mols, return_fps=True)
    test_pred, test_fps = model.predict(test_mols, return_fps=True)
    # Evaluate the model's own predictions (NFP + MLP head).
    print("Neural fingerprint + MLP")
    rf_evaluation(train_pred, test_pred, train_y, test_y)
    # Evaluate a random forest trained on the learned fingerprints.
    print("Neural fingerprint + Random Forest")
    rf_evaluation(train_fps, test_fps, train_y, test_y)
def mapping_nodes_eample(train_fps, test_fps):
    """Plot a 2-D t-SNE map of fingerprints with a GP-smoothed target surface.

    Fixes: removed a dead GaussianProcessRegressor fitted on ``train_fps``
    whose result was discarded before being overwritten, dropped the unused
    Axes3D import, and sized the train/test labels from the inputs instead of
    hard-coding 800/200.
    (NOTE: the function name's typo is kept for backward compatibility.)

    :param train_fps: fingerprint matrix for the training molecules
    :param test_fps: fingerprint matrix for the test molecules
    """
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.manifold import TSNE
    import matplotlib.cm as cm
    import matplotlib.pyplot as plt

    df, _, _, train_y, test_y = read_data(max_val, train_idx)
    fps = np.vstack((train_fps, test_fps))
    # 0 marks training rows, 1 marks test rows.
    label = np.hstack([np.zeros(len(train_fps)), np.ones(len(test_fps))])
    tsne = TSNE(n_components=2).fit_transform(fps)
    # Columns: t-SNE x, t-SNE y, target value, train/test label.
    tes = np.c_[tsne, df.target.to_numpy(), label]
    xmin, xmax = min(tes[:, 0]), max(tes[:, 0])
    ymin, ymax = min(tes[:, 1]), max(tes[:, 1])
    zmin, zmax = min(tes[:, 2]), max(tes[:, 2])
    # Smooth the target over the 2-D embedding with a Gaussian process.
    gp = GaussianProcessRegressor()
    gp.fit(tes[:, 0:2], tes[:, 2])
    xx, yy = np.meshgrid(
        np.linspace(xmin - 1, xmax + 1, 200), np.linspace(ymin - 1, ymax + 1, 200)
    )
    xxyy = np.array([xx.ravel(), yy.ravel()]).T
    z1 = gp.predict(xxyy).reshape(-1, 200)
    plt.pcolor(xx, yy, z1, alpha=0.5, cmap=cm.jet, vmin=zmin, vmax=zmax)
    plt.colorbar()
    plt.show()
if __name__ == "__main__":
benchmark()
| [
"sklearn.gaussian_process.GaussianProcessRegressor",
"neural_fingerprint.chemutils.rf_evaluation",
"numpy.ones",
"matplotlib.pyplot.pcolor",
"matplotlib.pyplot.colorbar",
"rdkit.Chem.MolFromSmiles",
"tqdm.tqdm",
"neural_fingerprint.NFPRegressor",
"sklearn.manifold.TSNE",
"numpy.linspace",
"numpy... | [((354, 400), 'pandas.read_table', 'pd.read_table', (['"""./data/train.txt"""'], {'header': 'None'}), "('./data/train.txt', header=None)\n", (367, 400), True, 'import pandas as pd\n'), ((414, 464), 'pandas.read_table', 'pd.read_table', (['"""./data/train.logP-SA"""'], {'header': 'None'}), "('./data/train.logP-SA', header=None)\n", (427, 464), True, 'import pandas as pd\n'), ((540, 614), 'pandas.concat', 'pd.concat', (['[df_zinc.iloc[0:max_val, :], target.iloc[0:max_val, :]]'], {'axis': '(1)'}), '([df_zinc.iloc[0:max_val, :], target.iloc[0:max_val, :]], axis=1)\n', (549, 614), True, 'import pandas as pd\n'), ((1112, 1158), 'neural_fingerprint.NFPRegressor', 'NFPRegressor', ([], {'hidden_dim': '(64)', 'depth': '(2)', 'nbits': '(16)'}), '(hidden_dim=64, depth=2, nbits=16)\n', (1124, 1158), False, 'from neural_fingerprint import NFPRegressor\n'), ((1417, 1470), 'neural_fingerprint.chemutils.rf_evaluation', 'rf_evaluation', (['train_pred', 'test_pred', 'train_y', 'test_y'], {}), '(train_pred, test_pred, train_y, test_y)\n', (1430, 1470), False, 'from neural_fingerprint.chemutils import rf_evaluation\n'), ((1550, 1601), 'neural_fingerprint.chemutils.rf_evaluation', 'rf_evaluation', (['train_fps', 'test_fps', 'train_y', 'test_y'], {}), '(train_fps, test_fps, train_y, test_y)\n', (1563, 1601), False, 'from neural_fingerprint.chemutils import rf_evaluation\n'), ((1829, 1861), 'numpy.vstack', 'np.vstack', (['(train_fps, test_fps)'], {}), '((train_fps, test_fps))\n', (1838, 1861), True, 'import numpy as np\n'), ((2028, 2054), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {}), '()\n', (2052, 2054), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((2240, 2266), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {}), '()\n', (2264, 2266), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((2677, 2745), 'matplotlib.pyplot.pcolor', 
'plt.pcolor', (['xx', 'yy', 'z1'], {'alpha': '(0.5)', 'cmap': 'cm.jet', 'vmin': 'zmin', 'vmax': 'zmax'}), '(xx, yy, z1, alpha=0.5, cmap=cm.jet, vmin=zmin, vmax=zmax)\n', (2687, 2745), True, 'import matplotlib.pyplot as plt\n'), ((2750, 2764), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2762, 2764), True, 'import matplotlib.pyplot as plt\n'), ((2769, 2779), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2777, 2779), True, 'import matplotlib.pyplot as plt\n'), ((775, 798), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (793, 798), True, 'import rdkit.Chem as Chem\n'), ((2449, 2485), 'numpy.linspace', 'np.linspace', (['(xmin - 1)', '(xmax + 1)', '(200)'], {}), '(xmin - 1, xmax + 1, 200)\n', (2460, 2485), True, 'import numpy as np\n'), ((2487, 2523), 'numpy.linspace', 'np.linspace', (['(ymin - 1)', '(ymax + 1)', '(200)'], {}), '(ymin - 1, ymax + 1, 200)\n', (2498, 2523), True, 'import numpy as np\n'), ((810, 825), 'tqdm.tqdm', 'tqdm', (['df.smiles'], {}), '(df.smiles)\n', (814, 825), False, 'from tqdm import tqdm\n'), ((1885, 1898), 'numpy.zeros', 'np.zeros', (['(800)'], {}), '(800)\n', (1893, 1898), True, 'import numpy as np\n'), ((1900, 1912), 'numpy.ones', 'np.ones', (['(200)'], {}), '(200)\n', (1907, 1912), True, 'import numpy as np\n'), ((1927, 1947), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (1931, 1947), False, 'from sklearn.manifold import TSNE\n')] |
#
# core/urls.py
#
# Author: <NAME>
#
from django.conf.urls import include
from django.contrib.auth.decorators import login_required
from django.urls import path
from . import views
# All views require authentication; every route is wrapped in login_required.
urlpatterns = [
    # Landing pages.
    path('', login_required(views.index), name='index'),
    path('libraries/', login_required(views.libraries), name='libraries'),
    # Trained model browsing (SVM and random-forest variants).
    path('models/', login_required(views.models), name='models'),
    path('models/svm/', login_required(views.SVMModelListView.as_view()), name='svm_models'),
    path('models/svm/<model_name>', login_required(views.SVMModelDetailView.as_view()), name='svm_model_detail'),
    path('models/svm/<model_name>/genes', login_required(views.svm_genes), name='svm_genes'),
    path('models/rf/', login_required(views.RFModelListView.as_view()), name='rf_models'),
    path('models/rf/<model_name>', login_required(views.RFModelDetailView.as_view()), name='rf_model_detail'),
    path('models/rf/<model_name>/genes', login_required(views.rf_genes), name='rf_genes'),
    # Search endpoints.
    path('search/diseases', login_required(views.search_disease), name='search_disease'),
    path('search/studies', login_required(views.search_study), name='search_study'),
    path('search/samples', login_required(views.search_sample), name='search_sample'),
    # Disease CRUD.
    path('diseases/', login_required(views.DiseaseListView.as_view()), name='diseases'),
    path('diseases/create', login_required(views.DiseaseCreateView.as_view()), name='create_disease'),
    path('diseases/<abbreviation>', login_required(views.DiseaseDetailView.as_view()), name='disease_detail'),
    path('diseases/<abbreviation>/update', login_required(views.DiseaseUpdate.as_view()), name='disease_update'),
    path('diseases/<abbreviation>/confirm_delete', login_required(views.DiseaseDelete.as_view()), name='disease_delete'),
    # Study CRUD plus nested count-data and sample resources.
    path('studies/', login_required(views.StudyListView.as_view()), name='studies'),
    path('studies/create', login_required(views.StudyCreateView.as_view()), name='create_study'),
    path('studies/<title>', login_required(views.StudyDetailView.as_view()), name='study_detail'),
    path('studies/<title>/update', login_required(views.StudyUpdate.as_view()), name='study_update'),
    path('studies/<title>/confirm_delete', login_required(views.StudyDelete.as_view()), name='study_delete'),
    path('studies/<title>/count_data/create', login_required(views.CountDataCreateView.as_view()), name='count_data_create'),
    path('studies/<title>/count_data/<name>', login_required(views.CountDataDetailView.as_view()), name='count_data_detail'),
    path('studies/<title>/count_data/<name>/update', login_required(views.CountDataUpdate.as_view()), name='count_data_update'),
    path('studies/<title>/count_data/<name>/confirm_delete', login_required(views.CountDataDelete.as_view()), name='count_data_delete'),
    path('studies/<title>/count_data/<name>/samples', login_required(views.SampleListView.as_view()), name='samples'),
    path('studies/<title>/count_data/<name>/samples/import', login_required(views.import_sample), name='import_sample'),
    path('studies/<title>/count_data/<name>/samples/<sample_ID>', login_required(views.SampleDetailView.as_view()), name='sample_detail'),
]
"django.contrib.auth.decorators.login_required"
] | [((215, 242), 'django.contrib.auth.decorators.login_required', 'login_required', (['views.index'], {}), '(views.index)\n', (229, 242), False, 'from django.contrib.auth.decorators import login_required\n'), ((282, 313), 'django.contrib.auth.decorators.login_required', 'login_required', (['views.libraries'], {}), '(views.libraries)\n', (296, 313), False, 'from django.contrib.auth.decorators import login_required\n'), ((354, 382), 'django.contrib.auth.decorators.login_required', 'login_required', (['views.models'], {}), '(views.models)\n', (368, 382), False, 'from django.contrib.auth.decorators import login_required\n'), ((651, 682), 'django.contrib.auth.decorators.login_required', 'login_required', (['views.svm_genes'], {}), '(views.svm_genes)\n', (665, 682), False, 'from django.contrib.auth.decorators import login_required\n'), ((951, 981), 'django.contrib.auth.decorators.login_required', 'login_required', (['views.rf_genes'], {}), '(views.rf_genes)\n', (965, 981), False, 'from django.contrib.auth.decorators import login_required\n'), ((1034, 1070), 'django.contrib.auth.decorators.login_required', 'login_required', (['views.search_disease'], {}), '(views.search_disease)\n', (1048, 1070), False, 'from django.contrib.auth.decorators import login_required\n'), ((1123, 1157), 'django.contrib.auth.decorators.login_required', 'login_required', (['views.search_study'], {}), '(views.search_study)\n', (1137, 1157), False, 'from django.contrib.auth.decorators import login_required\n'), ((1208, 1243), 'django.contrib.auth.decorators.login_required', 'login_required', (['views.search_sample'], {}), '(views.search_sample)\n', (1222, 1243), False, 'from django.contrib.auth.decorators import login_required\n'), ((3011, 3046), 'django.contrib.auth.decorators.login_required', 'login_required', (['views.import_sample'], {}), '(views.import_sample)\n', (3025, 3046), False, 'from django.contrib.auth.decorators import login_required\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from collections import namedtuple
import queue
Item = namedtuple("Item", ['index', 'value', 'weight', 'density'])
# Node = namedtuple("Node", ["level", "profit", "bound", "weight"])
class Node(object):
    """State of one vertex in the branch-and-bound search tree.

    Attributes:
        level:  depth in the decision tree (-1 marks the root placeholder).
        profit: total value of the items taken so far.
        weight: total weight of the items taken so far.
        bound:  optimistic (fractional) upper bound on reachable profit.
    """

    def __init__(self, level=-1, profit=0, weight=0, bound=0):
        self.level, self.profit = level, profit
        self.weight, self.bound = weight, bound
def solve_it(input_data):
    """Solve a 0/1 knapsack instance given in the standard text format.

    The first line holds "<item_count> <capacity>"; each following line
    holds "<value> <weight>" for one item. Returns the output string
    "<best_value> 1\n" followed by the (currently empty) selection list.
    """
    rows = input_data.split('\n')
    header = rows[0].split()
    item_count = int(header[0])
    capacity = int(header[1])

    items = []
    for idx in range(item_count):
        parts = rows[idx + 1].split()
        item_value = int(parts[0])
        item_weight = int(parts[1])
        items.append(Item(idx, item_value, item_weight, item_value / item_weight))

    # Branch and bound expects the items ordered by density, best first.
    items.sort(key=lambda item: item.density, reverse=True)
    best = bb_solution(items, capacity, len(items))

    # NOTE(review): the trailing "1" claims optimality and the selection
    # list is left empty, exactly as in the original implementation.
    output_data = str(best) + ' ' + str(1) + '\n'
    output_data += ' '.join(map(str, []))
    return output_data
def greedy_solution(n, k, items):
    """Greedily pack `items` (in the given order) into a knapsack of capacity k.

    Args:
        n: total number of items (length of the taken vector).
        k: knapsack capacity.
        items: iterable of Item tuples.
    Returns:
        (total_value, taken) where taken is a 0/1 list indexed by item.index.
    """
    total = 0
    used_weight = 0
    taken = [0] * n
    for item in items:
        if used_weight + item.weight > k:
            continue
        taken[item.index] = 1
        total += item.value
        used_weight += item.weight
    return total, taken
def total_value(items, max_weight):
    """Total value of `items`, or 0 if their combined weight exceeds `max_weight`."""
    combined_weight = sum(item.weight for item in items)
    if combined_weight > max_weight:
        return 0
    return sum(item.value for item in items)
# v = list of item values or profit
# w = list of item weight or cost
# W = max weight or max cost for the knapsack
def zeroOneKnapsack(v, w, W):
    """0/1 knapsack via bottom-up dynamic programming.

    Args:
        v: list of item values.
        w: list of item weights (parallel to v).
        W: knapsack capacity.
    Returns:
        [best_value, marked] where marked is a 0/1 list of taken items.
    """
    n = len(v)
    # c[i][j] = best value achievable with items 0..i at capacity j.
    c = [[0] * (W + 1) for _ in range(n)]
    for i in range(n):
        for j in range(W + 1):
            # NOTE(review): for i == 0 the recurrence reads c[-1] (the last
            # row), which aliases row 0 when n == 1; preserved as-is.
            if w[i] > j:
                # Item i does not fit at capacity j: inherit the row above.
                c[i][j] = c[i - 1][j]
            else:
                c[i][j] = max(c[i - 1][j], v[i] + c[i - 1][j - w[i]])
    return [c[n - 1][W], getUsedItems(w, c)]
# w = list of item weight or cost
# c = the cost matrix created by the dynamic programming solution
def getUsedItems(w, c):
    """Backtrack through the DP cost matrix to recover the chosen items.

    Args:
        w: item weights (same order as the matrix rows).
        c: cost matrix produced by the dynamic-programming solver.
    Returns:
        0/1 list, 1 marking items included in the optimum.
    """
    i = len(c) - 1
    remaining = len(c[0]) - 1
    marked = [0] * (i + 1)
    while i >= 0 and remaining >= 0:
        # An item was taken when its row improved on the row above
        # (row 0 counts as taken whenever its cell holds a positive value).
        if (i == 0 and c[i][remaining] > 0) or c[i][remaining] != c[i - 1][remaining]:
            marked[i] = 1
            remaining -= w[i]
        i -= 1
    return marked
def zeros(rows, cols):
    """Return a `rows` x `cols` matrix of zeros as independent row lists."""
    return [[0] * cols for _ in range(rows)]
def bound(u, n, max_weight, items):
    """Optimistic profit bound for node `u` via the fractional relaxation.

    Packs whole items after u.level greedily, then tops up the remaining
    capacity with a fraction of the first item that no longer fits.
    Items must be sorted by density, best first. Returns 0 when the node
    is already at or over capacity.
    """
    if u.weight >= max_weight:
        return 0
    best = u.profit
    load = u.weight
    for j in range(u.level + 1, n):
        item = items[j]
        if load + item.weight > max_weight:
            # Partial credit for the first item that does not fully fit.
            return best + (max_weight - load) * item.density
        load += item.weight
        best += item.value
    return best
def bb_solution(items, max_weight, n):
    """Branch-and-bound 0/1 knapsack.

    Args:
        items: Item tuples sorted by density (value/weight), best first.
        max_weight: knapsack capacity.
        n: number of items.
    Returns:
        The maximum achievable profit.

    Fixes two defects in the previous implementation: (1) a single
    mutable Node instance was reused and put on the queue repeatedly,
    so every queued entry aliased the same object and was corrupted by
    later iterations; (2) only the "take item" branch was ever explored,
    so the search could miss the optimum. Nodes are now immutable
    (level, profit, weight) tuples and both branches are expanded.
    """

    def _bound(level, profit, weight):
        # Fractional-knapsack upper bound for the subtree rooted at `level`.
        if weight >= max_weight:
            return 0
        best = profit
        load = weight
        j = level + 1
        while j < n and load + items[j].weight <= max_weight:
            load += items[j].weight
            best += items[j].value
            j += 1
        if j < n:
            best += (max_weight - load) * items[j].density
        return best

    max_profit = 0
    q = queue.Queue()
    q.put((-1, 0, 0))  # (level, profit, weight) of the root node
    while not q.empty():
        level, profit, weight = q.get()
        if level == n - 1:
            continue
        nxt = level + 1
        # Branch 1: take item `nxt`.
        take_weight = weight + items[nxt].weight
        take_profit = profit + items[nxt].value
        if take_weight <= max_weight and take_profit > max_profit:
            max_profit = take_profit
        if _bound(nxt, take_profit, take_weight) > max_profit:
            q.put((nxt, take_profit, take_weight))
        # Branch 2: skip item `nxt`.
        if _bound(nxt, profit, weight) > max_profit:
            q.put((nxt, profit, weight))
    return max_profit
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        # Solve the knapsack instance described by the file passed on the
        # command line and print the formatted result.
        file_location = sys.argv[1].strip()
        with open(file_location, 'r') as input_data_file:
            input_data = input_data_file.read()
        print(solve_it(input_data))
    else:
        print(
            'This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/ks_4_0)')
| [
"queue.Queue",
"collections.namedtuple"
] | [((99, 158), 'collections.namedtuple', 'namedtuple', (['"""Item"""', "['index', 'value', 'weight', 'density']"], {}), "('Item', ['index', 'value', 'weight', 'density'])\n", (109, 158), False, 'from collections import namedtuple\n'), ((3468, 3481), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (3479, 3481), False, 'import queue\n')] |
import os
from pathlib import Path
def make_backup(c, file):
    """Back up `file` to `<file>.orig` via the context's shell runner.

    Symlinks and missing files are ignored (returns False). When a
    backup already exists the move is skipped, but True is still
    returned because a backup is in place.
    """
    if not file.exists() or file.is_symlink():
        return False
    backup = Path('{}.orig'.format(file))
    if not backup.exists():
        c.run('mv -f {} {}'.format(file, backup))
    return True
def symlink(c, source, destination, backup=True):
    """Create a relative symlink at `destination` pointing to `source`.

    `source` is resolved against the context's working directory and
    converted to a path relative to the destination's parent; the parent
    is created and any pre-existing destination is (optionally) backed
    up before being removed.
    """
    base_dir = Path(Path.cwd(), c.cwd)
    target = Path(destination).expanduser()
    # os.path.relpath is different than Path.relative_to which wouldn't work here
    link_source = os.path.relpath(Path(base_dir, source), target.parent)
    if backup:
        make_backup(c, target)
    c.run('mkdir -p {}'.format(target.parent))
    c.run('rm -f {}'.format(target))
    with c.cd(target.parent):
        c.run('ln -fs {} {}'.format(link_source, target.name))
| [
"pathlib.Path.cwd",
"pathlib.Path"
] | [((367, 377), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (375, 377), False, 'from pathlib import Path\n'), ((546, 563), 'pathlib.Path', 'Path', (['cwd', 'source'], {}), '(cwd, source)\n', (550, 563), False, 'from pathlib import Path\n'), ((404, 421), 'pathlib.Path', 'Path', (['destination'], {}), '(destination)\n', (408, 421), False, 'from pathlib import Path\n')] |
import importlib
import os
import sys
from typing import Any, Callable, Optional
from mkdocs.config import Config, config_options
from mkdocs.livereload import LiveReloadServer
from mkdocs.plugins import BasePlugin
from .collectors import ModuleCollector
class Plugin(BasePlugin):
    """
    MkDocs Plugin class.

    Renders markdown documentation for the configured Python module tree
    into the MkDocs docs directory, rebuilding before each build and
    watching the module's sources during `mkdocs serve`.
    """

    _root_module: Optional[ModuleCollector] = None

    # out-dir is config["docs_dir"]
    config_scheme = (
        ("module", config_options.Type(str, required=True)),
        ("out-name", config_options.Type(str, default=None)),
    )

    def root_module(self, config: Config) -> ModuleCollector:
        """Lazily import the configured module and wrap it in a collector."""
        if not self._root_module:
            out_dir = config["docs_dir"]
            out_name = self.config["out-name"]
            root_name = self.config["module"]
            _root_module = importlib.import_module(root_name)
            self._root_module = ModuleCollector(
                _root_module, out_dir, out_name=out_name, enable_yaml_header=True
            )
        return self._root_module

    def on_config(self, config: Config, **kw: Any) -> Config:
        """Ensure the markdown extensions this plugin relies on are enabled."""
        md_ext = config.get("markdown_extensions", [])
        if "attr_list" not in md_ext:
            md_ext.append("attr_list")
        if "meta" not in md_ext:
            md_ext.append("meta")
        # BUG FIX: this was previously written back under the misspelled
        # key "markdown_extantions", which silently dropped the additions
        # whenever "markdown_extensions" was absent from the config.
        config["markdown_extensions"] = md_ext
        return config

    def on_serve(
        self,
        server: LiveReloadServer,
        config: Config,
        builder: Callable[[], None],
        **kw: Any
    ) -> LiveReloadServer:
        """Build once, then watch the module's source tree for changes."""
        self._build(config)
        # add watching path.
        module_path = self.config["module"].replace(".", "/")
        server.watch(module_path)
        return server

    def on_pre_build(self, config: Config) -> None:
        """Build markdown docs from python modules."""
        self._build(config)

    def _build(self, config: Config) -> None:
        """Make the working directory importable, then render the docs."""
        cwd = os.getcwd()
        if cwd not in sys.path:
            sys.path.append(cwd)
        # create docs.
        self.root_module(config).write()
| [
"mkdocs.config.config_options.Type",
"sys.path.append",
"importlib.import_module",
"os.getcwd"
] | [((1914, 1925), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1923, 1925), False, 'import os\n'), ((456, 495), 'mkdocs.config.config_options.Type', 'config_options.Type', (['str'], {'required': '(True)'}), '(str, required=True)\n', (475, 495), False, 'from mkdocs.config import Config, config_options\n'), ((519, 557), 'mkdocs.config.config_options.Type', 'config_options.Type', (['str'], {'default': 'None'}), '(str, default=None)\n', (538, 557), False, 'from mkdocs.config import Config, config_options\n'), ((824, 858), 'importlib.import_module', 'importlib.import_module', (['root_name'], {}), '(root_name)\n', (847, 858), False, 'import importlib\n'), ((1970, 1990), 'sys.path.append', 'sys.path.append', (['cwd'], {}), '(cwd)\n', (1985, 1990), False, 'import sys\n')] |
# _*_ encoding: utf-8 _*_
__author__ = 'nzb'
__date__ = '2020/6/6 17:47'
__doc__ = "二值化(Thresholding)"
import cv2
import numpy as np
# 计算: y={0 (if y<128 ) 255 (else)
def Grayscale(img):
    """BT.709 luma conversion for a BGR image array.

    Args:
        img: H x W x 3 array in OpenCV channel order (B, G, R).
    Returns:
        H x W uint8 grayscale array (weighted sum truncated toward zero).
    """
    blue = img[:, :, 0]
    green = img[:, :, 1]
    red = img[:, :, 2]
    gray = 0.2126 * red + 0.7152 * green + 0.0722 * blue
    return gray.astype(np.uint8)
def Thresholding(img, th=128):
    """Binarize `img` in place: pixels < th become 0, pixels >= th become 255.

    Args:
        img: grayscale ndarray; modified in place.
        th: threshold value (default 128).
    Returns:
        The same array, binarized.
    """
    img[img < th] = 0
    # Fix: the saturated value must be 255 (full white), not 250, matching
    # the rule "y = 255 (else)" stated in the module header comment.
    img[img >= th] = 255
    return img
img = cv2.imread("../../assets/imori.jpg").astype(np.float)
img2 = Grayscale(img)
img2 = Thresholding(img2)
# cv2.imshow("img", img)
cv2.imshow("ret", img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"cv2.imread",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imshow"
] | [((579, 602), 'cv2.imshow', 'cv2.imshow', (['"""ret"""', 'img2'], {}), "('ret', img2)\n", (589, 602), False, 'import cv2\n'), ((604, 618), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (615, 618), False, 'import cv2\n'), ((619, 642), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (640, 642), False, 'import cv2\n'), ((451, 487), 'cv2.imread', 'cv2.imread', (['"""../../assets/imori.jpg"""'], {}), "('../../assets/imori.jpg')\n", (461, 487), False, 'import cv2\n')] |
"""Test for pretty format table from jsons."""
from datetime import datetime
from tabulate import tabulate
from ecs_tasks_ops import pretty_json
from ecs_tasks_ops import pretty_table
def test_tabulate_list_json_empty():
    """Calling with no data returns the default empty-data message."""
    result = pretty_table.tabulate_list_json()
    assert result == "No data to show"
def test_tabulate_list_json_empty_msg():
    """A custom empty message is echoed back when there is no data."""
    message = "No information"
    assert pretty_table.tabulate_list_json(empty_msg=message) == message
def test_tabulate_list_json_data():
    """All fields of every row are tabulated."""
    rows = [{"foo": 1, "bar": "hi", "none": None}]
    expected = tabulate(rows, headers={"foo": "foo", "bar": "bar", "none": "none"})
    assert pretty_table.tabulate_list_json(rows) == expected
def test_tabulate_list_json_only_one():
    """fields_to=1 keeps only the first attribute of each row."""
    rows = [{"foo": 1, "bar": "hi", "none": None}]
    expected = tabulate([{"foo": 1}], headers={"foo": "foo"})
    assert pretty_table.tabulate_list_json(rows, fields_to=1) == expected
def test_tabulate_list_json_from_one_to_three():
    """fields_from/fields_to select the half-open attribute range [1, 3)."""
    rows = [{"foo": 1, "bar": "hi", "none": None}]
    expected = tabulate([{"bar": "hi", "none": None}], headers={"bar": "bar", "none": "none"})
    assert pretty_table.tabulate_list_json(rows, fields_from=1, fields_to=3) == expected
def test_tabulate_list_json_from_out_of_index():
    """A fields_from beyond the attribute count yields an empty string."""
    rows = [{"foo": 1, "bar": "hi", "none": None}]
    assert pretty_table.tabulate_list_json(rows, fields_from=4) == ""
def test_tabulate_list_json_to_out_of_index():
    """A fields_to beyond the attribute count tabulates everything."""
    rows = [{"foo": 1, "bar": "hi", "none": None}]
    expected = tabulate(rows, headers={"foo": "foo", "bar": "bar", "none": "none"})
    assert pretty_table.tabulate_list_json(rows, fields_to=4) == expected
def test_tabulate_list_json_keys():
    """Only the explicitly requested keys are tabulated."""
    rows = [{"foo": 1, "bar": "hi", "none": None}]
    expected = tabulate([{"foo": 1, "none": None}], headers={"foo": "foo", "none": "none"})
    assert pretty_table.tabulate_list_json_keys(rows, keys=["foo", "none"]) == expected
def test_tabulate_list_json_keys_non_exists():
    """Requesting keys absent from the data yields an empty string."""
    rows = [{"foo": 1, "bar": "hi", "none": None}]
    assert pretty_table.tabulate_list_json_keys(rows, keys=["another", "no_included"]) == ""
def test_json_object_date():
    """datetime values are serialized as ISO-8601 strings."""
    now = datetime.now()
    serialized = pretty_json.dumps([{"name": "Object name", "date": now}], indent=None)
    assert serialized == '[{"name": "Object name", "date": "' + now.isoformat() + '"}]'
def test_encoder_not_date():
    """The encoder ISO-formats datetimes and returns None for anything else."""
    now = datetime.now()
    encoder = pretty_json.DateTimeEncoder()
    assert encoder.default(now) == now.isoformat()
    assert encoder.default(34) is None
| [
"ecs_tasks_ops.pretty_json.DateTimeEncoder",
"ecs_tasks_ops.pretty_table.tabulate_list_json_keys",
"tabulate.tabulate",
"ecs_tasks_ops.pretty_table.tabulate_list_json",
"datetime.datetime.now",
"ecs_tasks_ops.pretty_json.dumps"
] | [((262, 295), 'ecs_tasks_ops.pretty_table.tabulate_list_json', 'pretty_table.tabulate_list_json', ([], {}), '()\n', (293, 295), False, 'from ecs_tasks_ops import pretty_table\n'), ((460, 512), 'ecs_tasks_ops.pretty_table.tabulate_list_json', 'pretty_table.tabulate_list_json', ([], {'empty_msg': 'empty_msg'}), '(empty_msg=empty_msg)\n', (491, 512), False, 'from ecs_tasks_ops import pretty_table\n'), ((676, 713), 'ecs_tasks_ops.pretty_table.tabulate_list_json', 'pretty_table.tabulate_list_json', (['data'], {}), '(data)\n', (707, 713), False, 'from ecs_tasks_ops import pretty_table\n'), ((986, 1036), 'ecs_tasks_ops.pretty_table.tabulate_list_json', 'pretty_table.tabulate_list_json', (['data'], {'fields_to': '(1)'}), '(data, fields_to=1)\n', (1017, 1036), False, 'from ecs_tasks_ops import pretty_table\n'), ((1332, 1397), 'ecs_tasks_ops.pretty_table.tabulate_list_json', 'pretty_table.tabulate_list_json', (['data'], {'fields_from': '(1)', 'fields_to': '(3)'}), '(data, fields_from=1, fields_to=3)\n', (1363, 1397), False, 'from ecs_tasks_ops import pretty_table\n'), ((1628, 1680), 'ecs_tasks_ops.pretty_table.tabulate_list_json', 'pretty_table.tabulate_list_json', (['data'], {'fields_from': '(4)'}), '(data, fields_from=4)\n', (1659, 1680), False, 'from ecs_tasks_ops import pretty_table\n'), ((1852, 1902), 'ecs_tasks_ops.pretty_table.tabulate_list_json', 'pretty_table.tabulate_list_json', (['data'], {'fields_to': '(4)'}), '(data, fields_to=4)\n', (1883, 1902), False, 'from ecs_tasks_ops import pretty_table\n'), ((2186, 2250), 'ecs_tasks_ops.pretty_table.tabulate_list_json_keys', 'pretty_table.tabulate_list_json_keys', (['data'], {'keys': "['foo', 'none']"}), "(data, keys=['foo', 'none'])\n", (2222, 2250), False, 'from ecs_tasks_ops import pretty_table\n'), ((2521, 2596), 'ecs_tasks_ops.pretty_table.tabulate_list_json_keys', 'pretty_table.tabulate_list_json_keys', (['data'], {'keys': "['another', 'no_included']"}), "(data, keys=['another', 'no_included'])\n", (2557, 
2596), False, 'from ecs_tasks_ops import pretty_table\n'), ((2704, 2718), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2716, 2718), False, 'from datetime import datetime\n'), ((2779, 2815), 'ecs_tasks_ops.pretty_json.dumps', 'pretty_json.dumps', (['data'], {'indent': 'None'}), '(data, indent=None)\n', (2796, 2815), False, 'from ecs_tasks_ops import pretty_json\n'), ((2983, 2997), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2995, 2997), False, 'from datetime import datetime\n'), ((3008, 3037), 'ecs_tasks_ops.pretty_json.DateTimeEncoder', 'pretty_json.DateTimeEncoder', ([], {}), '()\n', (3035, 3037), False, 'from ecs_tasks_ops import pretty_json\n'), ((725, 793), 'tabulate.tabulate', 'tabulate', (['data'], {'headers': "{'foo': 'foo', 'bar': 'bar', 'none': 'none'}"}), "(data, headers={'foo': 'foo', 'bar': 'bar', 'none': 'none'})\n", (733, 793), False, 'from tabulate import tabulate\n'), ((1048, 1091), 'tabulate.tabulate', 'tabulate', (['data_test'], {'headers': "{'foo': 'foo'}"}), "(data_test, headers={'foo': 'foo'})\n", (1056, 1091), False, 'from tabulate import tabulate\n'), ((1409, 1468), 'tabulate.tabulate', 'tabulate', (['data_test'], {'headers': "{'bar': 'bar', 'none': 'none'}"}), "(data_test, headers={'bar': 'bar', 'none': 'none'})\n", (1417, 1468), False, 'from tabulate import tabulate\n'), ((1914, 1982), 'tabulate.tabulate', 'tabulate', (['data'], {'headers': "{'foo': 'foo', 'bar': 'bar', 'none': 'none'}"}), "(data, headers={'foo': 'foo', 'bar': 'bar', 'none': 'none'})\n", (1922, 1982), False, 'from tabulate import tabulate\n'), ((2262, 2321), 'tabulate.tabulate', 'tabulate', (['data_test'], {'headers': "{'foo': 'foo', 'none': 'none'}"}), "(data_test, headers={'foo': 'foo', 'none': 'none'})\n", (2270, 2321), False, 'from tabulate import tabulate\n')] |
import logging
import socketserver
import threading
from robottelemetryservice.infrastructure.serialisation import serializer
from robottelemetryservice.infrastructure.configuration import application
# Module-level logger and the measurement repository resolved from the
# application configuration at import time.
log=logging.getLogger(__name__)
measurement_repository=application.get_measurement_repository()
class UDPMeasurementEventHandler(socketserver.DatagramRequestHandler):
    """Handles one measurement datagram by logging the sender and payload."""

    def handle(self):
        """Decode the received datagram and log it as '<ip>:<payload>'."""
        raw_payload = self.request[0]
        sender_ip = self.client_address[0]
        decoded = raw_payload.strip().decode()
        log.info(sender_ip + ":" + decoded)
        # Persisting the measurements is currently disabled:
        # measurements=serializer.udp_msg_to_measurements(udp_payload)
        # measurement_repository.add(measurements)
| [
"logging.getLogger",
"robottelemetryservice.infrastructure.configuration.application.get_measurement_repository"
] | [((206, 233), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (223, 233), False, 'import logging\n'), ((257, 297), 'robottelemetryservice.infrastructure.configuration.application.get_measurement_repository', 'application.get_measurement_repository', ([], {}), '()\n', (295, 297), False, 'from robottelemetryservice.infrastructure.configuration import application\n')] |
import base64
import os
import unittest
from unittest.mock import patch
from click.testing import CliRunner
from keeper_secrets_manager_core.core import SecretsManager
from keeper_secrets_manager_core.storage import InMemoryKeyValueStorage
from keeper_secrets_manager_core import mock
from keeper_secrets_manager_core.mock import MockConfig
from keeper_secrets_manager_cli.profile import Profile
from keeper_secrets_manager_cli.__main__ import cli
from keeper_secrets_manager_cli.config import Config
import tempfile
import configparser
import json
import re
class ProfileTest(unittest.TestCase):
def setUp(self) -> None:
self.orig_dir = os.getcwd()
self.temp_dir = tempfile.TemporaryDirectory()
os.chdir(self.temp_dir.name)
# Clear env var from other tests
os.environ.pop("KSM_CONFIG", None)
os.environ.pop("KSM_CONFIG_BASE64_1", None)
os.environ.pop("KSM_CONFIG_BASE64_DESC_1", None)
os.environ.pop("KSM_CONFIG_BASE64_2", None)
os.environ.pop("KSM_CONFIG_BASE64_DESC_2", None)
self.delete_me = []
def tearDown(self) -> None:
os.chdir(self.orig_dir)
os.environ.pop("KSM_CONFIG", None)
os.environ.pop("KSM_CONFIG_BASE64_1", None)
os.environ.pop("KSM_CONFIG_BASE64_DESC_1", None)
os.environ.pop("KSM_CONFIG_BASE64_2", None)
os.environ.pop("KSM_CONFIG_BASE64_DESC_2", None)
for item in self.delete_me:
if os.path.exists(item) is True:
os.unlink(item)
    def test_the_works(self):
        """Initialize a default and a named profile, then switch the active one.

        Drives the real CLI against a mocked Secrets Manager client and
        checks the keeper.ini contents and the `profile list` output.
        """
        mock_config = MockConfig.make_config()
        secrets_manager = SecretsManager(config=InMemoryKeyValueStorage(mock_config))
        # One mock record, queued once per expected client fetch (3 CLI calls).
        res = mock.Response()
        one = res.add_record(title="My Record 1")
        one.field("login", "My Login 1")
        one.field("password", "<PASSWORD>")
        one.custom_field("My Custom 1", "custom1")
        queue = mock.ResponseQueue(client=secrets_manager)
        queue.add_response(res)
        queue.add_response(res)
        queue.add_response(res)
        with patch('keeper_secrets_manager_cli.KeeperCli.get_client') as mock_client:
            mock_client.return_value = secrets_manager
            # Create a keeper.ini with the default profile
            default_token = "XYZ<PASSWORD>"
            runner = CliRunner()
            result = runner.invoke(cli, ['profile', 'init', '-t', default_token], catch_exceptions=False)
            self.assertEqual(0, result.exit_code, "did not get a success for default init")
            self.assertTrue(os.path.exists(Config.default_ini_file), "could not find ini file")
            # Add to the keeper.ini a new profile
            test_token = "ABC123"
            result = runner.invoke(cli, ['profile', 'init', "-p", "test", '-t',
                                         test_token], catch_exceptions=False)
            self.assertEqual(0, result.exit_code, "did not get a success for test init")
            self.assertTrue(os.path.exists(Config.default_ini_file), "could not find ini file")
            config = configparser.ConfigParser(allow_no_value=True)
            config.read(Config.default_ini_file)
            # We should have two profiles now.
            self.assertTrue(Profile.default_profile in config, "Could not find the default profile in the config.")
            self.assertTrue("test" in config, "Could not find the test profile in the config.")
            # ------------------------
            # `profile list` must report the default profile as the active one.
            result = runner.invoke(cli, ['profile', 'list', '--json'], catch_exceptions=False)
            self.assertEqual(0, result.exit_code, "did not get a success on list")
            profiles = json.loads(result.output)
            default_item = next((profile for profile in profiles if profile["name"] == Profile.default_profile), None)
            self.assertIsNotNone(default_item, "could not find default profile in list")
            self.assertTrue(default_item["active"], "default profile is not active")
            test_item = next((profile for profile in profiles if profile["name"] == "test"), None)
            self.assertIsNotNone(test_item, "could not find default profile in list")
            self.assertFalse(test_item["active"], "test profile is active")
            # ------------------------
            # After `profile active test`, the active flag must move to "test".
            result = runner.invoke(cli, ['profile', 'active', 'test'], catch_exceptions=False)
            self.assertEqual(0, result.exit_code, "did not get a success on active")
            result = runner.invoke(cli, ['profile', 'list', '--json'], catch_exceptions=False)
            self.assertEqual(0, result.exit_code, "did not get a success on list")
            profiles = json.loads(result.output)
            default_item = next((profile for profile in profiles if profile["name"] == Profile.default_profile), None)
            self.assertIsNotNone(default_item, "could not find default profile in list")
            self.assertFalse(default_item["active"], "default profile is active")
            test_item = next((profile for profile in profiles if profile["name"] == "test"), None)
            self.assertIsNotNone(test_item, "could not find default profile in list")
            self.assertTrue(test_item["active"], "test profile is not active")
    def test_config_ini_import_export(self):
        """Round-trip an INI config through `profile import` / `profile export`.

        Imports a two-profile base64 INI, verifies the file on disk matches,
        then exports only the "Another" profile and checks its contents.
        """
        mock_config = MockConfig.make_config()
        ini_config = '''
[_default]
clientid = D_XXXXX_CI
privatekey = D_XXXXX_PK
appkey = D_XXXX_AK
hostname = {}
[Another]
clientid = A_XXXXX_CI
privatekey = A_XXXXX_PK
appkey = A_XXXX_AK
hostname = {}
[_config]
active_profile = _default
color = True
'''.format(mock_config.get("hostname"), mock_config.get("hostname"))
        runner = CliRunner()
        # Test INI import
        base64_config = base64.urlsafe_b64encode(ini_config.encode())
        self.assertFalse(os.path.exists("keeper.ini"), "an ini config file already exists")
        result = runner.invoke(cli, ['profile', 'import', base64_config.decode()], catch_exceptions=False)
        self.assertEqual(0, result.exit_code, "did not get a success on list")
        self.assertTrue(os.path.exists("keeper.ini"), "the ini config doesn't exists")
        with open("keeper.ini", "r") as fh:
            file_config = fh.read()
            fh.close()
        self.assertEqual(ini_config, file_config, "config on disk and defined above are not the same.")
        # Test INI export. Get the 'Another' profile
        result = runner.invoke(cli, ['profile', 'export', "Another"], catch_exceptions=False)
        print(result.output)
        self.assertEqual(0, result.exit_code, "did not get a success on list")
        config_data = result.output
        try:
            # The export must contain only the requested profile's keys.
            config = base64.urlsafe_b64decode(config_data).decode()
            self.assertRegex(config, r'A_XXXXX_CI', 'did not find the Another client id')
            self.assertFalse(re.search(r'D_XXXXX_CI', config, re.MULTILINE), 'found the default client id')
        except Exception as err:
            self.fail("Could not base64 decode the config: {}".format(err))
def test_config_json_import_export(self):
json_config = MockConfig.make_config()
runner = CliRunner()
# Test INI import
base64_config = base64.urlsafe_b64encode(json.dumps(json_config).encode())
self.assertFalse(os.path.exists("keeper.ini"), "an ini config file already exists")
result = runner.invoke(cli, ['profile', 'import', base64_config.decode()], catch_exceptions=False)
self.assertEqual(0, result.exit_code, "did not get a success on list")
self.assertTrue(os.path.exists("keeper.ini"), "the ini config doesn't exists")
with open("keeper.ini", "r") as fh:
file_config = fh.read()
fh.close()
assert json_config["clientId"] in file_config, "did not find the client id"
assert json_config["privateKey"] in file_config, "blah"
config = configparser.ConfigParser(allow_no_value=True)
config.read("keeper.ini")
self.assertEqual(json_config["clientId"], config["_default"].get("clientid"), "client keys match")
result = runner.invoke(cli, ['profile', 'export', '--file-format=json'],
catch_exceptions=False)
print(result.output)
self.assertEqual(0, result.exit_code, "did not get a success on list")
config_data = result.output
try:
test_config = base64.urlsafe_b64decode(config_data).decode()
config = json.loads(test_config)
self.assertEqual(json_config["hostname"], config["hostname"], "host name is not the same")
except Exception as err:
self.fail("Could not base64/json decode the config: {}".format(err))
def test_auto_config(self):
json_config = MockConfig.make_config()
base64_config = base64.urlsafe_b64encode(json.dumps(json_config).encode())
runner = CliRunner()
# Create two configs
os.environ["KSM_CONFIG_BASE64_1"] = base64_config.decode()
os.environ["KSM_CONFIG_BASE64_DESC_1"] = "App1"
os.environ["KSM_CONFIG_BASE64_2"] = base64_config.decode()
os.environ["KSM_CONFIG_BASE64_DESC_2"] = "App2"
# Using a file output due to cli runner joining stdout and stderr
with tempfile.NamedTemporaryFile(delete=True) as tf:
tf_name = tf.name
self.delete_me.append(tf_name)
tf.close()
result = runner.invoke(cli, [
'-o', tf_name,
'profile', 'list', '--json'], catch_exceptions=False)
self.assertEqual(0, result.exit_code, "did not get a success on list")
with open(tf_name, "r") as fh:
profile_data = json.load(fh)
self.assertEqual("App1", profile_data[0]["name"], "found first app")
self.assertEqual("App2", profile_data[1]["name"], "found second app")
fh.close()
def test_import_sdk_json(self):
mock_config = MockConfig.make_config()
base64_json = MockConfig.make_base64(config=mock_config)
runner = CliRunner()
result = runner.invoke(cli, ['profile', 'import', base64_json], catch_exceptions=False)
self.assertEqual(0, result.exit_code, "did not get a success on list")
self.assertTrue(os.path.exists("keeper.ini"), "the ini config doesn't exists")
config = configparser.ConfigParser(allow_no_value=True)
config.read(Config.default_ini_file)
profile = config["_default"]
self.assertIsNotNone(profile, "could not find the profile")
self.assertEqual(mock_config.get("appKey"), profile.get("appKey"), "did not get the correct app key")
self.assertEqual(mock_config.get("hostname"), profile.get("hostname"), "did not get the correct hostname")
def test_auto_config_sdk_jenkins_json(self):
"""
Test a multi config via env vars.
The Jenkins plugin can export multiple configs. This allows the KSM CLI inside of a
build to use two application. Most likely no one will ever use this, but we need to
test for it.
"""
mock_config = MockConfig.make_config()
base64_json = MockConfig.make_base64(config=mock_config)
runner = CliRunner()
# Create two configs
os.environ["KSM_CONFIG_BASE64_1"] = base64_json
os.environ["KSM_CONFIG_BASE64_DESC_1"] = "SDK 1"
os.environ["KSM_CONFIG_BASE64_2"] = base64_json
os.environ["KSM_CONFIG_BASE64_DESC_2"] = "SDK 2"
# Using a file output due to cli runner joining stdout and stderr
with tempfile.NamedTemporaryFile(delete=False) as tf:
tf_name = tf.name
self.delete_me.append(tf_name)
tf.close()
# Make sure keeper ini file doesn't exists
if os.path.exists(Config.default_ini_file) is True:
os.unlink(Config.default_ini_file)
result = runner.invoke(cli, [
'-o', tf_name,
'profile', 'list', '--json'], catch_exceptions=False)
self.assertEqual(0, result.exit_code, "did not get a success on list")
with open(tf_name, "r") as fh:
profile_data = json.load(fh)
self.assertEqual("SDK 1", profile_data[0]["name"], "did not find first app")
self.assertEqual("SDK 2", profile_data[1]["name"], "did not find second app")
self.assertFalse(os.path.exists(Config.default_ini_file), "keeper.ini exists when it should not")
fh.close()
def test_auto_config_sdk_base64_json(self):
"""
Test a base64 config via env vars.
Most people will use it this way.
"""
mock_config = MockConfig.make_config()
base64_json = MockConfig.make_base64(config=mock_config)
runner = CliRunner()
# Create two configs
os.environ["KSM_CONFIG"] = base64_json
# Using a file output due to cli runner joining stdout and stderr
with tempfile.NamedTemporaryFile() as tf:
tf_name = tf.name
self.delete_me.append(tf_name)
tf.close()
# Make sure keeper ini file doesn't exists
if os.path.exists(Config.default_ini_file) is True:
os.unlink(Config.default_ini_file)
result = runner.invoke(cli, [
'-o', tf_name,
'profile', 'list', '--json'], catch_exceptions=False)
self.assertEqual(0, result.exit_code, "did not get a success on list")
with open(tf_name, "r") as fh:
profile_data = json.load(fh)
self.assertEqual(Profile.default_profile, profile_data[0]["name"], "did not find default profile")
self.assertFalse(os.path.exists(Config.default_ini_file), "keeper.ini exists when it should not")
fh.close()
if __name__ == '__main__':
unittest.main()
| [
"configparser.ConfigParser",
"keeper_secrets_manager_core.mock.MockConfig.make_base64",
"unittest.main",
"unittest.mock.patch",
"re.search",
"os.path.exists",
"keeper_secrets_manager_core.storage.InMemoryKeyValueStorage",
"json.dumps",
"keeper_secrets_manager_core.mock.Response",
"os.unlink",
"t... | [((14095, 14110), 'unittest.main', 'unittest.main', ([], {}), '()\n', (14108, 14110), False, 'import unittest\n'), ((653, 664), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (662, 664), False, 'import os\n'), ((689, 718), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (716, 718), False, 'import tempfile\n'), ((727, 755), 'os.chdir', 'os.chdir', (['self.temp_dir.name'], {}), '(self.temp_dir.name)\n', (735, 755), False, 'import os\n'), ((806, 840), 'os.environ.pop', 'os.environ.pop', (['"""KSM_CONFIG"""', 'None'], {}), "('KSM_CONFIG', None)\n", (820, 840), False, 'import os\n'), ((849, 892), 'os.environ.pop', 'os.environ.pop', (['"""KSM_CONFIG_BASE64_1"""', 'None'], {}), "('KSM_CONFIG_BASE64_1', None)\n", (863, 892), False, 'import os\n'), ((901, 949), 'os.environ.pop', 'os.environ.pop', (['"""KSM_CONFIG_BASE64_DESC_1"""', 'None'], {}), "('KSM_CONFIG_BASE64_DESC_1', None)\n", (915, 949), False, 'import os\n'), ((958, 1001), 'os.environ.pop', 'os.environ.pop', (['"""KSM_CONFIG_BASE64_2"""', 'None'], {}), "('KSM_CONFIG_BASE64_2', None)\n", (972, 1001), False, 'import os\n'), ((1010, 1058), 'os.environ.pop', 'os.environ.pop', (['"""KSM_CONFIG_BASE64_DESC_2"""', 'None'], {}), "('KSM_CONFIG_BASE64_DESC_2', None)\n", (1024, 1058), False, 'import os\n'), ((1129, 1152), 'os.chdir', 'os.chdir', (['self.orig_dir'], {}), '(self.orig_dir)\n', (1137, 1152), False, 'import os\n'), ((1162, 1196), 'os.environ.pop', 'os.environ.pop', (['"""KSM_CONFIG"""', 'None'], {}), "('KSM_CONFIG', None)\n", (1176, 1196), False, 'import os\n'), ((1205, 1248), 'os.environ.pop', 'os.environ.pop', (['"""KSM_CONFIG_BASE64_1"""', 'None'], {}), "('KSM_CONFIG_BASE64_1', None)\n", (1219, 1248), False, 'import os\n'), ((1257, 1305), 'os.environ.pop', 'os.environ.pop', (['"""KSM_CONFIG_BASE64_DESC_1"""', 'None'], {}), "('KSM_CONFIG_BASE64_DESC_1', None)\n", (1271, 1305), False, 'import os\n'), ((1314, 1357), 'os.environ.pop', 'os.environ.pop', 
(['"""KSM_CONFIG_BASE64_2"""', 'None'], {}), "('KSM_CONFIG_BASE64_2', None)\n", (1328, 1357), False, 'import os\n'), ((1366, 1414), 'os.environ.pop', 'os.environ.pop', (['"""KSM_CONFIG_BASE64_DESC_2"""', 'None'], {}), "('KSM_CONFIG_BASE64_DESC_2', None)\n", (1380, 1414), False, 'import os\n'), ((1638, 1662), 'keeper_secrets_manager_core.mock.MockConfig.make_config', 'MockConfig.make_config', ([], {}), '()\n', (1660, 1662), False, 'from keeper_secrets_manager_core.mock import MockConfig\n'), ((1765, 1780), 'keeper_secrets_manager_core.mock.Response', 'mock.Response', ([], {}), '()\n', (1778, 1780), False, 'from keeper_secrets_manager_core import mock\n'), ((1985, 2027), 'keeper_secrets_manager_core.mock.ResponseQueue', 'mock.ResponseQueue', ([], {'client': 'secrets_manager'}), '(client=secrets_manager)\n', (2003, 2027), False, 'from keeper_secrets_manager_core import mock\n'), ((5401, 5425), 'keeper_secrets_manager_core.mock.MockConfig.make_config', 'MockConfig.make_config', ([], {}), '()\n', (5423, 5425), False, 'from keeper_secrets_manager_core.mock import MockConfig\n'), ((5777, 5788), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5786, 5788), False, 'from click.testing import CliRunner\n'), ((7215, 7239), 'keeper_secrets_manager_core.mock.MockConfig.make_config', 'MockConfig.make_config', ([], {}), '()\n', (7237, 7239), False, 'from keeper_secrets_manager_core.mock import MockConfig\n'), ((7258, 7269), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (7267, 7269), False, 'from click.testing import CliRunner\n'), ((8024, 8070), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {'allow_no_value': '(True)'}), '(allow_no_value=True)\n', (8049, 8070), False, 'import configparser\n'), ((8900, 8924), 'keeper_secrets_manager_core.mock.MockConfig.make_config', 'MockConfig.make_config', ([], {}), '()\n', (8922, 8924), False, 'from keeper_secrets_manager_core.mock import MockConfig\n'), ((9026, 9037), 'click.testing.CliRunner', 
'CliRunner', ([], {}), '()\n', (9035, 9037), False, 'from click.testing import CliRunner\n'), ((10120, 10144), 'keeper_secrets_manager_core.mock.MockConfig.make_config', 'MockConfig.make_config', ([], {}), '()\n', (10142, 10144), False, 'from keeper_secrets_manager_core.mock import MockConfig\n'), ((10167, 10209), 'keeper_secrets_manager_core.mock.MockConfig.make_base64', 'MockConfig.make_base64', ([], {'config': 'mock_config'}), '(config=mock_config)\n', (10189, 10209), False, 'from keeper_secrets_manager_core.mock import MockConfig\n'), ((10228, 10239), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (10237, 10239), False, 'from click.testing import CliRunner\n'), ((10521, 10567), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {'allow_no_value': '(True)'}), '(allow_no_value=True)\n', (10546, 10567), False, 'import configparser\n'), ((11291, 11315), 'keeper_secrets_manager_core.mock.MockConfig.make_config', 'MockConfig.make_config', ([], {}), '()\n', (11313, 11315), False, 'from keeper_secrets_manager_core.mock import MockConfig\n'), ((11338, 11380), 'keeper_secrets_manager_core.mock.MockConfig.make_base64', 'MockConfig.make_base64', ([], {'config': 'mock_config'}), '(config=mock_config)\n', (11360, 11380), False, 'from keeper_secrets_manager_core.mock import MockConfig\n'), ((11399, 11410), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (11408, 11410), False, 'from click.testing import CliRunner\n'), ((12900, 12924), 'keeper_secrets_manager_core.mock.MockConfig.make_config', 'MockConfig.make_config', ([], {}), '()\n', (12922, 12924), False, 'from keeper_secrets_manager_core.mock import MockConfig\n'), ((12947, 12989), 'keeper_secrets_manager_core.mock.MockConfig.make_base64', 'MockConfig.make_base64', ([], {'config': 'mock_config'}), '(config=mock_config)\n', (12969, 12989), False, 'from keeper_secrets_manager_core.mock import MockConfig\n'), ((13008, 13019), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (13017, 
13019), False, 'from click.testing import CliRunner\n'), ((2138, 2194), 'unittest.mock.patch', 'patch', (['"""keeper_secrets_manager_cli.KeeperCli.get_client"""'], {}), "('keeper_secrets_manager_cli.KeeperCli.get_client')\n", (2143, 2194), False, 'from unittest.mock import patch\n'), ((2391, 2402), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2400, 2402), False, 'from click.testing import CliRunner\n'), ((3147, 3193), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {'allow_no_value': '(True)'}), '(allow_no_value=True)\n', (3172, 3193), False, 'import configparser\n'), ((3745, 3770), 'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (3755, 3770), False, 'import json\n'), ((4750, 4775), 'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (4760, 4775), False, 'import json\n'), ((5911, 5939), 'os.path.exists', 'os.path.exists', (['"""keeper.ini"""'], {}), "('keeper.ini')\n", (5925, 5939), False, 'import os\n'), ((6189, 6217), 'os.path.exists', 'os.path.exists', (['"""keeper.ini"""'], {}), "('keeper.ini')\n", (6203, 6217), False, 'import os\n'), ((7406, 7434), 'os.path.exists', 'os.path.exists', (['"""keeper.ini"""'], {}), "('keeper.ini')\n", (7420, 7434), False, 'import os\n'), ((7684, 7712), 'os.path.exists', 'os.path.exists', (['"""keeper.ini"""'], {}), "('keeper.ini')\n", (7698, 7712), False, 'import os\n'), ((8602, 8625), 'json.loads', 'json.loads', (['test_config'], {}), '(test_config)\n', (8612, 8625), False, 'import json\n'), ((9402, 9442), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(True)'}), '(delete=True)\n', (9429, 9442), False, 'import tempfile\n'), ((10440, 10468), 'os.path.exists', 'os.path.exists', (['"""keeper.ini"""'], {}), "('keeper.ini')\n", (10454, 10468), False, 'import os\n'), ((11755, 11796), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (11782, 11796), False, 'import 
tempfile\n'), ((13185, 13214), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (13212, 13214), False, 'import tempfile\n'), ((1467, 1487), 'os.path.exists', 'os.path.exists', (['item'], {}), '(item)\n', (1481, 1487), False, 'import os\n'), ((1513, 1528), 'os.unlink', 'os.unlink', (['item'], {}), '(item)\n', (1522, 1528), False, 'import os\n'), ((1712, 1748), 'keeper_secrets_manager_core.storage.InMemoryKeyValueStorage', 'InMemoryKeyValueStorage', (['mock_config'], {}), '(mock_config)\n', (1735, 1748), False, 'from keeper_secrets_manager_core.storage import InMemoryKeyValueStorage\n'), ((2629, 2668), 'os.path.exists', 'os.path.exists', (['Config.default_ini_file'], {}), '(Config.default_ini_file)\n', (2643, 2668), False, 'import os\n'), ((3057, 3096), 'os.path.exists', 'os.path.exists', (['Config.default_ini_file'], {}), '(Config.default_ini_file)\n', (3071, 3096), False, 'import os\n'), ((6957, 7002), 're.search', 're.search', (['"""D_XXXXX_CI"""', 'config', 're.MULTILINE'], {}), "('D_XXXXX_CI', config, re.MULTILINE)\n", (6966, 7002), False, 'import re\n'), ((9848, 9861), 'json.load', 'json.load', (['fh'], {}), '(fh)\n', (9857, 9861), False, 'import json\n'), ((11971, 12010), 'os.path.exists', 'os.path.exists', (['Config.default_ini_file'], {}), '(Config.default_ini_file)\n', (11985, 12010), False, 'import os\n'), ((12036, 12070), 'os.unlink', 'os.unlink', (['Config.default_ini_file'], {}), '(Config.default_ini_file)\n', (12045, 12070), False, 'import os\n'), ((12373, 12386), 'json.load', 'json.load', (['fh'], {}), '(fh)\n', (12382, 12386), False, 'import json\n'), ((13389, 13428), 'os.path.exists', 'os.path.exists', (['Config.default_ini_file'], {}), '(Config.default_ini_file)\n', (13403, 13428), False, 'import os\n'), ((13454, 13488), 'os.unlink', 'os.unlink', (['Config.default_ini_file'], {}), '(Config.default_ini_file)\n', (13463, 13488), False, 'import os\n'), ((13791, 13804), 'json.load', 'json.load', (['fh'], {}), '(fh)\n', 
(13800, 13804), False, 'import json\n'), ((6791, 6828), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (['config_data'], {}), '(config_data)\n', (6815, 6828), False, 'import base64\n'), ((7346, 7369), 'json.dumps', 'json.dumps', (['json_config'], {}), '(json_config)\n', (7356, 7369), False, 'import json\n'), ((8534, 8571), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (['config_data'], {}), '(config_data)\n', (8558, 8571), False, 'import base64\n'), ((8974, 8997), 'json.dumps', 'json.dumps', (['json_config'], {}), '(json_config)\n', (8984, 8997), False, 'import json\n'), ((12608, 12647), 'os.path.exists', 'os.path.exists', (['Config.default_ini_file'], {}), '(Config.default_ini_file)\n', (12622, 12647), False, 'import os\n'), ((13954, 13993), 'os.path.exists', 'os.path.exists', (['Config.default_ini_file'], {}), '(Config.default_ini_file)\n', (13968, 13993), False, 'import os\n')] |
import os
import pytest
from unittest import mock
from breaking_changes import inspector
from tests import mod
TEST_ROOT = os.path.dirname(__file__)
BASE_ROOT = os.path.abspath(os.path.join(TEST_ROOT, '..'))
def relative(subpath: str) -> str:
return os.path.abspath(os.path.join(TEST_ROOT, subpath))
def func(a, b=None):
pass
def test_argspec():
inspector.function_args(func) == {'args': ['a', 'b'], 'keywords': None, 'defaults': (None,), 'varargs': None}
def test_list_functions():
assert list(inspector.functions(relative('package1/a.py'), BASE_ROOT)) == ['func1', 'func2']
def test_get_public_interface():
inspector.public_interface(mod) == [('public_func', mod.public_func)]
@pytest.mark.parametrize(('pkg_name', 'packages'), [
('package1', ['a', 'b']),
('package2', ['a', 'nested.c'])
])
def test_package_collection(pkg_name: str, packages: list):
assert list(inspector.modules(relative(pkg_name), skip_tests=False)) == packages
@pytest.mark.parametrize(('full', 'base', 'module'), [
('/some/long/path/to/module.py', '/some/long/path/to', 'module'),
('/some/long/path/to/module.py', '/some/long/path', 'to.module'),
])
def test_module_transformation(full, base, module):
assert inspector.module_transform(full, base) == module
| [
"breaking_changes.inspector.public_interface",
"breaking_changes.inspector.module_transform",
"os.path.join",
"os.path.dirname",
"pytest.mark.parametrize",
"breaking_changes.inspector.function_args"
] | [((127, 152), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (142, 152), False, 'import os\n'), ((716, 830), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('pkg_name', 'packages')", "[('package1', ['a', 'b']), ('package2', ['a', 'nested.c'])]"], {}), "(('pkg_name', 'packages'), [('package1', ['a', 'b']),\n ('package2', ['a', 'nested.c'])])\n", (739, 830), False, 'import pytest\n'), ((985, 1180), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('full', 'base', 'module')", "[('/some/long/path/to/module.py', '/some/long/path/to', 'module'), (\n '/some/long/path/to/module.py', '/some/long/path', 'to.module')]"], {}), "(('full', 'base', 'module'), [(\n '/some/long/path/to/module.py', '/some/long/path/to', 'module'), (\n '/some/long/path/to/module.py', '/some/long/path', 'to.module')])\n", (1008, 1180), False, 'import pytest\n'), ((181, 210), 'os.path.join', 'os.path.join', (['TEST_ROOT', '""".."""'], {}), "(TEST_ROOT, '..')\n", (193, 210), False, 'import os\n'), ((276, 308), 'os.path.join', 'os.path.join', (['TEST_ROOT', 'subpath'], {}), '(TEST_ROOT, subpath)\n', (288, 308), False, 'import os\n'), ((368, 397), 'breaking_changes.inspector.function_args', 'inspector.function_args', (['func'], {}), '(func)\n', (391, 397), False, 'from breaking_changes import inspector\n'), ((643, 674), 'breaking_changes.inspector.public_interface', 'inspector.public_interface', (['mod'], {}), '(mod)\n', (669, 674), False, 'from breaking_changes import inspector\n'), ((1245, 1283), 'breaking_changes.inspector.module_transform', 'inspector.module_transform', (['full', 'base'], {}), '(full, base)\n', (1271, 1283), False, 'from breaking_changes import inspector\n')] |
import logging
from logging.handlers import RotatingFileHandler
from flask.logging import default_handler
from app import app
if __name__ == '__main__':
handler = RotatingFileHandler(filename='./log/app.log', maxBytes=1048576, backupCount=3)
formatter = logging.Formatter(fmt='%(asctime)s - %(name)s[line:%(lineno)d] - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
app.logger.addHandler(handler)
""" # flask.logging.create_logger
logger = logging.getLogger('flask.app')
if app.debug and logger.level == logging.NOTSET:
logger.setLevel(logging.DEBUG)
if not has_level_handler(logger):
logger.addHandler(default_handler)
return logger
"""
default_handler.setLevel(logging.INFO)
app.logger.addHandler(default_handler)
app.run(host='0.0.0.0', port=9000)
| [
"app.app.run",
"logging.Formatter",
"logging.handlers.RotatingFileHandler",
"app.app.logger.addHandler",
"flask.logging.default_handler.setLevel"
] | [((168, 246), 'logging.handlers.RotatingFileHandler', 'RotatingFileHandler', ([], {'filename': '"""./log/app.log"""', 'maxBytes': '(1048576)', 'backupCount': '(3)'}), "(filename='./log/app.log', maxBytes=1048576, backupCount=3)\n", (187, 246), False, 'from logging.handlers import RotatingFileHandler\n'), ((263, 395), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""%(asctime)s - %(name)s[line:%(lineno)d] - %(levelname)s - %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(fmt=\n '%(asctime)s - %(name)s[line:%(lineno)d] - %(levelname)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n", (280, 395), False, 'import logging\n'), ((463, 493), 'app.app.logger.addHandler', 'app.logger.addHandler', (['handler'], {}), '(handler)\n', (484, 493), False, 'from app import app\n'), ((783, 821), 'flask.logging.default_handler.setLevel', 'default_handler.setLevel', (['logging.INFO'], {}), '(logging.INFO)\n', (807, 821), False, 'from flask.logging import default_handler\n'), ((826, 864), 'app.app.logger.addHandler', 'app.logger.addHandler', (['default_handler'], {}), '(default_handler)\n', (847, 864), False, 'from app import app\n'), ((870, 904), 'app.app.run', 'app.run', ([], {'host': '"""0.0.0.0"""', 'port': '(9000)'}), "(host='0.0.0.0', port=9000)\n", (877, 904), False, 'from app import app\n')] |
#!/usr/bin/env python
# manual
"""
This script allows you to manually control the simulator or Duckiebot
using the keyboard arrows.
"""
import os
os.sys.path.append("../../gym-duckietown")
import cv2
import sys
import argparse
import pyglet
from pyglet.window import key
import numpy as np
import gym
import gym_duckietown
#from gym_duckietown.envs import DuckietownEnv
from gym_duckietown.envs.duckietown_env import *
from gym_duckietown.wrappers import UndistortWrapper
from NaiveLaneDetection import NaiveLaneDetection
steer = 0
throttle = 0
# from experiments.utils import save_img
env = DuckietownLF(map_name='straight_road',
max_steps=1500,
draw_curve=False,
draw_bbox=False,
domain_rand=False,
frame_rate=30,
frame_skip=1,
camera_width=640,
camera_height=480,
robot_speed=1.20, #MAXIMUM FORWARD ROBOT SPEED
accept_start_angle_deg=5,
full_transparency=False,
user_tile_start=None,
seed=None,
distortion=False,
randomize_maps_on_reset=False
)
env.reset()
env.render()
vertices = np.array([[(0,200), (640,200), (640,480), (0,480)]])
kernel = 5
low_threshold = 50
high_threshold = 150
rho = 1
theta = np.pi/180
threshold = 10
min_line_len = 10
max_line_gap = 10
NLD = NaiveLaneDetection(vertices, kernel, low_threshold, high_threshold, rho, theta, threshold, min_line_len, max_line_gap)
cv2.namedWindow("Lines")
cv2.namedWindow("Averaged Line")
@env.unwrapped.window.event
def on_key_press(symbol, modifiers):
"""
This handler processes keyboard commands that
control the simulation
"""
if symbol == key.BACKSPACE or symbol == key.SLASH:
print('RESET')
env.reset()
env.render()
elif symbol == key.PAGEUP:
env.unwrapped.cam_angle[0] = 0
elif symbol == key.ESCAPE:
env.close()
sys.exit(0)
# Take a screenshot
# UNCOMMENT IF NEEDED - Skimage dependency
# elif symbol == key.RETURN:
# print('saving screenshot')
# img = env.render('rgb_array')
# save_img('screenshot.png', img)
# Register a keyboard handler
key_handler = key.KeyStateHandler()
env.unwrapped.window.push_handlers(key_handler)
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
return cv2.addWeighted(initial_img, α, img, β, γ)
def update(dt):
"""
This function is called at every frame to handle
movement/stepping and redrawing
"""
action = np.array([0.0, 0.0])
if key_handler[key.UP]:
action = np.array([0.44, 0.0])
if key_handler[key.DOWN]:
action = np.array([-0.44, 0])
if key_handler[key.LEFT]:
action = np.array([0.35, +1])
if key_handler[key.RIGHT]:
action = np.array([0.35, -1])
if key_handler[key.SPACE]:
action = np.array([0, 0])
# Speed boost
if key_handler[key.LSHIFT]:
action *= 1.5
obs, reward, done, info = env.step(action)
lines, avg_lines, left_line, right_line = NLD.Perceive(obs)
final = weighted_img(avg_lines, obs)
cv2.imshow("Lines", lines)
cv2.imshow("Averaged Line", cv2.cvtColor(final, cv2.COLOR_BGR2RGB))
cv2.waitKey(1)
print('step_count = %s, reward=%.3f' % (env.unwrapped.step_count, reward))
if key_handler[key.RETURN]:
from PIL import Image
im = Image.fromarray(obs)
im.save('screen.png')
if done:
print('done!')
env.reset()
env.render()
env.render()
pyglet.clock.schedule_interval(update, 1.0 / env.unwrapped.frame_rate)
# Enter main event loop
pyglet.app.run()
env.close()
| [
"PIL.Image.fromarray",
"pyglet.window.key.KeyStateHandler",
"pyglet.clock.schedule_interval",
"pyglet.app.run",
"NaiveLaneDetection.NaiveLaneDetection",
"os.sys.path.append",
"cv2.imshow",
"numpy.array",
"cv2.addWeighted",
"cv2.cvtColor",
"sys.exit",
"cv2.waitKey",
"cv2.namedWindow"
] | [((147, 189), 'os.sys.path.append', 'os.sys.path.append', (['"""../../gym-duckietown"""'], {}), "('../../gym-duckietown')\n", (165, 189), False, 'import os\n'), ((1290, 1346), 'numpy.array', 'np.array', (['[[(0, 200), (640, 200), (640, 480), (0, 480)]]'], {}), '([[(0, 200), (640, 200), (640, 480), (0, 480)]])\n', (1298, 1346), True, 'import numpy as np\n'), ((1478, 1600), 'NaiveLaneDetection.NaiveLaneDetection', 'NaiveLaneDetection', (['vertices', 'kernel', 'low_threshold', 'high_threshold', 'rho', 'theta', 'threshold', 'min_line_len', 'max_line_gap'], {}), '(vertices, kernel, low_threshold, high_threshold, rho,\n theta, threshold, min_line_len, max_line_gap)\n', (1496, 1600), False, 'from NaiveLaneDetection import NaiveLaneDetection\n'), ((1597, 1621), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Lines"""'], {}), "('Lines')\n", (1612, 1621), False, 'import cv2\n'), ((1622, 1654), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Averaged Line"""'], {}), "('Averaged Line')\n", (1637, 1654), False, 'import cv2\n'), ((2345, 2366), 'pyglet.window.key.KeyStateHandler', 'key.KeyStateHandler', ([], {}), '()\n', (2364, 2366), False, 'from pyglet.window import key\n'), ((3680, 3750), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['update', '(1.0 / env.unwrapped.frame_rate)'], {}), '(update, 1.0 / env.unwrapped.frame_rate)\n', (3710, 3750), False, 'import pyglet\n'), ((3776, 3792), 'pyglet.app.run', 'pyglet.app.run', ([], {}), '()\n', (3790, 3792), False, 'import pyglet\n'), ((2482, 2524), 'cv2.addWeighted', 'cv2.addWeighted', (['initial_img', 'α', 'img', 'β', 'γ'], {}), '(initial_img, α, img, β, γ)\n', (2497, 2524), False, 'import cv2\n'), ((2660, 2680), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (2668, 2680), True, 'import numpy as np\n'), ((3258, 3284), 'cv2.imshow', 'cv2.imshow', (['"""Lines"""', 'lines'], {}), "('Lines', lines)\n", (3268, 3284), False, 'import cv2\n'), ((3361, 3375), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], 
{}), '(1)\n', (3372, 3375), False, 'import cv2\n'), ((2727, 2748), 'numpy.array', 'np.array', (['[0.44, 0.0]'], {}), '([0.44, 0.0])\n', (2735, 2748), True, 'import numpy as np\n'), ((2796, 2816), 'numpy.array', 'np.array', (['[-0.44, 0]'], {}), '([-0.44, 0])\n', (2804, 2816), True, 'import numpy as np\n'), ((2864, 2884), 'numpy.array', 'np.array', (['[0.35, +1]'], {}), '([0.35, +1])\n', (2872, 2884), True, 'import numpy as np\n'), ((2933, 2953), 'numpy.array', 'np.array', (['[0.35, -1]'], {}), '([0.35, -1])\n', (2941, 2953), True, 'import numpy as np\n'), ((3002, 3018), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (3010, 3018), True, 'import numpy as np\n'), ((3317, 3355), 'cv2.cvtColor', 'cv2.cvtColor', (['final', 'cv2.COLOR_BGR2RGB'], {}), '(final, cv2.COLOR_BGR2RGB)\n', (3329, 3355), False, 'import cv2\n'), ((3531, 3551), 'PIL.Image.fromarray', 'Image.fromarray', (['obs'], {}), '(obs)\n', (3546, 3551), False, 'from PIL import Image\n'), ((2064, 2075), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2072, 2075), False, 'import sys\n')] |
from collections import Counter, defaultdict
def fizz_buzz_counter():
values = []
for i in range(1, 101):
if i % 3 == 0 and i % 5 == 0:
values.append("fizzbuzz")
elif i % 3 == 0:
values.append("fizz")
elif i % 5 == 0:
values.append("buzz")
else:
values.append("int")
return Counter(values)
def fizz_buzz_defaultdict():
values = defaultdict(int)
for i in range(1, 101):
if i % 3 == 0 and i % 5 == 0:
values["fizzbuzz"] += 1
elif i % 3 == 0:
values["fizz"] += 1
elif i % 5 == 0:
values["buzz"] += 1
else:
values["int"] += 1
return values
print(dict(fizz_buzz_counter()))
print(dict(fizz_buzz_defaultdict())) | [
"collections.Counter",
"collections.defaultdict"
] | [((368, 383), 'collections.Counter', 'Counter', (['values'], {}), '(values)\n', (375, 383), False, 'from collections import Counter, defaultdict\n'), ((428, 444), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (439, 444), False, 'from collections import Counter, defaultdict\n')] |
import base64
import itertools
import json
import requests
from presqt.json_schemas.schema_handlers import schema_validator
from presqt.targets.github.utilities import validation_check
from presqt.targets.github.utilities.utils.upload_extra_metadata import upload_extra_metadata
from presqt.utilities import PresQTError
def github_upload_metadata(token, project_id, metadata_dict):
    """
    Upload the metadata of this PresQT action at the top level of the repo.
    Parameters
    ----------
    token : str
        The user's GitHub token
    project_id : str
        The id of the top level project that the upload took place on
    metadata_dict : dict
        The metadata to be written to the repo

    Raises
    ------
    PresQTError
        If any GitHub API call returns an unexpected status code.
    """
    header, username = validation_check(token)
    # Resolve the repo name from its numeric id; needed to build the contents URL.
    project_data = requests.get(
        "https://api.github.com/repositories/{}".format(project_id), headers=header)
    if project_data.status_code == 200:
        project_name = project_data.json()['name']
    else:
        raise PresQTError(
            "The request to create a metadata file has resulted in a {} error code from GitHub.".format(
                project_data.status_code))
    base_put_url = "https://api.github.com/repos/{}/{}/contents/".format(username, project_name)
    # Fetch any existing metadata file; its 'sha' is required by GitHub to update it.
    metadata_file_data = requests.get('{}PRESQT_FTS_METADATA.json'.format(base_put_url),
                                      headers=header).json()
    try:
        sha = metadata_file_data['sha']
    except KeyError:
        sha = None
    # If a metadata file already exists then grab its contents
    if sha:
        base64_metadata = base64.b64decode(metadata_file_data['content'])
        updated_metadata = json.loads(base64_metadata)
        if schema_validator('presqt/json_schemas/metadata_schema.json', updated_metadata) is not True:
            # We need to change the file name, this metadata is improperly formatted and
            # therefore invalid.
            invalid_base64_metadata = base64.b64encode(base64_metadata).decode('utf-8')
            rename_payload = {
                "message": "PresQT Invalid Upload",
                "committer": {
                    "name": "PresQT",
                    "email": "N/A"},
                "content": invalid_base64_metadata}
            response = requests.put('{}{}'.format(base_put_url, 'INVALID_PRESQT_FTS_METADATA.json'),
                                    headers=header,
                                    data=json.dumps(rename_payload))
            if response.status_code != 201:
                raise PresQTError(
                    "The request to rename the invalid metadata file has returned a {} error code from Github.".format(
                        response.status_code))
        else:
            # Loop through each 'action' in both metadata files and make a new list of them.
            joined_actions = [entry for entry in itertools.chain(metadata_dict['actions'],
                                                             updated_metadata['actions'])]
            joined_keywords = [entry for entry in itertools.chain(metadata_dict['allKeywords'],
                                                              updated_metadata['allKeywords'])]
            updated_metadata['actions'] = joined_actions
            # Deduplicate keywords; order of the merged keyword list is not preserved.
            updated_metadata['allKeywords'] = list(set(joined_keywords))
            updated_metadata_bytes = json.dumps(updated_metadata, indent=4).encode('utf-8')
            updated_base64_metadata = base64.b64encode(updated_metadata_bytes).decode('utf-8')
            update_payload = {
                "message": "PresQT Update",
                "committer": {
                    "name": "PresQT",
                    "email": "N/A"},
                "branch": "master",
                "content": updated_base64_metadata,
                "sha": sha
            }
            # Now we need to update the metadata file with this updated metadata
            response = requests.put('{}{}'.format(base_put_url, 'PRESQT_FTS_METADATA.json'),
                                    headers=header,
                                    data=json.dumps(update_payload))
            if response.status_code != 200:
                raise PresQTError(
                    "The request to create a metadata file has resulted in a {} error code from GitHub.".format(
                        response.status_code))
            # Merged-update path is complete; skip the fresh-create path below.
            return
    # No valid existing metadata: create (or overwrite the invalid) file from scratch.
    metadata_bytes = json.dumps(metadata_dict, indent=4).encode('utf-8')
    base64_metadata = base64.b64encode(metadata_bytes).decode('utf-8')
    payload = {
        "message": "PresQT Upload",
        "sha": sha,
        "committer": {
            "name": "PresQT",
            "email": "N/A"},
        "content": base64_metadata}
    response = requests.put('{}{}'.format(base_put_url, 'PRESQT_FTS_METADATA.json'),
                            headers=header,
                            data=json.dumps(payload))
    if response.status_code != 201 and response.status_code != 200:
        raise PresQTError(
            "The request to create a metadata file has resulted in a {} error code from GitHub.".format(
                response.status_code))
    # Add extra metadata to the top level resource
    if 'extra_metadata' in metadata_dict.keys() and metadata_dict['extra_metadata']:
        attribute_url = "https://api.github.com/repos/{}/{}".format(username, project_name)
        upload_extra_metadata(metadata_dict['extra_metadata'], header, attribute_url)
"itertools.chain",
"json.loads",
"base64.b64encode",
"json.dumps",
"presqt.targets.github.utilities.validation_check",
"base64.b64decode",
"presqt.targets.github.utilities.utils.upload_extra_metadata.upload_extra_metadata",
"presqt.json_schemas.schema_handlers.schema_validator"
] | [((742, 765), 'presqt.targets.github.utilities.validation_check', 'validation_check', (['token'], {}), '(token)\n', (758, 765), False, 'from presqt.targets.github.utilities import validation_check\n'), ((1601, 1648), 'base64.b64decode', 'base64.b64decode', (["metadata_file_data['content']"], {}), "(metadata_file_data['content'])\n", (1617, 1648), False, 'import base64\n'), ((1676, 1703), 'json.loads', 'json.loads', (['base64_metadata'], {}), '(base64_metadata)\n', (1686, 1703), False, 'import json\n'), ((5397, 5474), 'presqt.targets.github.utilities.utils.upload_extra_metadata.upload_extra_metadata', 'upload_extra_metadata', (["metadata_dict['extra_metadata']", 'header', 'attribute_url'], {}), "(metadata_dict['extra_metadata'], header, attribute_url)\n", (5418, 5474), False, 'from presqt.targets.github.utilities.utils.upload_extra_metadata import upload_extra_metadata\n'), ((1716, 1794), 'presqt.json_schemas.schema_handlers.schema_validator', 'schema_validator', (['"""presqt/json_schemas/metadata_schema.json"""', 'updated_metadata'], {}), "('presqt/json_schemas/metadata_schema.json', updated_metadata)\n", (1732, 1794), False, 'from presqt.json_schemas.schema_handlers import schema_validator\n'), ((4422, 4457), 'json.dumps', 'json.dumps', (['metadata_dict'], {'indent': '(4)'}), '(metadata_dict, indent=4)\n', (4432, 4457), False, 'import json\n'), ((4496, 4528), 'base64.b64encode', 'base64.b64encode', (['metadata_bytes'], {}), '(metadata_bytes)\n', (4512, 4528), False, 'import base64\n'), ((4898, 4917), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (4908, 4917), False, 'import json\n'), ((1968, 2001), 'base64.b64encode', 'base64.b64encode', (['base64_metadata'], {}), '(base64_metadata)\n', (1984, 2001), False, 'import base64\n'), ((2454, 2480), 'json.dumps', 'json.dumps', (['rename_payload'], {}), '(rename_payload)\n', (2464, 2480), False, 'import json\n'), ((2884, 2954), 'itertools.chain', 'itertools.chain', (["metadata_dict['actions']", 
"updated_metadata['actions']"], {}), "(metadata_dict['actions'], updated_metadata['actions'])\n", (2899, 2954), False, 'import itertools\n'), ((3071, 3149), 'itertools.chain', 'itertools.chain', (["metadata_dict['allKeywords']", "updated_metadata['allKeywords']"], {}), "(metadata_dict['allKeywords'], updated_metadata['allKeywords'])\n", (3086, 3149), False, 'import itertools\n'), ((3385, 3423), 'json.dumps', 'json.dumps', (['updated_metadata'], {'indent': '(4)'}), '(updated_metadata, indent=4)\n', (3395, 3423), False, 'import json\n'), ((3478, 3518), 'base64.b64encode', 'base64.b64encode', (['updated_metadata_bytes'], {}), '(updated_metadata_bytes)\n', (3494, 3518), False, 'import base64\n'), ((4114, 4140), 'json.dumps', 'json.dumps', (['update_payload'], {}), '(update_payload)\n', (4124, 4140), False, 'import json\n')] |
#/usr/bin/env python
#-*- coding: utf-8 -*-
#Refer http://www.wooyun.org/bugs/wooyun-2015-0135532
#__Author__ = 上善若水
#_PlugName_ = tianrongxin_sql Plugin
#_FileName_ = tianrongxin_sql.py
import re
def assign(service, arg):
    """Accept only 'topsec' services; return (True, arg) or None otherwise."""
    return (True, arg) if service == "topsec" else None
def audit(arg):
    """Probe two injection endpoints and report URLs whose response leaks a PHP error path."""
    payloads = (
        'policy/cap/delete.php?returnfile=timegrouptable.php&TABLE=timegroup&deletename=sqltestvul%df%27&name=timegroupname',
        'policy/kw/delkeywd.php?kwtypename=sqltestvul%df%27',
    )
    for payload in payloads:
        target = arg + payload
        code, head, res, errcode, _url = curl.curl2(target)
        # A "thrown in <b>...</b>" fragment in the body means the PHP error page fired.
        leaked = re.findall('thrown in <b>(.*?)</b>', res)
        if code == 200 and leaked:
            security_hole(target)
if __name__ == '__main__':
    # dummy supplies the curl/security_hole helpers that audit() relies on
    # when this plugin is run standalone.
    from dummy import *
    audit(assign('topsec', 'http://172.16.31.10:8080/')[1])
"re.findall"
] | [((628, 669), 're.findall', 're.findall', (['"""thrown in <b>(.*?)</b>"""', 'res'], {}), "('thrown in <b>(.*?)</b>', res)\n", (638, 669), False, 'import re\n')] |
#!/usr/bin/env python3
import hashlib
import re
import requests
import sys
from pprint import pprint as pp
from .exceptions import PCloudException
from .connection import AbstractPCloudConnection
from .pcloudbin import PCloudBinaryConnection
PCLOUD_SERVER_SUFFIX = '.pcloud.com' # only allow downloads from pcloud servers
class PCloudAPIMetaclass(type):
    """Metaclass that auto-generates one thin wrapper method per pCloud API call."""
    @classmethod
    def __prepare__(cls, name, bases):
        # __prepare__ returns the initial class namespace, so every name listed
        # below becomes a method on the class before its body even runs.
        methods = """
            getaudiolink getpubziplink deletefolder getvideolink
            file_checksum cancelsharerequest getziplink currentserver
            sendverificationemail file_lock file_pwrite getpublinkdownload
            file_truncate getpubthumblink getthumb listpublinks listshares
            getpubaudiolink savepubthumb deletefile lostpassword
            revertrevision resetpassword acceptshare userinfo diff
            feedback uploadprogress listrevisions copypubfile copytolink
            verifyemail getdigest file_write renamefile getthumbslinks
            file_close createuploadlink notifyuploadlink getfilelink
            changepassword savezip getpubthumb getthumblink file_pread
            renamefolder copyfile file_seek gettreepublink deletepublink
            checksumfile verifyitunespurchase supportedlanguages
            gethlslink uploadfile file_open savepubzip showpublink
            listplshort getfolderpublink uploadtolink createfolder
            savethumb file_pread_ifmod setlanguage getpubzip
            deleteuploadlink showuploadlink getzip listitunesproducts
            sharefolder register declineshare sharerequestinfo
            listfolder file_read file_size downloadfile invite
            getcertificate changeuploadlink changeshare changepublink
            listuploadlinks normalizehash getpubthumbslinks
            uploadlinkprogress removeshare getfilepublink
            deletefolderrecursive
            """.strip().split()
        # The outer lambda is invoked immediately to bind `method` at definition
        # time; without it every wrapper would close over the loop's last value
        # (the classic late-binding closure pitfall).
        return {method :
                (lambda method :
                    (lambda self, **kwargs:
                        self.make_request(method, **kwargs))
                )(method)
                for method in methods}
class PCloudAPI(metaclass=PCloudAPIMetaclass):
    """A stripped down of the PCloudAPI.
    All pcloud api methods are available as .method shortcut for
    make_request(method, ...).
    Exceptions that can be raised during correct operation:
    (PCloudException, requests.RequestException, IOError)
    """
    def __init__(self, connection=PCloudBinaryConnection, debug=False):
        """Initializes the API.
        connection can be either a concrete class of AbstractPCloudConnection
        or an AbstractPCloudConnection-derived object.
        If debug is true dumps the parameters
        """
        # BUGFIX: issubclass() raises TypeError when its first argument is an
        # instance, so the documented "class or instance" contract previously
        # crashed for instances. Only run the subclass test on actual classes.
        if isinstance(connection, type) and issubclass(connection, AbstractPCloudConnection):
            connection = connection().connect()
        assert isinstance(connection, AbstractPCloudConnection), \
            ("PCloud instance expected, got %s" % connection.__class__)
        self.connection = connection
        self.debug = debug

    def make_request(self, method, check_result=True, **params):
        """Performs send_command through the connection.
        :param method: the method to call
        :param **params: the parameters for the connection
        :param _data: file data in the form of bytes or stream of bytes
        :param check_result: check that the ['result'] == 0 and raise if not
        :returns response in the form of a dictionary
        :raises PCloudException
        """
        if self.debug:
            pp((method, params), stream=sys.stderr)
        response = self.connection.send_command(method, **params)
        if self.debug:
            pp(response, stream=sys.stderr)
        if check_result:
            # pCloud reports success as result == 0; anything else is an error code.
            result = response.get('result', None)
            if result != 0:
                raise PCloudException(result_code=result)
        return response

    def login(self, username, password):
        """Perform login though the connection.
        :param username: username
        :param password: password
        :returns authentication token
        Also sets .auth and in turn .connection.auth to the returned token.
        """
        # pCloud digest auth: sha1(password + sha1(lower(username)) + digest),
        # so the plaintext password never travels over the wire.
        digest = self.make_request('getdigest')['digest']
        passworddigest = hashlib.sha1(
            (password +
             hashlib.sha1(username.lower().encode('utf-8')
                          ).hexdigest().lower() +
             digest).encode('utf-8')
        ).hexdigest()
        auth = self.make_request('userinfo',
                                 getauth=1,
                                 username=username,
                                 digest=digest,
                                 passworddigest=passworddigest)['auth']
        self.auth = auth
        return auth

    def get_folderid(self, path):
        """Return the numeric folderid of the folder at `path`."""
        return self.make_request('listfolder',
                                 path=path,
                                 nofiles=1,
                                 )['metadata']['folderid']

    def create_directory(self, path):
        """Creates directory recursively.
        Does not raise any errors if the file exists.
        """
        if path == '/':
            return  # HACK: pcloud fails otherwise
        if path == "":
            return  # nothing to do
        # really ugly, sadly there is no mkdir -p
        try:
            self.make_request('createfolder', path=path)
        except PCloudException as e:
            if e.result_code == 2002:
                # parent does not exist
                # stack danger
                # The regex strips the last path component, recursing on the parent.
                self.create_directory(re.sub('(^/?|/+)[^/]+/?$', '', path))
                self.make_request('createfolder', path=path)
            elif e.result_code == 2004:
                # file/folder exists, assume everything is OK
                pass
            else:
                raise

    def download(self, remote_path, local_path, progress_callback=None,
                 enforced_server_suffix=PCLOUD_SERVER_SUFFIX):
        """Downloads file from remote_path to local_path.
        :param progress_callback: called each time with the number of bytes
            written in the iteration
        :param enforced_server_suffix: only allow downloads from servers having
            the expected suffix (this together with ssl prevents a downloading
            of non-pcloud controlled resource)
        :returns pcloud api response
        """
        response = self.make_request('getfilelink',
                                     path=remote_path,
                                     forcedownload=1)
        server = response['hosts'][0]  # should be the closest server
        if enforced_server_suffix:
            # Reject hostnames with path separators or a foreign domain so a
            # malicious response cannot redirect the download elsewhere.
            if '/' in server or not server.lower().endswith(enforced_server_suffix):
                raise ValueError(
                    "Received download server {!r} which does not match expected suffix {!r}".format(
                        server, enforced_server_suffix
                    )
                )
        url = "{protocol}://{server}:{port}{path}".format(
            protocol=self.connection.use_ssl and 'https' or 'http',
            server=server,
            port=self.connection.use_ssl and 443 or 80,
            path=response['path']
        )
        r = requests.get(url, stream=True, allow_redirects=False, timeout=self.connection.timeout)
        r.raise_for_status()
        with open(local_path, 'wb') as fd:
            for chunk in r.iter_content(8192):
                written = fd.write(chunk)
                if progress_callback:
                    progress_callback(written)
        return response

    def upload(self, local_path, remote_path,
               create_parent=True, progress_callback=None):
        """Uploads file from local_path to remote_path.
        :param create_parent: whether to create the parent
        :param progress_callback: called each time with the number of bytes
            written in the iteration
        :returns pcloud api response
        """
        remote_dir, filename = remote_path.rsplit('/', 1)
        if create_parent:
            self.create_directory(remote_dir)
        with open(local_path, 'rb') as fd:
            response = self.make_request('uploadfile',
                                         _data=fd,
                                         path=remote_dir or '/',
                                         filename=filename,
                                         nopartial=1,
                                         _data_progress_callback=progress_callback)
        if not response['fileids']:
            raise PCloudException("Upload failed, no files reported back")
        return response

    def exists_file(self, remote_path):
        """Checks if file exists. Does not work for folders."""
        try:
            self.make_request('checksumfile',
                              path=remote_path)
            return True
        except PCloudException as e:
            # 2002: parent missing, 2009: file not found.
            if e.result_code in [2002, 2009]:
                return False
            else:
                raise

    def delete_file(self, remote_path):
        """Delete file at remote_path.
        NOTE(review): returns False when the file is missing but None on
        success; callers must not treat the return value as a success flag.
        """
        try:
            self.make_request('deletefile',
                              path=remote_path)
        except PCloudException as e:
            if e.result_code in [2002, 2009]:
                return False
            else:
                raise

    @property
    def auth(self):
        """Authentication token, delegated to the underlying connection."""
        return self.connection.auth

    @auth.setter
    def auth(self, auth):
        self.connection.auth = auth
| [
"re.sub",
"pprint.pprint",
"requests.get"
] | [((7559, 7650), 'requests.get', 'requests.get', (['url'], {'stream': '(True)', 'allow_redirects': '(False)', 'timeout': 'self.connection.timeout'}), '(url, stream=True, allow_redirects=False, timeout=self.\n connection.timeout)\n', (7571, 7650), False, 'import requests\n'), ((3743, 3782), 'pprint.pprint', 'pp', (['(method, params)'], {'stream': 'sys.stderr'}), '((method, params), stream=sys.stderr)\n', (3745, 3782), True, 'from pprint import pprint as pp\n'), ((3884, 3915), 'pprint.pprint', 'pp', (['response'], {'stream': 'sys.stderr'}), '(response, stream=sys.stderr)\n', (3886, 3915), True, 'from pprint import pprint as pp\n'), ((5866, 5902), 're.sub', 're.sub', (['"""(^/?|/+)[^/]+/?$"""', '""""""', 'path'], {}), "('(^/?|/+)[^/]+/?$', '', path)\n", (5872, 5902), False, 'import re\n')] |
import math
import os
import time
import numpy as np
from torch.utils.tensorboard import SummaryWriter
import utils.loss as loss
import utils.tensorboard as utb
def get_probs(length, exp):
    """Return a normalized probability vector with `length` entries.

    Weights grow as (i / 100) ** exp for i = 1..length, and the top 10%
    of entries are clamped to the weight at the 90th-percentile index
    before normalization.
    """
    weights = np.power(np.arange(1, length + 1) / 100.0, exp)
    cutoff = int(0.9 * length)
    # Flatten the tail so the largest raw weights do not dominate.
    weights[cutoff:] = weights[cutoff]
    return weights / weights.sum()
def get_probs2(length, dist, prop):
    """Return a probability vector mixing a uniform and a power-law term.

    The result is (1 - prop) * uniform + prop * normalized power-law
    weights (i / 100) ** dist, with the top 0.1% of entries clamped.
    """
    weights = np.power(np.arange(1, length + 1) / 100.0, dist)
    cutoff = int(0.999 * length)
    weights[cutoff:] = weights[cutoff]
    uniform_part = (1 - prop) / length
    return uniform_part + prop * weights / weights.sum()
def get_probs_from_ll(base_dir, T, l_dis):
    """Build a sampling distribution where high-likelihood items get low probability.

    Loads per-item log-likelihoods from the TensorBoard events under
    base_dir/train/loglikelihood/<T>, then assigns the smallest probability
    from get_probs() to the item with the largest log-likelihood.
    """
    run_dir = os.path.join(base_dir, 'train', 'loglikelihood', str(T))
    event_acc = utb.load_event_accumulator(run_dir)
    ll_list, sigma_list = utb.load_loglikelihood(event_acc)
    # Item indices ordered from largest to smallest log-likelihood.
    idx_sorted = np.argsort(ll_list)[::-1]
    # Probabilities ordered from smallest to largest.
    probs_ordered = get_probs(len(idx_sorted), l_dis)
    probs = np.ones(len(probs_ordered))
    for rank, item in enumerate(idx_sorted):
        probs[item] = probs_ordered[rank]
    # Every slot must have been overwritten (no leftover 1.0 sentinel values).
    assert np.sum(probs == 1) == 0
    return probs
def log_sum_exp_trick(value_list):
    """Compute log(sum(exp(value_list))) without overflow.

    Shifts the inputs by their maximum before exponentiating (the
    standard log-sum-exp trick) so large values never overflow exp().
    """
    peak = np.max(value_list)
    shifted_exp = np.exp(value_list - peak)
    return peak + np.log(shifted_exp.sum())
SIGMA_LIST = np.linspace(0.01, 0.3, 200)
class ApproxLL:
    """Approximate per-image log-likelihood estimator via importance sampling.

    Samples latents around an inferred code z_c with Gaussian noise of
    varying sigma, decodes them with `model`, and accepts samples whose
    decoded PSNR against the reconstruction exceeds threshold T. Results
    are logged to TensorBoard under `folder`/loglikelihood/<T>.
    """
    def __init__(self, model, scaler, folder, N=128, N_max=10000, T=40, sigma_list=None):
        self.folder = os.path.join(folder, 'loglikelihood', str(T))
        self.model = model          # generative model exposing sample3(z)
        self.scaler = scaler        # data scaler with inverse_transform()
        self.bs = 256               # decoding batch size
        self.k = 5                  # growth factor for the sample budget
        self.sigma_list = sigma_list if sigma_list else SIGMA_LIST
        # Round N and N_max up to whole batches so slicing stays aligned.
        n_iter = math.ceil(N / self.bs)
        self.N = n_iter * self.bs
        self.n_sigma = len(self.sigma_list)
        n_iter = math.ceil(N_max / self.bs)
        self.N_max = n_iter * self.bs
        self.T = T                  # PSNR acceptance threshold
        self.writer = SummaryWriter(log_dir=self.folder)
    def process_output(self, x):
        """Undo scaling and convert a CHW batch to integer HWC images."""
        assert x.shape[1] <= 3, 'Error: Data should be in CHW format'
        dims = x.shape
        x = self.scaler.inverse_transform(x.reshape(dims[0], -1)).reshape(dims).astype(int)
        x = np.transpose(x, [0, 2, 3, 1])  # CHW --> HWC
        return x
    def compute_ll(self, x_recons, z_infer):
        """Estimate the log-likelihood of every image, resuming from prior runs.

        Progress is recovered from the TensorBoard event file, so a crashed
        run restarts at the last unfinished image.
        """
        n_imgs, z_dim = z_infer.shape
        for i in range(len(self.sigma_list)):
            self.writer.add_scalar('sigma_list', self.sigma_list[i], i)
        print('Starting analysis non-isotropic | n_imgs: {}'.format(n_imgs))
        if self.N > self.N_max:
            print('Error: N > N_max {} > {}'.format(self.N, self.N_max))
        init_time = time.time()
        # ll_list, sigma_i_list, ll_evolution_list = self.load_data(n_imgs)
        event_acc = utb.load_event_accumulator(self.folder)
        ll, _ = utb.load_loglikelihood(event_acc)
        # Resume at the last image that was in progress when the run stopped.
        init_img = max(0, len(ll) - 1)
        for i in range(init_img, n_imgs):
            # if ll_list[i] < 0:
            #     summary_str = '\n[{}] LL={} | sigma={}'
            #     print(summary_str.format(i, ll_list[i], sigma_i_list[i]))
            #     continue
            cur_time = time.time()
            ll = self.compute_ll_img(x_recons[i], z_infer[i, :], str(i))
            time_total = time.time() - init_time
            time_epoch = time.time() - cur_time
            min_epochs = int(time_epoch / 60)
            summary_str = '\n[{}]Time: {}:{} | Total time: {} log10 = {}'
            print(summary_str.format(i, min_epochs, int(time_epoch), int(time_total / 60), ll))
        print('Analysis non-isotropic completed | n_imgs: {}'.format(n_imgs))
        self.writer.close()
        return
    def compute_ll_img(self, x_recons, z_c, img_idx):
        """Sweep sigma_list for one image and return its log10-likelihood estimate."""
        z_dim = z_c.shape[-1]
        # Broadcast the reconstruction across a full batch for PSNR comparison.
        x_tmp = np.tile(x_recons, [self.bs, 1, 1, 1])
        N_i = self.N
        j = 0
        while j < self.n_sigma:
            sigma = self.sigma_list[j]
            accepted_samples_count = 0
            tries = 0
            # Retry (up to 5 times) until at least one sample clears the PSNR bar.
            while accepted_samples_count == 0:
                psnr_tmp, log_ratio_p_q, log_ratio_1_q = self.get_psnr_ratio(z_c, x_tmp, N_i, sigma, z_dim)
                accepted_samples = psnr_tmp > self.T
                accepted_samples_count = np.sum(accepted_samples)
                assert tries < 5, 'There are not accepted samples in img with id {}'.format(img_idx)
                tries += 1
            N = np.log(len(log_ratio_p_q))
            ll_i = self.get_loglikelihood(log_ratio_p_q[accepted_samples], N)
            # print('IDX {} ll {} sigma {}'.format(img_idx, ll_i, sigma))
            self.writer.add_histogram('log(weights)/{}'.format(img_idx), log_ratio_p_q, j)
            self.writer.add_scalar('loglikelihood/{}'.format(img_idx), ll_i, j)
            self.writer.add_scalar('N_i/{}'.format(img_idx), N_i, j)
            if accepted_samples_count < 0.95 * N_i or self.T == 40:
                j = j + 1
            else:
                # Nearly everything is accepted: duplicate the point at the
                # next two sigma steps and skip ahead.
                self.writer.add_scalar('loglikelihood/{}'.format(img_idx), ll_i, j + 1)
                self.writer.add_scalar('loglikelihood/{}'.format(img_idx), ll_i, j + 2)
                self.writer.add_scalar('N_i/{}'.format(img_idx), N_i, j + 1)
                self.writer.add_scalar('N_i/{}'.format(img_idx), N_i, j + 2)
                j = j + 3
            if accepted_samples_count <= N_i / 10:
                # Acceptance rate is low: grow the sample budget (capped at N_max).
                if N_i == self.N_max:
                    break
                n_iter = math.ceil(N_i * self.k / self.bs)
                N_i = min(n_iter * self.bs, self.N_max)
                print(N_i)
        # Convert natural log to log10.
        return ll_i / np.log(10)
    def get_psnr_ratio(self, z_c, x_tmp, N_i, sigma, z_dim):
        """Draw N_i perturbed latents, decode them, and return PSNRs and log-weights."""
        n_iter = math.ceil(N_i / self.bs)
        psnr_tmp = np.zeros(N_i)
        log_ratio_p_q = np.zeros(N_i)
        log_ratio_1_q = np.zeros(N_i)
        for n in range(n_iter):
            z_c_tile = np.tile(z_c, [self.bs, 1])
            z_tmp = z_c_tile + np.random.normal(0, sigma, [self.bs, z_dim])
            x_gener = self.model.sample3(z_tmp)
            x_gener = self.process_output(x_gener)
            log_ratio_p_q[n * self.bs:(n + 1) * self.bs] = self.log_ratio_p_q(z_tmp, z_c_tile, sigma)
            log_ratio_1_q[n * self.bs:(n + 1) * self.bs] = self.log_ratio_1_q(z_tmp, z_c_tile, sigma)
            psnr_tmp[n * self.bs:(n + 1) * self.bs] = loss.PSNR(x_gener, x_tmp, axis=(1, 2, 3))
        return psnr_tmp, log_ratio_p_q, log_ratio_1_q
    def log_ratio_p_q(self, z, z_c, sigma):
        """Log importance weight log p(z) - log q(z | z_c, sigma), up to constants."""
        z_dim = z.shape[-1]
        return 1 / 2 * (np.sum((z - z_c) ** 2, axis=-1) / (sigma ** 2) - np.sum(z ** 2, axis=-1)) + z_dim * np.log(
            sigma)
    def log_ratio_1_q(self, z, z_c, sigma):
        """Negative log density of the Gaussian proposal, -log q(z | z_c, sigma)."""
        z_dim = z.shape[-1]
        a = 1 / 2 * (np.sum((z - z_c) ** 2, axis=-1) / (sigma ** 2))
        a2 = z_dim * np.log(sigma)
        a3 = z_dim * np.log(2 * np.pi) / 2
        return a + a2 + a3
    def get_loglikelihood(self, log_ratio_p_q_accepted, N):
        """
        Compute the log likelihood using the log_sum_trick
        :param log_ratio_p_q_accepted:
        :param N:
        :return:
        """
        return log_sum_exp_trick(log_ratio_p_q_accepted) - N
| [
"numpy.random.normal",
"torch.utils.tensorboard.SummaryWriter",
"numpy.tile",
"math.ceil",
"utils.loss.PSNR",
"numpy.log",
"numpy.max",
"numpy.argsort",
"numpy.sum",
"numpy.linspace",
"utils.tensorboard.load_event_accumulator",
"numpy.zeros",
"numpy.exp",
"numpy.transpose",
"time.time",
... | [((1385, 1412), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.3)', '(200)'], {}), '(0.01, 0.3, 200)\n', (1396, 1412), True, 'import numpy as np\n'), ((733, 766), 'utils.tensorboard.load_loglikelihood', 'utb.load_loglikelihood', (['event_acc'], {}), '(event_acc)\n', (755, 766), True, 'import utils.tensorboard as utb\n'), ((1247, 1265), 'numpy.max', 'np.max', (['value_list'], {}), '(value_list)\n', (1253, 1265), True, 'import numpy as np\n'), ((785, 804), 'numpy.argsort', 'np.argsort', (['ll_list'], {}), '(ll_list)\n', (795, 804), True, 'import numpy as np\n'), ((1152, 1170), 'numpy.sum', 'np.sum', (['(probs == 1)'], {}), '(probs == 1)\n', (1158, 1170), True, 'import numpy as np\n'), ((1353, 1369), 'numpy.log', 'np.log', (['sumOfExp'], {}), '(sumOfExp)\n', (1359, 1369), True, 'import numpy as np\n'), ((1771, 1793), 'math.ceil', 'math.ceil', (['(N / self.bs)'], {}), '(N / self.bs)\n', (1780, 1793), False, 'import math\n'), ((1890, 1916), 'math.ceil', 'math.ceil', (['(N_max / self.bs)'], {}), '(N_max / self.bs)\n', (1899, 1916), False, 'import math\n'), ((1997, 2031), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'self.folder'}), '(log_dir=self.folder)\n', (2010, 2031), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2263, 2292), 'numpy.transpose', 'np.transpose', (['x', '[0, 2, 3, 1]'], {}), '(x, [0, 2, 3, 1])\n', (2275, 2292), True, 'import numpy as np\n'), ((2730, 2741), 'time.time', 'time.time', ([], {}), '()\n', (2739, 2741), False, 'import time\n'), ((2839, 2878), 'utils.tensorboard.load_event_accumulator', 'utb.load_event_accumulator', (['self.folder'], {}), '(self.folder)\n', (2865, 2878), True, 'import utils.tensorboard as utb\n'), ((2895, 2928), 'utils.tensorboard.load_loglikelihood', 'utb.load_loglikelihood', (['event_acc'], {}), '(event_acc)\n', (2917, 2928), True, 'import utils.tensorboard as utb\n'), ((3851, 3888), 'numpy.tile', 'np.tile', (['x_recons', '[self.bs, 1, 1, 1]'], {}), '(x_recons, 
[self.bs, 1, 1, 1])\n', (3858, 3888), True, 'import numpy as np\n'), ((5733, 5757), 'math.ceil', 'math.ceil', (['(N_i / self.bs)'], {}), '(N_i / self.bs)\n', (5742, 5757), False, 'import math\n'), ((5777, 5790), 'numpy.zeros', 'np.zeros', (['N_i'], {}), '(N_i)\n', (5785, 5790), True, 'import numpy as np\n'), ((5815, 5828), 'numpy.zeros', 'np.zeros', (['N_i'], {}), '(N_i)\n', (5823, 5828), True, 'import numpy as np\n'), ((5853, 5866), 'numpy.zeros', 'np.zeros', (['N_i'], {}), '(N_i)\n', (5861, 5866), True, 'import numpy as np\n'), ((206, 230), 'numpy.arange', 'np.arange', (['(1)', '(length + 1)'], {}), '(1, length + 1)\n', (215, 230), True, 'import numpy as np\n'), ((393, 417), 'numpy.arange', 'np.arange', (['(1)', '(length + 1)'], {}), '(1, length + 1)\n', (402, 417), True, 'import numpy as np\n'), ((1313, 1323), 'numpy.exp', 'np.exp', (['ds'], {}), '(ds)\n', (1319, 1323), True, 'import numpy as np\n'), ((3229, 3240), 'time.time', 'time.time', ([], {}), '()\n', (3238, 3240), False, 'import time\n'), ((5643, 5653), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (5649, 5653), True, 'import numpy as np\n'), ((5922, 5948), 'numpy.tile', 'np.tile', (['z_c', '[self.bs, 1]'], {}), '(z_c, [self.bs, 1])\n', (5929, 5948), True, 'import numpy as np\n'), ((6382, 6423), 'utils.loss.PSNR', 'loss.PSNR', (['x_gener', 'x_tmp'], {'axis': '(1, 2, 3)'}), '(x_gener, x_tmp, axis=(1, 2, 3))\n', (6391, 6423), True, 'import utils.loss as loss\n'), ((6850, 6863), 'numpy.log', 'np.log', (['sigma'], {}), '(sigma)\n', (6856, 6863), True, 'import numpy as np\n'), ((3339, 3350), 'time.time', 'time.time', ([], {}), '()\n', (3348, 3350), False, 'import time\n'), ((3388, 3399), 'time.time', 'time.time', ([], {}), '()\n', (3397, 3399), False, 'import time\n'), ((4305, 4329), 'numpy.sum', 'np.sum', (['accepted_samples'], {}), '(accepted_samples)\n', (4311, 4329), True, 'import numpy as np\n'), ((5503, 5536), 'math.ceil', 'math.ceil', (['(N_i * self.k / self.bs)'], {}), '(N_i * self.k / self.bs)\n', 
(5512, 5536), False, 'import math\n'), ((5980, 6024), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma', '[self.bs, z_dim]'], {}), '(0, sigma, [self.bs, z_dim])\n', (5996, 6024), True, 'import numpy as np\n'), ((6660, 6673), 'numpy.log', 'np.log', (['sigma'], {}), '(sigma)\n', (6666, 6673), True, 'import numpy as np\n'), ((6781, 6812), 'numpy.sum', 'np.sum', (['((z - z_c) ** 2)'], {'axis': '(-1)'}), '((z - z_c) ** 2, axis=-1)\n', (6787, 6812), True, 'import numpy as np\n'), ((6885, 6902), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (6891, 6902), True, 'import numpy as np\n'), ((6625, 6648), 'numpy.sum', 'np.sum', (['(z ** 2)'], {'axis': '(-1)'}), '(z ** 2, axis=-1)\n', (6631, 6648), True, 'import numpy as np\n'), ((6576, 6607), 'numpy.sum', 'np.sum', (['((z - z_c) ** 2)'], {'axis': '(-1)'}), '((z - z_c) ** 2, axis=-1)\n', (6582, 6607), True, 'import numpy as np\n')] |
"""
References:
https://fdc.nal.usda.gov/api-guide.html
"""
import json
import decouple
import requests
from datatrans import utils
from datatrans.fooddata import search
__all__ = ['send_food_search_api_request', 'send_food_detail_api_request']
def send_food_search_api_request(
        criteria: search.request.FoodSearchCriteria,
        *, api_key: str = decouple.config('DATA_GOV_API_KEY', 'MY_API_KEY')
) -> requests.Response:
    """Send a Food Search Endpoint request.
    Args:
        criteria: FoodData Central search criteria
            general_search_input (str): Search query (general text)
            included_data_types (Dict[str, bool]): Specific data types to include in search
            ingredients: The list of ingredients (as it appears on the product label)
            brand_owner (str): Brand owner for the food
            require_all_words (bool): When True, the search will only return foods
                contain all of the words that were entered in the search field
            page_number (int): The page of results to return
            sort_field (SortField): The name of the field by which to sort
            sort_direction (SortDirection): The direction of the sorting
        api_key: Required. Must be a data.gov registered API key.

    Raises:
        ValueError: if no criteria fields are set.
        UserWarning: if the API key was never configured.
    """
    # NOTE(review): the api_key default is resolved from the environment once,
    # at import time — changing the env var later does not affect this default.
    url = 'https://api.nal.usda.gov/fdc/v1/search'
    # The FDC API expects camelCase field names; drop unset (None) criteria.
    data = {utils.snake_to_camel(k): v for k, v in criteria.items() if v is not None}
    if not data:
        raise ValueError('No criteria to search')
    if api_key == 'MY_API_KEY':
        raise UserWarning('Invalid API key, configure API key in .env first')
    return requests.post(url, params={'api_key': api_key},
                         data=json.dumps(data, cls=utils.JSONEncoder),
                         headers={'Content-Type': 'application/json'})
def send_food_detail_api_request(
        fdc_id: int,
        *, api_key: str = decouple.config('DATA_GOV_API_KEY', 'MY_API_KEY')
) -> requests.Response:
    """Send a Food Detail Endpoint request.
    Args:
        fdc_id: Required. Unique identifier for the food.
        api_key: Required. Must be a data.gov registered API key.
    Returns:
        The raw ``requests.Response`` from FoodData Central.
    """
    # DOCFIX: the original docstring had the fdc_id/api_key descriptions swapped.
    # NOTE(review): the api_key default is resolved from the environment once,
    # at import time — changing the env var later does not affect this default.
    url = 'https://api.nal.usda.gov/fdc/v1/' + str(fdc_id)
    return requests.get(url, params={'api_key': api_key},
                        headers={'Content-Type': 'application/json'})
if __name__ == '__main__':
    # Import-only module; no standalone CLI behavior.
    pass
| [
"json.dumps",
"decouple.config",
"datatrans.utils.snake_to_camel",
"requests.get"
] | [((368, 417), 'decouple.config', 'decouple.config', (['"""DATA_GOV_API_KEY"""', '"""MY_API_KEY"""'], {}), "('DATA_GOV_API_KEY', 'MY_API_KEY')\n", (383, 417), False, 'import decouple\n'), ((1891, 1940), 'decouple.config', 'decouple.config', (['"""DATA_GOV_API_KEY"""', '"""MY_API_KEY"""'], {}), "('DATA_GOV_API_KEY', 'MY_API_KEY')\n", (1906, 1940), False, 'import decouple\n'), ((2222, 2318), 'requests.get', 'requests.get', (['url'], {'params': "{'api_key': api_key}", 'headers': "{'Content-Type': 'application/json'}"}), "(url, params={'api_key': api_key}, headers={'Content-Type':\n 'application/json'})\n", (2234, 2318), False, 'import requests\n'), ((1354, 1377), 'datatrans.utils.snake_to_camel', 'utils.snake_to_camel', (['k'], {}), '(k)\n', (1374, 1377), False, 'from datatrans import utils\n'), ((1696, 1735), 'json.dumps', 'json.dumps', (['data'], {'cls': 'utils.JSONEncoder'}), '(data, cls=utils.JSONEncoder)\n', (1706, 1735), False, 'import json\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time    : 2019/5/18 10:20
# @Author  : chensw, wangwei
# @File    : model_fusion.py
# @Describe: Fuse the five models' predictions by majority voting
# @Modify  : notes on any later modifications go here
import pandas as pd
# Step 3: model voting fusion
# Read the predicted-probability files produced by the 5 models
predict_1_4 = pd.read_csv('../data/output/predict/prediction_1_4.csv')
predict_2_4 = pd.read_csv('../data/output/predict/prediction_2_4.csv')
predict_3_4 = pd.read_csv('../data/output/predict/prediction_3_4.csv')
predict_a_4 = pd.read_csv('../data/output/predict/prediction_a_4.csv')
predict_23_4 = pd.read_csv('../data/output/predict/prediction_23_4.csv')
# Label positive/negative samples using each model's tuned probability threshold
print('Start the model fusion!!!')
predict_1_4['label_1'] = predict_1_4.apply(lambda x: 1 if x.prob >= 0.076 else 0, axis=1)
predict_2_4['label_2'] = predict_2_4.apply(lambda x: 1 if x.prob >= 0.057 else 0, axis=1)
predict_3_4['label_3'] = predict_3_4.apply(lambda x: 1 if x.prob >= 0.076 else 0, axis=1)
predict_a_4['label_4'] = predict_a_4.apply(lambda x: 1 if x.prob >= 0.083 else 0, axis=1)
predict_23_4['label_5'] = predict_23_4.apply(lambda x: 1 if x.prob >= 0.073 else 0, axis=1)
test_set = pd.read_csv('../data/features/features_test/test_index.csv')
# Join every model's labels onto the test index on (user_id, cate, shop_id)
predict = pd.merge(test_set, predict_1_4, how='inner', on=['user_id', 'cate', 'shop_id'])
predict = pd.merge(predict, predict_2_4, how='inner', on=['user_id', 'cate', 'shop_id'])
predict = pd.merge(predict, predict_3_4, how='inner', on=['user_id', 'cate', 'shop_id'])
predict = pd.merge(predict, predict_a_4, how='inner', on=['user_id', 'cate', 'shop_id'])
predict = pd.merge(predict, predict_23_4, how='inner', on=['user_id', 'cate', 'shop_id'])
# Majority vote: keep a triple only when at least 3 of the 5 models flag it
predict['label'] = predict['label_1'] + predict['label_2'] + predict['label_3'] + predict['label_4'] + predict['label_5']
predict = predict[(predict.label >= 3)][['user_id', 'cate', 'shop_id']]
print('The model fusion is OK!!!')
predict.to_csv('../data/submit/predict_fusion.csv', index=False, encoding='utf-8')
print(predict.count())
print('Output prediction results!!!')
| [
"pandas.merge",
"pandas.read_csv"
] | [((253, 309), 'pandas.read_csv', 'pd.read_csv', (['"""../data/output/predict/prediction_1_4.csv"""'], {}), "('../data/output/predict/prediction_1_4.csv')\n", (264, 309), True, 'import pandas as pd\n'), ((325, 381), 'pandas.read_csv', 'pd.read_csv', (['"""../data/output/predict/prediction_2_4.csv"""'], {}), "('../data/output/predict/prediction_2_4.csv')\n", (336, 381), True, 'import pandas as pd\n'), ((397, 453), 'pandas.read_csv', 'pd.read_csv', (['"""../data/output/predict/prediction_3_4.csv"""'], {}), "('../data/output/predict/prediction_3_4.csv')\n", (408, 453), True, 'import pandas as pd\n'), ((469, 525), 'pandas.read_csv', 'pd.read_csv', (['"""../data/output/predict/prediction_a_4.csv"""'], {}), "('../data/output/predict/prediction_a_4.csv')\n", (480, 525), True, 'import pandas as pd\n'), ((542, 599), 'pandas.read_csv', 'pd.read_csv', (['"""../data/output/predict/prediction_23_4.csv"""'], {}), "('../data/output/predict/prediction_23_4.csv')\n", (553, 599), True, 'import pandas as pd\n'), ((1123, 1183), 'pandas.read_csv', 'pd.read_csv', (['"""../data/features/features_test/test_index.csv"""'], {}), "('../data/features/features_test/test_index.csv')\n", (1134, 1183), True, 'import pandas as pd\n'), ((1195, 1274), 'pandas.merge', 'pd.merge', (['test_set', 'predict_1_4'], {'how': '"""inner"""', 'on': "['user_id', 'cate', 'shop_id']"}), "(test_set, predict_1_4, how='inner', on=['user_id', 'cate', 'shop_id'])\n", (1203, 1274), True, 'import pandas as pd\n'), ((1286, 1364), 'pandas.merge', 'pd.merge', (['predict', 'predict_2_4'], {'how': '"""inner"""', 'on': "['user_id', 'cate', 'shop_id']"}), "(predict, predict_2_4, how='inner', on=['user_id', 'cate', 'shop_id'])\n", (1294, 1364), True, 'import pandas as pd\n'), ((1376, 1454), 'pandas.merge', 'pd.merge', (['predict', 'predict_3_4'], {'how': '"""inner"""', 'on': "['user_id', 'cate', 'shop_id']"}), "(predict, predict_3_4, how='inner', on=['user_id', 'cate', 'shop_id'])\n", (1384, 1454), True, 'import pandas as 
pd\n'), ((1466, 1544), 'pandas.merge', 'pd.merge', (['predict', 'predict_a_4'], {'how': '"""inner"""', 'on': "['user_id', 'cate', 'shop_id']"}), "(predict, predict_a_4, how='inner', on=['user_id', 'cate', 'shop_id'])\n", (1474, 1544), True, 'import pandas as pd\n'), ((1556, 1635), 'pandas.merge', 'pd.merge', (['predict', 'predict_23_4'], {'how': '"""inner"""', 'on': "['user_id', 'cate', 'shop_id']"}), "(predict, predict_23_4, how='inner', on=['user_id', 'cate', 'shop_id'])\n", (1564, 1635), True, 'import pandas as pd\n')] |
import unittest
# Run tests without using GPU
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ['TEST'] = "1"
import sys
from pathlib import Path
TEST_DIR = str(Path(__file__).parent.resolve())
BASE_DIR = str(Path(__file__).parent.parent.resolve())
sys.path.append(BASE_DIR)
from core.filters import PublicationDateFilter
from dateutil.parser import parse as parse_date
from core.api import APIRequest
from core.api import SearchRequest102
from core.api import SearchRequest103
from core.api import SnippetRequest
from core.api import MappingRequest
from core.api import DatasetSampleRequest
from core.api import SimilarPatentsRequest
from core.api import PatentPriorArtRequest
from core.api import BadRequestError
from core.api import ServerError
from core.api import ResourceNotFoundError
from core.api import DocumentRequest
from core.api import PatentDataRequest
from core.api import TitleRequest
from core.api import AbstractRequest
from core.api import AllClaimsRequest
from core.api import OneClaimRequest
from core.api import IndependentClaimsRequest
from core.api import PatentDescriptionRequest
from core.api import CitationsRequest
from core.api import BackwardCitationsRequest
from core.api import ForwardCitationsRequest
from core.api import AbstractConceptsRequest
from core.api import DescriptionConceptsRequest
from core.api import CPCsRequest
from core.api import ListThumbnailsRequest
from core.api import ThumbnailRequest
from core.api import PatentCPCVectorRequest
from core.api import PatentAbstractVectorRequest
from core.api import SimilarConceptsRequest
from core.api import ConceptVectorRequest
from core.api import DrawingRequest
from core.api import ListDrawingsRequest
from core.api import AggregatedCitationsRequest
class TestRequestClass(unittest.TestCase):

    """Tests for the APIRequest base class, using a toy greeting subclass."""

    class GreetingRequest(APIRequest):
        """Minimal request that returns a greeting in the requested language."""

        greetings = { 'en': 'Hello', 'de': 'Hallo' }

        def __init__(self, req_data):
            super().__init__(req_data)

        def _serving_fn(self):
            return self.greetings[self._data['lang']]

        def _validation_fn(self):
            if 'lang' not in self._data:
                raise BadRequestError('Invalid request.')

    def test_can_create_dummy_request(self):
        """A bare APIRequest serves None."""
        request = APIRequest()
        self.assertEqual(request.serve(), None)

    def test_serving_fn_operation(self):
        """Serving returns the greeting for the requested language."""
        request = self.GreetingRequest({ 'lang': 'en' })
        self.assertEqual('Hello', request.serve())

    def test_raises_error_on_invalid_request(self):
        """A payload without 'lang' fails validation at construction time."""
        with self.assertRaises(BadRequestError):
            self.GreetingRequest({ 'locale': 'en' })

    def test_raises_error_on_expection_during_serving(self):
        """An unsupported language raises ServerError while serving."""
        request = self.GreetingRequest({ 'lang': 'hi' })
        self.assertRaises(ServerError, request.serve)
class TestSearchRequest102Class(unittest.TestCase):

    """Tests for 102 (novelty) prior-art search requests.

    Exercises query parameters: result count `n`, date cutoffs
    `before`/`after`, index restriction `idx`, latent query `lq`,
    result type, snippets, mappings, and pagination via `offset`.
    """

    def setUp(self):
        self.query = 'reducing carbon emissions'
        self.date = '2017-12-12'
        self.subclass = 'Y02T'
        self.latent_query = 'by using green technologies'
    def test_simple_search_request(self):
        results = self.search({ 'q': self.query })
        self.assertGreater(len(results), 0)
    def test_return_custom_number_of_results(self):
        results = self.search({ 'q': self.query, 'n': 13 })
        self.assertEqual(13, len(results))
    def test_query_with_before_cutoff_date(self):
        # Every result must be published on or before the cutoff date.
        results = self.search({ 'q': self.query, 'before': self.date })
        def published_before(r):
            d1 = parse_date(r['publication_date'])
            d2 = parse_date(self.date)
            return d1 <= d2
        self.assertForEach(results, published_before)
    def test_query_with_after_cutoff_date(self):
        # Every result must be published on or after the cutoff date.
        results = self.search({ 'q': self.query, 'after': self.date})
        def published_after(r):
            d1 = parse_date(r['publication_date'])
            d2 = parse_date(self.date)
            return d1 >= d2
        self.assertForEach(results, published_after)
    def test_query_with_index_specified(self):
        cc = self.subclass
        results = self.search({ 'q': self.query, 'idx': cc })
        from_cc = lambda r: r['index'].startswith(cc)
        self.assertForEach(results, from_cc)
    def test_latent_query_affects_results(self):
        # A latent query should change the ranking relative to a plain query.
        latent = self.search({ 'q': self.query, 'lq': self.latent_query })
        without = self.search({ 'q': self.query })
        self.assertNotEqual(latent, without)
    def test_return_only_non_patent_results(self):
        results = self.search({ 'q': 'control systems', 'type': 'npl' })
        is_npl = lambda r: r['index'].endswith('npl')
        self.assertForEach(results, is_npl)
    def test_include_snippets(self):
        results = self.search({ 'q': self.query, 'snip': 1 })
        has_snippet = lambda r: r['snippet']
        self.assertForEach(results, has_snippet)
    def test_include_mappings(self):
        results = self.search({ 'q': self.query, 'maps': 1 })
        has_mappings = lambda r: r['mapping']
        self.assertForEach(results, has_mappings)
    def test_raises_error_with_bad_request(self):
        # 'qry' is not a recognized parameter; 'q' is required.
        bad_req = lambda: SearchRequest102({ 'qry': self.query })
        self.assertRaises(BadRequestError, bad_req)
    def test_pagination(self):
        results_a = self.search({ 'q': self.query, 'n': 10 })
        results_b = self.search({ 'q': self.query, 'n': 10, 'offset': 5})
        self.assertEqual(results_a[5:], results_b[:5])
    def search(self, req):
        """Serve a SearchRequest102 and return its result list."""
        req = SearchRequest102(req)
        results = req.serve()['results']
        return results
    def assertForEach(self, results, condition):
        """Assert `results` is non-empty and `condition` holds for each item."""
        self.assertGreater(len(results), 0)
        truth_arr = [condition(res) for res in results]
        self.assertTrue(all(truth_arr))
class TestSearchRequest103Class(unittest.TestCase):

    """Tests for 103 (obviousness) combination-search requests."""

    def setUp(self):
        self.query = 'fire fighting drone uses dry ice'

    def test_simple_search(self):
        """A plain query yields at least one combination."""
        self.assertGreater(len(self.search({ 'q': self.query })), 0)

    def test_return_custom_number_of_results(self):
        """The 'n' parameter controls the number of combinations returned."""
        self.assertEqual(8, len(self.search({ 'q': self.query, 'n': 8 })))

    def test_pagination(self):
        """An offset of 5 shifts the 10-item result window by 5."""
        first_page = self.search({ 'q': self.query, 'n': 10 })
        shifted_page = self.search({ 'q': self.query, 'n': 10, 'offset': 5})
        self.assertEqual(first_page[5:], shifted_page[:5])

    def search(self, req):
        """Serve a SearchRequest103 and return its result list."""
        return SearchRequest103(req).serve()['results']
class TestDatasetSampleRequestClass(unittest.TestCase):

    """Tests retrieval of individual samples from named datasets."""

    def test_request_a_sample_from_poc(self):
        self.assertSample('poc', 23)
        self.assertSample('poc', 45023)

    def test_request_a_sample_that_does_not_exist(self):
        """An out-of-range sample number raises ServerError."""
        with self.assertRaises(ServerError):
            self.make_request('poc', 200200)

    def test_access_non_existent_dataset(self):
        """An unknown dataset name raises ResourceNotFoundError."""
        with self.assertRaises(ResourceNotFoundError):
            self.make_request('dog', 1)

    def test_invalid_request(self):
        """A payload without 'dataset'/'n' keys is rejected."""
        with self.assertRaises(BadRequestError):
            DatasetSampleRequest({ 'sample': 3 }).serve()

    def assertSample(self, dataset, n):
        """Assert that sample `n` of `dataset` is served as a dict."""
        self.assertIsInstance(self.make_request(dataset, n), dict)

    def make_request(self, dataset, n):
        return DatasetSampleRequest({ 'dataset': dataset, 'n': n }).serve()
class TestSimilarPatentsRequestClass(unittest.TestCase):

    """Tests similar-patent lookup keyed by publication number."""

    def test_invalid_query(self):
        """A free-text 'q' parameter (instead of 'pn') is rejected."""
        with self.assertRaises(BadRequestError):
            SimilarPatentsRequest({ 'q': 'drones'})

    def test_with_simple_query(self):
        """A valid publication number yields a non-empty result list."""
        res = SimilarPatentsRequest({ 'pn': 'US7654321B2' }).serve()
        self.assertIsInstance(res, dict)
        self.assertIsInstance(res['results'], list)
        self.assertGreater(len(res['results']), 0)
class TestPatentPriorArtRequestClass(unittest.TestCase):

    """Tests that prior-art results predate the subject patent."""

    def test_with_simple_query(self):
        # 2006-12-27 is used as the relevant date for US7654321B2.
        results = PatentPriorArtRequest({ 'pn': 'US7654321B2'}).serve()['results']
        cutoff = parse_date('2006-12-27')
        self.assertForEach(
            results,
            lambda r: parse_date(r['publication_date']) <= cutoff)

    def assertForEach(self, results, condition):
        """Assert that `condition` holds for every result."""
        self.assertTrue(all(condition(res) for res in results))
class TestSnippetRequestClass(unittest.TestCase):
    # TODO: SnippetRequest is imported but has no coverage; add tests.
    pass
class TestMappingRequestClass(unittest.TestCase):
    # TODO: MappingRequest is imported but has no coverage; add tests.
    pass
class TestDrawingRequestClass(unittest.TestCase):

    """Tests fetching single drawing images for patents and applications."""

    def setUp(self):
        self.pat = 'US7654321B2'
        self.app = 'US20130291398A1'

    def _fetch(self, doc, num):
        """Serve a DrawingRequest for drawing `num` of document `doc`."""
        return DrawingRequest({'pn': doc, 'n': num}).serve()

    def test_get_patent_drawing(self):
        self.assertIsInstance(self._fetch(self.pat, 1), str)

    def test_get_second_image(self):
        self.assertIsInstance(self._fetch(self.pat, 2), str)

    def test_get_publication_drawing(self):
        self.assertIsInstance(self._fetch(self.app, 1), str)
class TestListDrawingsRequestClass(unittest.TestCase):

    """Tests listing all available drawings for a document."""

    def setUp(self):
        self.pat = 'US7654321B2'
        self.app = 'US20130291398A1'

    def test_list_drawings_of_patent(self):
        """The sample patent has exactly 8 drawings."""
        res = ListDrawingsRequest({'pn': self.pat}).serve()
        self.assertEqual(8, len(res['drawings']))
        self.assertEqual(self.pat, res['pn'])

    def test_list_drawings_of_application(self):
        """The sample application has exactly 12 drawings."""
        res = ListDrawingsRequest({'pn': self.app}).serve()
        self.assertEqual(12, len(res['drawings']))
        self.assertEqual(self.app, res['pn'])
class TestDocumentRequestClass(unittest.TestCase):

    """Tests raw document retrieval by identifier."""

    def test_get_patent_document(self):
        """The served document is a dict echoing the requested id."""
        document = DocumentRequest({'id': 'US7654321B2'}).serve()
        self.assertIsInstance(document, dict)
        self.assertEqual(document['id'], 'US7654321B2')
class TestPatentDataRequestClass(unittest.TestCase):

    """Tests the combined bibliographic-data endpoint."""

    def test_returns_patent_data(self):
        """All major fields are present and well-formed for a sample patent."""
        payload = PatentDataRequest({'pn': 'US7654321B2'}).serve()
        self.assertIsInstance(payload, dict)
        self.assertEqual(payload['title'][:24], 'Formation fluid sampling')
        self.assertEqual(payload['pn'], 'US7654321B2')
        self.assertNonNullString(payload['abstract'])
        self.assertNonNullString(payload['description'])
        self.assertIsInstance(payload['claims'], list)
        self.assertGreater(len(payload['claims']), 0)

    def assertNonNullString(self, string):
        """Assert `string` is a str containing non-whitespace content."""
        self.assertIsInstance(string, str)
        self.assertGreater(len(string.strip()), 0)
class TestTitleRequestClass(unittest.TestCase):

    """Tests title retrieval for a known patent."""

    def test_get_title(self):
        patent_number = 'US7654321B2'
        expected_title = 'Formation fluid sampling apparatus and methods'
        res = TitleRequest({'pn': patent_number}).serve()
        self.assertEqual(res['pn'], patent_number)
        self.assertEqual(res['title'], expected_title)
class TestAbstractRequestClass(unittest.TestCase):

    """Tests abstract retrieval for a known patent."""

    def test_get_abstract(self):
        patent_number = 'US7654321B2'
        expected_prefix = 'A fluid sampling system retrieves'
        res = AbstractRequest({'pn': patent_number}).serve()
        self.assertEqual(res['pn'], patent_number)
        # Only the opening words of the abstract are pinned down.
        self.assertEqual(res['abstract'][:len(expected_prefix)], expected_prefix)
class TestAllClaimsRequestClass(unittest.TestCase):

    """Tests retrieval of the full claim set."""

    def test_get_all_claims(self):
        """The sample patent has exactly 26 claims."""
        res = AllClaimsRequest({'pn': 'US7654321B2'}).serve()
        self.assertIsInstance(res['claims'], list)
        self.assertEqual(26, len(res['claims']))
class TestOneClaimRequestClass(unittest.TestCase):

    """Tests retrieval of a single claim by its number."""

    def setUp(self):
        self.pn = 'US7654321B2'

    def test_get_one_claim(self):
        expected_prefix = '2. The fluid sampling system of claim 1, in which'
        res = OneClaimRequest({'pn': self.pn, 'n': 2}).serve()
        self.assertEqual(2, res['claim_num'])
        self.assertEqual(expected_prefix, res['claim'][:len(expected_prefix)])

    def test_raises_error_on_invalid_requests(self):
        """Missing, zero, non-integer, too-large, and negative claim numbers fail."""
        for req_data in (
            {'pn': self.pn},
            {'pn': self.pn, 'n': 0},
            {'pn': self.pn, 'n': 'first'},
            {'pn': self.pn, 'n': 27},
            {'pn': self.pn, 'n': -1},
        ):
            with self.assertRaises(BadRequestError):
                OneClaimRequest(req_data).serve()
class TestIndependentClaimsRequestClass(unittest.TestCase):

    """Tests filtering the claim set down to independent claims."""

    def test_get_independent_claims(self):
        """The sample patent has 6 independent claims."""
        patent_number = 'US7654321B2'
        res = IndependentClaimsRequest({'pn': patent_number}).serve()
        self.assertEqual(res['pn'], patent_number)
        self.assertEqual(6, len(res['claims']))
class TestPatentDescriptionRequestClass(unittest.TestCase):

    """Tests retrieval of the patent description text."""

    def test_get_description(self):
        res = PatentDescriptionRequest({'pn': 'US7654321B2'}).serve()
        self.assertNonNullString(res['description'])

    def assertNonNullString(self, string):
        """Assert `string` is a str containing non-whitespace content."""
        self.assertIsInstance(string, str)
        self.assertGreater(len(string.strip()), 0)
class TestCitationsRequestClass(unittest.TestCase):

    """Tests combined backward and forward citation retrieval."""

    def test_get_citations(self):
        res = CitationsRequest({'pn': 'US7654321B2'}).serve()
        for direction in ('citations_backward', 'citations_forward'):
            self.assertIsInstance(res[direction], list)
            self.assertGreater(len(res[direction]), 0)
class TestBackwardCitationsRequestClass(unittest.TestCase):

    """Tests retrieval of backward (cited) references only."""

    def test_get_back_citations(self):
        res = BackwardCitationsRequest({'pn': 'US7654321B2'}).serve()
        backward = res['citations_backward']
        self.assertIsInstance(backward, list)
        self.assertGreater(len(backward), 0)
class TestForwardCitationsRequestClass(unittest.TestCase):

    """Tests retrieval of forward (citing) references only."""

    def test_get_forward_citations(self):
        res = ForwardCitationsRequest({'pn': 'US7654321B2'}).serve()
        forward = res['citations_forward']
        self.assertIsInstance(forward, list)
        self.assertGreater(len(forward), 0)
class TestAbstractConceptsRequestClass(unittest.TestCase):

    """Tests concept extraction from a patent abstract."""

    def test_get_concepts_from_abstract(self):
        res = AbstractConceptsRequest({'pn': 'US7654321B2'}).serve()
        concepts = res['concepts']
        self.assertIsInstance(concepts, list)
        self.assertGreater(len(concepts), 0)
class TestDescriptionConceptsRequestClass(unittest.TestCase):

    """Tests concept extraction from a patent description."""

    def test_get_concepts_from_description(self):
        """Concepts extracted from the description form a non-empty list."""
        pn = 'US7654321B2'
        # Fix: this test previously instantiated AbstractConceptsRequest,
        # duplicating TestAbstractConceptsRequestClass and leaving the
        # imported DescriptionConceptsRequest entirely untested.
        response = DescriptionConceptsRequest({'pn': pn}).serve()
        self.assertIsInstance(response['concepts'], list)
        self.assertGreater(len(response['concepts']), 0)
class TestCPCsRequestClass(unittest.TestCase):

    """Tests retrieval of a patent's CPC classification codes."""

    def test_get_cpcs(self):
        res = CPCsRequest({'pn': 'US7654321B2'}).serve()
        cpcs = res['cpcs']
        self.assertIsInstance(cpcs, list)
        self.assertGreater(len(cpcs), 0)
class TestListThumbnailsRequestClass(unittest.TestCase):

    """Tests listing available drawing thumbnails."""

    def test_get_list_of_available_thumbnails(self):
        """The sample patent exposes 8 thumbnails (one per drawing)."""
        res = ListThumbnailsRequest({'pn': 'US7654321B2'}).serve()
        self.assertEqual(8, len(res['thumbnails']))
class TestThumbnailRequestClass(unittest.TestCase):

    """Tests fetching a single thumbnail image."""

    def test_get_a_thumbnail(self):
        # Note: 'n' is deliberately a string here, matching the API's tolerance.
        res = ThumbnailRequest({'pn': 'US7654321B2', 'n': '1'}).serve()
        self.assertIsInstance(res, str)
class TestPatentCPCVectorRequestClass(unittest.TestCase):

    """Tests the CPC-based patent embedding endpoint."""

    def test_get_cpc_patent_vector(self):
        """The CPC embedding is a 256-dimensional list."""
        vector = PatentCPCVectorRequest({'pn': 'US7654321B2'}).serve()['vector']
        self.assertIsInstance(vector, list)
        self.assertEqual(256, len(vector))
class TestPatentAbstractVectorRequestClass(unittest.TestCase):

    """Tests the abstract-text patent embedding endpoint."""

    def test_get_abstract_text_vector(self):
        """The abstract embedding is a 768-dimensional list."""
        vector = PatentAbstractVectorRequest({'pn': 'US7654321B2'}).serve()['vector']
        self.assertIsInstance(vector, list)
        self.assertEqual(768, len(vector))
class TestSimilarConceptsRequestClass(unittest.TestCase):

    """Tests nearest-neighbour lookup in concept space."""

    def test_get_similar_concepts_to_vehicle(self):
        res = SimilarConceptsRequest({'concept': 'vehicle'}).serve()
        self.assertIsInstance(res['similar'], list)
        self.assertGreater(len(res['similar']), 0)

    def test_return_custom_number_of_concepts(self):
        """The 'n' parameter bounds the number of neighbours returned."""
        res = SimilarConceptsRequest({'concept': 'vehicle', 'n': 13}).serve()
        self.assertIsInstance(res['similar'], list)
        self.assertEqual(13, len(res['similar']))

    def test_raises_error_on_invalid_concept(self):
        """A term outside the concept vocabulary raises ResourceNotFoundError."""
        with self.assertRaises(ResourceNotFoundError):
            SimilarConceptsRequest({'concept': 'django'}).serve()
class TestConceptVectorRequestClass(unittest.TestCase):

    """Tests retrieval of a concept's embedding vector."""

    def test_get_vector_for_vehicle(self):
        """A known concept maps to a 256-dimensional list."""
        vector = ConceptVectorRequest({'concept': 'vehicle'}).serve()['vector']
        self.assertIsInstance(vector, list)
        self.assertEqual(256, len(vector))

    def test_raises_error_on_invalid_concept(self):
        """A term outside the concept vocabulary raises ResourceNotFoundError."""
        with self.assertRaises(ResourceNotFoundError):
            ConceptVectorRequest({'concept': 'django'}).serve()
class TestAggregatedCitationsRequest(unittest.TestCase):

    """Tests multi-level citation aggregation and its guard rails."""

    PN = 'US7654321B2'

    def _serve(self, **req_data):
        """Serve an AggregatedCitationsRequest built from keyword args."""
        return AggregatedCitationsRequest(req_data).serve()

    def test_get_one_level_citations(self):
        """One level of aggregation yields exactly 73 citations."""
        citations = self._serve(levels=1, pn=self.PN)
        self.assertIsInstance(citations, list)
        self.assertEqual(len(citations), 73)

    def test_get_two_level_citations(self):
        """Two levels yield strictly more citations than one."""
        citations = self._serve(levels=2, pn=self.PN)
        self.assertIsInstance(citations, list)
        self.assertGreater(len(citations), 73)

    def test_raises_error_if_level_parameter_missing(self):
        with self.assertRaises(BadRequestError):
            self._serve(pn=self.PN)

    def test_raises_error_if_no_level_specified(self):
        with self.assertRaises(BadRequestError):
            self._serve(pn=self.PN, levels=None)

    def test_raises_error_if_level_out_of_range(self):
        with self.assertRaises(BadRequestError):
            self._serve(levels=5, pn=self.PN)

    def test_raises_error_if_citations_grow_a_lot(self):
        """Deep aggregation that would explode in size is refused server-side."""
        with self.assertRaises(ServerError):
            self._serve(levels=4, pn=self.PN)
if __name__ == '__main__':
unittest.main() | [
"core.api.IndependentClaimsRequest",
"core.api.ForwardCitationsRequest",
"core.api.DrawingRequest",
"unittest.main",
"core.api.CitationsRequest",
"core.api.ConceptVectorRequest",
"sys.path.append",
"core.api.AggregatedCitationsRequest",
"core.api.TitleRequest",
"core.api.OneClaimRequest",
"core.... | [((264, 289), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (279, 289), False, 'import sys\n'), ((19348, 19363), 'unittest.main', 'unittest.main', ([], {}), '()\n', (19361, 19363), False, 'import unittest\n'), ((2285, 2297), 'core.api.APIRequest', 'APIRequest', ([], {}), '()\n', (2295, 2297), False, 'from core.api import APIRequest\n'), ((5563, 5584), 'core.api.SearchRequest102', 'SearchRequest102', (['req'], {}), '(req)\n', (5579, 5584), False, 'from core.api import SearchRequest102\n'), ((6548, 6569), 'core.api.SearchRequest103', 'SearchRequest103', (['req'], {}), '(req)\n', (6564, 6569), False, 'from core.api import SearchRequest103\n'), ((7560, 7610), 'core.api.DatasetSampleRequest', 'DatasetSampleRequest', (["{'dataset': dataset, 'n': n}"], {}), "({'dataset': dataset, 'n': n})\n", (7580, 7610), False, 'from core.api import DatasetSampleRequest\n'), ((3581, 3614), 'dateutil.parser.parse', 'parse_date', (["r['publication_date']"], {}), "(r['publication_date'])\n", (3591, 3614), True, 'from dateutil.parser import parse as parse_date\n'), ((3632, 3653), 'dateutil.parser.parse', 'parse_date', (['self.date'], {}), '(self.date)\n', (3642, 3653), True, 'from dateutil.parser import parse as parse_date\n'), ((3905, 3938), 'dateutil.parser.parse', 'parse_date', (["r['publication_date']"], {}), "(r['publication_date'])\n", (3915, 3938), True, 'from dateutil.parser import parse as parse_date\n'), ((3956, 3977), 'dateutil.parser.parse', 'parse_date', (['self.date'], {}), '(self.date)\n', (3966, 3977), True, 'from dateutil.parser import parse as parse_date\n'), ((5206, 5243), 'core.api.SearchRequest102', 'SearchRequest102', (["{'qry': self.query}"], {}), "({'qry': self.query})\n", (5222, 5243), False, 'from core.api import SearchRequest102\n'), ((7771, 7809), 'core.api.SimilarPatentsRequest', 'SimilarPatentsRequest', (["{'q': 'drones'}"], {}), "({'q': 'drones'})\n", (7792, 7809), False, 'from core.api import SimilarPatentsRequest\n'), 
((8401, 8434), 'dateutil.parser.parse', 'parse_date', (["r['publication_date']"], {}), "(r['publication_date'])\n", (8411, 8434), True, 'from dateutil.parser import parse as parse_date\n'), ((8452, 8476), 'dateutil.parser.parse', 'parse_date', (['"""2006-12-27"""'], {}), "('2006-12-27')\n", (8462, 8476), True, 'from dateutil.parser import parse as parse_date\n'), ((176, 190), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (180, 190), False, 'from pathlib import Path\n'), ((2189, 2224), 'core.api.BadRequestError', 'BadRequestError', (['"""Invalid request."""'], {}), "('Invalid request.')\n", (2204, 2224), False, 'from core.api import BadRequestError\n'), ((7928, 7972), 'core.api.SimilarPatentsRequest', 'SimilarPatentsRequest', (["{'pn': 'US7654321B2'}"], {}), "({'pn': 'US7654321B2'})\n", (7949, 7972), False, 'from core.api import SimilarPatentsRequest\n'), ((8259, 8303), 'core.api.PatentPriorArtRequest', 'PatentPriorArtRequest', (["{'pn': 'US7654321B2'}"], {}), "({'pn': 'US7654321B2'})\n", (8280, 8303), False, 'from core.api import PatentPriorArtRequest\n'), ((9030, 9070), 'core.api.DrawingRequest', 'DrawingRequest', (["{'pn': self.pat, 'n': 1}"], {}), "({'pn': self.pat, 'n': 1})\n", (9044, 9070), False, 'from core.api import DrawingRequest\n'), ((9181, 9221), 'core.api.DrawingRequest', 'DrawingRequest', (["{'pn': self.pat, 'n': 2}"], {}), "({'pn': self.pat, 'n': 2})\n", (9195, 9221), False, 'from core.api import DrawingRequest\n'), ((9339, 9379), 'core.api.DrawingRequest', 'DrawingRequest', (["{'pn': self.app, 'n': 1}"], {}), "({'pn': self.app, 'n': 1})\n", (9353, 9379), False, 'from core.api import DrawingRequest\n'), ((9646, 9683), 'core.api.ListDrawingsRequest', 'ListDrawingsRequest', (["{'pn': self.pat}"], {}), "({'pn': self.pat})\n", (9665, 9683), False, 'from core.api import ListDrawingsRequest\n'), ((9867, 9904), 'core.api.ListDrawingsRequest', 'ListDrawingsRequest', (["{'pn': self.app}"], {}), "({'pn': self.app})\n", (9886, 9904), False, 'from 
core.api import ListDrawingsRequest\n'), ((10128, 10166), 'core.api.DocumentRequest', 'DocumentRequest', (["{'id': 'US7654321B2'}"], {}), "({'id': 'US7654321B2'})\n", (10143, 10166), False, 'from core.api import DocumentRequest\n'), ((10378, 10418), 'core.api.PatentDataRequest', 'PatentDataRequest', (["{'pn': 'US7654321B2'}"], {}), "({'pn': 'US7654321B2'})\n", (10395, 10418), False, 'from core.api import PatentDataRequest\n'), ((11136, 11160), 'core.api.TitleRequest', 'TitleRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (11148, 11160), False, 'from core.api import TitleRequest\n'), ((11453, 11480), 'core.api.AbstractRequest', 'AbstractRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (11468, 11480), False, 'from core.api import AbstractRequest\n'), ((11738, 11766), 'core.api.AllClaimsRequest', 'AllClaimsRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (11754, 11766), False, 'from core.api import AllClaimsRequest\n'), ((12120, 12160), 'core.api.OneClaimRequest', 'OneClaimRequest', (["{'pn': self.pn, 'n': 2}"], {}), "({'pn': self.pn, 'n': 2})\n", (12135, 12160), False, 'from core.api import OneClaimRequest\n'), ((12879, 12915), 'core.api.IndependentClaimsRequest', 'IndependentClaimsRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (12903, 12915), False, 'from core.api import IndependentClaimsRequest\n'), ((13171, 13207), 'core.api.PatentDescriptionRequest', 'PatentDescriptionRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (13195, 13207), False, 'from core.api import PatentDescriptionRequest\n'), ((13551, 13579), 'core.api.CitationsRequest', 'CitationsRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (13567, 13579), False, 'from core.api import CitationsRequest\n'), ((14008, 14044), 'core.api.BackwardCitationsRequest', 'BackwardCitationsRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (14032, 14044), False, 'from core.api import BackwardCitationsRequest\n'), ((14342, 14377), 'core.api.ForwardCitationsRequest', 'ForwardCitationsRequest', (["{'pn': pn}"], {}), 
"({'pn': pn})\n", (14365, 14377), False, 'from core.api import ForwardCitationsRequest\n'), ((14678, 14713), 'core.api.AbstractConceptsRequest', 'AbstractConceptsRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (14701, 14713), False, 'from core.api import AbstractConceptsRequest\n'), ((15002, 15037), 'core.api.AbstractConceptsRequest', 'AbstractConceptsRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (15025, 15037), False, 'from core.api import AbstractConceptsRequest\n'), ((15290, 15313), 'core.api.CPCsRequest', 'CPCsRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (15301, 15313), False, 'from core.api import CPCsRequest\n'), ((15592, 15625), 'core.api.ListThumbnailsRequest', 'ListThumbnailsRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (15613, 15625), False, 'from core.api import ListThumbnailsRequest\n'), ((15856, 15882), 'core.api.ThumbnailRequest', 'ThumbnailRequest', (['req_data'], {}), '(req_data)\n', (15872, 15882), False, 'from core.api import ThumbnailRequest\n'), ((16089, 16123), 'core.api.PatentCPCVectorRequest', 'PatentCPCVectorRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (16111, 16123), False, 'from core.api import PatentCPCVectorRequest\n'), ((16404, 16443), 'core.api.PatentAbstractVectorRequest', 'PatentAbstractVectorRequest', (["{'pn': pn}"], {}), "({'pn': pn})\n", (16431, 16443), False, 'from core.api import PatentAbstractVectorRequest\n'), ((16699, 16745), 'core.api.SimilarConceptsRequest', 'SimilarConceptsRequest', (["{'concept': 'vehicle'}"], {}), "({'concept': 'vehicle'})\n", (16721, 16745), False, 'from core.api import SimilarConceptsRequest\n'), ((16990, 17021), 'core.api.SimilarConceptsRequest', 'SimilarConceptsRequest', (['request'], {}), '(request)\n', (17012, 17021), False, 'from core.api import SimilarConceptsRequest\n'), ((17466, 17510), 'core.api.ConceptVectorRequest', 'ConceptVectorRequest', (["{'concept': 'vehicle'}"], {}), "({'concept': 'vehicle'})\n", (17486, 17510), False, 'from core.api import 
ConceptVectorRequest\n'), ((18000, 18036), 'core.api.AggregatedCitationsRequest', 'AggregatedCitationsRequest', (['req_data'], {}), '(req_data)\n', (18026, 18036), False, 'from core.api import AggregatedCitationsRequest\n'), ((18253, 18289), 'core.api.AggregatedCitationsRequest', 'AggregatedCitationsRequest', (['req_data'], {}), '(req_data)\n', (18279, 18289), False, 'from core.api import AggregatedCitationsRequest\n'), ((224, 238), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (228, 238), False, 'from pathlib import Path\n'), ((7263, 7298), 'core.api.DatasetSampleRequest', 'DatasetSampleRequest', (["{'sample': 3}"], {}), "({'sample': 3})\n", (7283, 7298), False, 'from core.api import DatasetSampleRequest\n'), ((17221, 17266), 'core.api.SimilarConceptsRequest', 'SimilarConceptsRequest', (["{'concept': 'django'}"], {}), "({'concept': 'django'})\n", (17243, 17266), False, 'from core.api import SimilarConceptsRequest\n'), ((17709, 17752), 'core.api.ConceptVectorRequest', 'ConceptVectorRequest', (["{'concept': 'django'}"], {}), "({'concept': 'django'})\n", (17729, 17752), False, 'from core.api import ConceptVectorRequest\n'), ((18518, 18554), 'core.api.AggregatedCitationsRequest', 'AggregatedCitationsRequest', (['req_data'], {}), '(req_data)\n', (18544, 18554), False, 'from core.api import AggregatedCitationsRequest\n'), ((18754, 18790), 'core.api.AggregatedCitationsRequest', 'AggregatedCitationsRequest', (['req_data'], {}), '(req_data)\n', (18780, 18790), False, 'from core.api import AggregatedCitationsRequest\n'), ((18987, 19023), 'core.api.AggregatedCitationsRequest', 'AggregatedCitationsRequest', (['req_data'], {}), '(req_data)\n', (19013, 19023), False, 'from core.api import AggregatedCitationsRequest\n'), ((19222, 19258), 'core.api.AggregatedCitationsRequest', 'AggregatedCitationsRequest', (['req_data'], {}), '(req_data)\n', (19248, 19258), False, 'from core.api import AggregatedCitationsRequest\n'), ((12637, 12662), 'core.api.OneClaimRequest', 
'OneClaimRequest', (['req_data'], {}), '(req_data)\n', (12652, 12662), False, 'from core.api import OneClaimRequest\n')] |
#%% Setup
from matplotlib import rc
rc("font", **{"family": "serif", "serif": ["Computer Modern Roman"], "size": 20})
rc("text", usetex=True)
import os
import numpy as np
import mpmath as mp
import matplotlib.pyplot as plt
from scipy.special import lambertw
from scipy import optimize
# path = os.path.expanduser(
# "~\\Documents\\Git\\mattwthomas.com\\assets\\images\\posts\\2021\\"
# )
# os.chdir(path)
#%% alpha plot
nn = 50  # resolution of plot (number of sample points)
xmin = 0  # lower x-axis limit
xmax = 1  # upper x-axis limit
def alphabar(dk):
    """Threshold value r-bar as a function of the ratio dk = delta/k.

    Computed via the Lambert W function: 1 - W(-dk*ln(dk)) / ln(dk).
    Note scipy's lambertw returns a complex value; for dk in (0, 1) the
    imaginary part is zero.  Accepts scalars or numpy arrays.
    """
    log_ratio = np.log(dk)
    branch = lambertw(-dk * log_ratio)
    return 1 - branch / log_ratio
# Plot r-bar(delta/k) over (0, 1] and shade the region beneath it.
x = np.linspace(1 / nn, xmax, nn)
y = alphabar(x)
figure = plt.figure()
plt.plot(x, y)
plt.fill_between(x, y, alpha=0.2)
plt.axis([xmin, xmax, 0, 2])
plt.ylabel(r"$\bar{r}(\delta / k)$")
plt.xlabel(r"$\delta / k$")
plt.xticks([0, 0.25, 0.5, 0.75, 1], [0, 0.25, 0.5, 0.75, 1])
plt.yticks([0, 1, 2])
figure.set_dpi(100)
figure.set_size_inches(4, 2.5)
# NOTE(review): `optimize=True` is not a documented savefig option for the
# SVG backend — presumably a leftover from a raster export; verify it is
# accepted by the installed matplotlib version.
figure.savefig("tullock-maximum-r.svg", optimize=True, bbox_inches="tight")
# %% direct discrimination
nn = 50  # resolution of plot (number of sample points)
xmin = 0  # lower x-axis limit
xmax = 4  # upper x-axis limit
def s1(d, k=2):
    """Value s1 = k*d / (k + d)^2, plotted against delta below.

    Accepts scalars or numpy arrays; k defaults to 2.
    """
    denominator = (k + d) ** 2
    return (k * d) / denominator
# Plot s1 against delta for k = 2; axes labelled in units of k.
x = np.linspace(xmin, xmax, nn)
y = s1(x, k=2)
figure = plt.figure()
plt.plot(x, y)
plt.axis([xmin, xmax, 0, 0.3])
plt.ylabel(r"$s_1$")
plt.xlabel(r"$\delta$")
plt.xticks([0, 1, 2], ["0", "1", "k"])
plt.yticks([0])
figure.set_dpi(100)
figure.set_size_inches(4, 2.5)
figure.savefig("tullock-direct-discrimination.svg", bbox_inches="tight")
# %% covert discrimination
nn = 1000  # resolution of plot (number of sample points)
xmin = 1  # lower x-axis limit (k starts at 1)
xmax = 10  # upper x-axis limit
# Start just above xmin to avoid log(1) = 0 in alphainner's 1/log(k) term.
x = np.linspace(xmin + 1 / nn, xmax, nn)
# Create inner loop
def alphainner(a, k):
    """Residual function a - (1/ln k) / (1 - 2/(1 + k^a)).

    Its root in `a` (for each k) is found numerically with scipy's
    optimize.root below.  Accepts scalars or numpy arrays.
    """
    inv_log_k = 1 / np.log(k)
    gap = 1 - 2 / (1 + k ** a)
    return a - inv_log_k / gap
# make an initial guess
f10 = 1 / np.log(x)
f20 = 1 - (2 / (1 + x))
# Solve alphainner(a, k) = 0 for each k in x, starting from f10/f20.
y = optimize.root(alphainner, x0=(f10 / f20), args=(x))
figure = plt.figure()
plt.plot(x, y.x)
# Overlay the closed-form bound for comparison.
plt.plot(x, alphabar(1 / x))
plt.axis([xmin, xmax, 0, 2])
plt.ylabel(r"$r$")
plt.xlabel(r"$k$")
plt.xticks(np.linspace(1, xmax, xmax))
plt.yticks([0, 1, 2])
plt.legend([r"$r^\star(k)$", r"$\bar{r}(1/k)$"])
figure.set_dpi(100)
figure.set_size_inches(4, 2.5)
figure.savefig("tullock-covert-discrimination.svg", bbox_inches="tight")
# %% revenue graph
nn = 1000  # resolution of plot (number of sample points)
xmin = 0  # lower x-axis limit
xmax = 3  # upper x-axis limit
@np.vectorize
def revenue(r, k=2, d=1):
    """Piecewise revenue as a function of r, split at alphabar(d/k) and 2.

    Vectorized over r (and k, d) via np.vectorize so it can be plotted
    directly against a linspace.
    """
    if r <= alphabar(d / k):
        # Low-r region: Tullock contest-success-function expression.
        return (1 + 1 / k) * ((r * (k * d) ** r) / (k ** r + d ** r) ** 2)
    if r > 2:
        # High-r region: constant value, independent of r.
        return (d / k) * ((1 + k) / (2 * k))
    # Intermediate region: the constant value scaled by a factor of r.
    scale = (2 / r) * (r - 1) ** ((r - 1) / r)
    return scale * (d / k) * ((1 + k) / (2 * k))
# Plot revenue curves for two delta/k ratios with reference lines at the
# regime boundaries alphabar(1/1.5), alphabar(1/8), and r = 2.
x = np.linspace(xmin, xmax, nn)
y2 = revenue(x, k=1.5)
y6 = revenue(x, k=8)
figure = plt.figure()
plt.plot(x, y2)
plt.plot(x, y6)
# plt.axis([xmin, xmax, 0, 0.42])
plt.ylabel("Revenue")
plt.xlabel(r"$r$")
plt.xticks(
    [0, alphabar(1 / 8), alphabar(1 / 1.5), 2],
    ["0", r"$\bar{r}(1/8)$", r"$\bar{r}(2/3)$", "2"],
)
plt.vlines(alphabar(1 / 1.5), ymin=0, ymax=max(y2), color="C0", linestyle="dashed")
plt.vlines(alphabar(1 / 8), ymin=0, ymax=revenue(alphabar(1 / 8), k=8), color="C1", linestyle="dashed")
plt.vlines(2, ymin=0, ymax=max(y2), color="grey", linestyle="dashed")
plt.yticks([0])
plt.legend([r"$\delta/k = 2/3$", r"$\delta/k = 1/8$"], loc="upper left")
figure.set_dpi(100)
figure.set_size_inches(4, 2.5)
figure.savefig("tullock-covert-revenue.svg", bbox_inches="tight")
# %%
| [
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"scipy.special.lambertw",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"matplotlib.pyplot.fill_between",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"matplotlib.rc",
"scipy.optimize.root... | [((37, 122), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'serif', 'serif': ['Computer Modern Roman'], 'size':\n 20})\n", (39, 122), False, 'from matplotlib import rc\n'), ((119, 142), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (121, 142), False, 'from matplotlib import rc\n'), ((581, 610), 'numpy.linspace', 'np.linspace', (['(1 / nn)', 'xmax', 'nn'], {}), '(1 / nn, xmax, nn)\n', (592, 610), True, 'import numpy as np\n'), ((637, 649), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (647, 649), True, 'import matplotlib.pyplot as plt\n'), ((650, 664), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (658, 664), True, 'import matplotlib.pyplot as plt\n'), ((665, 698), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'y'], {'alpha': '(0.2)'}), '(x, y, alpha=0.2)\n', (681, 698), True, 'import matplotlib.pyplot as plt\n'), ((699, 727), 'matplotlib.pyplot.axis', 'plt.axis', (['[xmin, xmax, 0, 2]'], {}), '([xmin, xmax, 0, 2])\n', (707, 727), True, 'import matplotlib.pyplot as plt\n'), ((728, 765), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\bar{r}(\\\\delta / k)$"""'], {}), "('$\\\\bar{r}(\\\\delta / k)$')\n", (738, 765), True, 'import matplotlib.pyplot as plt\n'), ((765, 792), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\delta / k$"""'], {}), "('$\\\\delta / k$')\n", (775, 792), True, 'import matplotlib.pyplot as plt\n'), ((793, 853), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 0.25, 0.5, 0.75, 1]', '[0, 0.25, 0.5, 0.75, 1]'], {}), '([0, 0.25, 0.5, 0.75, 1], [0, 0.25, 0.5, 0.75, 1])\n', (803, 853), True, 'import matplotlib.pyplot as plt\n'), ((854, 875), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (864, 875), True, 'import matplotlib.pyplot as plt\n'), ((1139, 1166), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nn'], {}), '(xmin, xmax, nn)\n', (1150, 1166), True, 'import 
numpy as np\n'), ((1192, 1204), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1202, 1204), True, 'import matplotlib.pyplot as plt\n'), ((1205, 1219), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1213, 1219), True, 'import matplotlib.pyplot as plt\n'), ((1220, 1250), 'matplotlib.pyplot.axis', 'plt.axis', (['[xmin, xmax, 0, 0.3]'], {}), '([xmin, xmax, 0, 0.3])\n', (1228, 1250), True, 'import matplotlib.pyplot as plt\n'), ((1251, 1270), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$s_1$"""'], {}), "('$s_1$')\n", (1261, 1270), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1295), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\delta$"""'], {}), "('$\\\\delta$')\n", (1282, 1295), True, 'import matplotlib.pyplot as plt\n'), ((1296, 1334), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2]', "['0', '1', 'k']"], {}), "([0, 1, 2], ['0', '1', 'k'])\n", (1306, 1334), True, 'import matplotlib.pyplot as plt\n'), ((1335, 1350), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0]'], {}), '([0])\n', (1345, 1350), True, 'import matplotlib.pyplot as plt\n'), ((1560, 1596), 'numpy.linspace', 'np.linspace', (['(xmin + 1 / nn)', 'xmax', 'nn'], {}), '(xmin + 1 / nn, xmax, nn)\n', (1571, 1596), True, 'import numpy as np\n'), ((1793, 1840), 'scipy.optimize.root', 'optimize.root', (['alphainner'], {'x0': '(f10 / f20)', 'args': 'x'}), '(alphainner, x0=f10 / f20, args=x)\n', (1806, 1840), False, 'from scipy import optimize\n'), ((1855, 1867), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1865, 1867), True, 'import matplotlib.pyplot as plt\n'), ((1868, 1884), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y.x'], {}), '(x, y.x)\n', (1876, 1884), True, 'import matplotlib.pyplot as plt\n'), ((1914, 1942), 'matplotlib.pyplot.axis', 'plt.axis', (['[xmin, xmax, 0, 2]'], {}), '([xmin, xmax, 0, 2])\n', (1922, 1942), True, 'import matplotlib.pyplot as plt\n'), ((1943, 1960), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$r$"""'], 
{}), "('$r$')\n", (1953, 1960), True, 'import matplotlib.pyplot as plt\n'), ((1962, 1979), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k$"""'], {}), "('$k$')\n", (1972, 1979), True, 'import matplotlib.pyplot as plt\n'), ((2020, 2041), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2030, 2041), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2090), 'matplotlib.pyplot.legend', 'plt.legend', (["['$r^\\\\star(k)$', '$\\\\bar{r}(1/k)$']"], {}), "(['$r^\\\\star(k)$', '$\\\\bar{r}(1/k)$'])\n", (2052, 2090), True, 'import matplotlib.pyplot as plt\n'), ((2615, 2642), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nn'], {}), '(xmin, xmax, nn)\n', (2626, 2642), True, 'import numpy as np\n'), ((2697, 2709), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2707, 2709), True, 'import matplotlib.pyplot as plt\n'), ((2710, 2725), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {}), '(x, y2)\n', (2718, 2725), True, 'import matplotlib.pyplot as plt\n'), ((2726, 2741), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y6'], {}), '(x, y6)\n', (2734, 2741), True, 'import matplotlib.pyplot as plt\n'), ((2776, 2797), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Revenue"""'], {}), "('Revenue')\n", (2786, 2797), True, 'import matplotlib.pyplot as plt\n'), ((2798, 2815), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$r$"""'], {}), "('$r$')\n", (2808, 2815), True, 'import matplotlib.pyplot as plt\n'), ((3191, 3206), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0]'], {}), '([0])\n', (3201, 3206), True, 'import matplotlib.pyplot as plt\n'), ((3207, 3279), 'matplotlib.pyplot.legend', 'plt.legend', (["['$\\\\delta/k = 2/3$', '$\\\\delta/k = 1/8$']"], {'loc': '"""upper left"""'}), "(['$\\\\delta/k = 2/3$', '$\\\\delta/k = 1/8$'], loc='upper left')\n", (3217, 3279), True, 'import matplotlib.pyplot as plt\n'), ((509, 519), 'numpy.log', 'np.log', (['dk'], {}), '(dk)\n', (515, 519), True, 'import numpy as np\n'), ((528, 549), 
'scipy.special.lambertw', 'lambertw', (['(-dk * logdk)'], {}), '(-dk * logdk)\n', (536, 549), False, 'from scipy.special import lambertw\n'), ((1754, 1763), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (1760, 1763), True, 'import numpy as np\n'), ((1992, 2018), 'numpy.linspace', 'np.linspace', (['(1)', 'xmax', 'xmax'], {}), '(1, xmax, xmax)\n', (2003, 2018), True, 'import numpy as np\n'), ((1653, 1662), 'numpy.log', 'np.log', (['k'], {}), '(k)\n', (1659, 1662), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import cudasift
import cv2
import numpy as np
from profiling import TaggedTimer
def main():
sift = cudasift.PyCudaSift(dev_num=0)
timr = TaggedTimer()
filename = "../data/CY_279b46b9_1575825158217_1575825184058.jpg"
# filename = "/home/jfinken/projects/here/sp/jfinken/faiss_gpu/AIC_query2.jpg"
data = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
# for writing out keypoints
img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
height, width = data.shape
print(f"Input image: {width}x{height}")
# data = np.ascontiguousarray(data, dtype=np.float32).ravel()
# data = np.array(data, dtype=np.float32).ravel()
data = img.astype(np.float32).ravel()
timr("np.ascontiguousarray")
NUM_RUNS = 3
# Allocate CUDA memory for the source image: once
sift.allocate_cuda_image(
width, height, cudasift.i_align_up(width, 128), False, None, None
)
timr("allocate_cuda_image")
# Allocate storage for internal results
sift.init_sift_data(max_pts=32768, host=True, dev=True)
sift.allocate_sift_temp_memory(width, height, 5, False)
timr("allocate_sift_temp_memory")
for j in range(NUM_RUNS):
# Convenient and temporally performant optimization:
# Reuse CUDA malloc-ed device memory
# Simply download this input image to the device
sift.download_cuda_image(data)
timr("download_cuda_image")
# Run
sift.extract_sift(
# num_octaves=5, init_blur=1.0, thresh=2.0, lowest_scale=0.0, scale_up=False
num_octaves=5,
init_blur=1.0,
thresh=2.0,
lowest_scale=0.0,
scale_up=False,
)
timr("extract_sift")
print(timr)
# Get descriptors and keypoints
desc, kp = sift.get_features()
desc_np = np.asarray(desc)
kp_np = np.asarray(kp)
timr(
f"get_features done (num_pts={desc_np.shape[0]}, desc_np.shape={desc_np.shape}, kp_np.shape={kp_np.shape})"
)
print(timr)
"""
# Debug: make cv2 keypoints
kps = []
for i in range(kp_np.shape[0]):
# print(f"keypt @ {desc[i].get('xpos')}, {desc[i].get('ypos')}")
kps.append(
cv2.KeyPoint(
x=int(kp_np[i, 0]),
y=int(kp_np[i, 1]),
_size=kp_np[i, 2],
_angle=kp_np[i, 3],
)
)
timr("for-loop over keypoints")
print(timr)
img = cv2.drawKeypoints(
img,
kps,
outImage=np.array([]),
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
)
timr("cv2.drawKeypoints")
cv2.imwrite(f"woo.jpg", img)
# timr("cv2.imwrite")
"""
if __name__ == "__main__":
main()
| [
"numpy.asarray",
"cudasift.i_align_up",
"cudasift.PyCudaSift",
"profiling.TaggedTimer",
"cv2.imread"
] | [((129, 159), 'cudasift.PyCudaSift', 'cudasift.PyCudaSift', ([], {'dev_num': '(0)'}), '(dev_num=0)\n', (148, 159), False, 'import cudasift\n'), ((171, 184), 'profiling.TaggedTimer', 'TaggedTimer', ([], {}), '()\n', (182, 184), False, 'from profiling import TaggedTimer\n'), ((349, 391), 'cv2.imread', 'cv2.imread', (['filename', 'cv2.IMREAD_GRAYSCALE'], {}), '(filename, cv2.IMREAD_GRAYSCALE)\n', (359, 391), False, 'import cv2\n'), ((434, 476), 'cv2.imread', 'cv2.imread', (['filename', 'cv2.IMREAD_GRAYSCALE'], {}), '(filename, cv2.IMREAD_GRAYSCALE)\n', (444, 476), False, 'import cv2\n'), ((873, 904), 'cudasift.i_align_up', 'cudasift.i_align_up', (['width', '(128)'], {}), '(width, 128)\n', (892, 904), False, 'import cudasift\n'), ((1862, 1878), 'numpy.asarray', 'np.asarray', (['desc'], {}), '(desc)\n', (1872, 1878), True, 'import numpy as np\n'), ((1895, 1909), 'numpy.asarray', 'np.asarray', (['kp'], {}), '(kp)\n', (1905, 1909), True, 'import numpy as np\n')] |
# - *- coding: utf- 8 - *-
#decorator fonksiyonlar, pythonda fonksiyonlarımızın dinamik olarak ekstra özellik ekledigimiz fonksiyonlarıdr
#kod tekrarını engeller
#flask django gibi frameworklerde cok kullanılır
import time
def calculate_time(func):
def wrapper(numbers):
start = time.time()
result = func(numbers)
end = time.time()
print(func.__name__ + " " + str(end-start) + " saniye surdu.")
return result
return wrapper
@calculate_time
def square(numbers):
result = list()
for i in numbers:
result.append(i**2)
return result
@calculate_time
def cube(numbers):
result = list()
for i in numbers:
result.append(i**3)
return result
square(range(500))
cube(range(500))
| [
"time.time"
] | [((294, 305), 'time.time', 'time.time', ([], {}), '()\n', (303, 305), False, 'import time\n'), ((353, 364), 'time.time', 'time.time', ([], {}), '()\n', (362, 364), False, 'import time\n')] |
#!/usr/bin/env python
"""
Perform the Mann-Whitney U test, the Kolmogorov-Smirnov test, and the Student's
t-test for the following ensembles:
- GPU double precision (reference & control)
- CPU double precision
- GPU single precision
- GPU double precision with additional explicit diffusion
Make sure to compile the cpp files for the Mann-Whitney U test and the
Kolmogorov-Smirnov test first before running this script (see mannwhitneyu.cpp
and kolmogorov_smirnov.cpp).
Copyright (c) 2021 ETH Zurich, <NAME>
MIT License
"""
import numpy as np
import xarray as xr
import pickle
import mannwhitneyu as mwu
import kolmogorov_smirnov as ks
rpert = 'e4' # prefix
n_runs = 50 # total number of runs
n_sel = 100 # how many times we randomly select runs
alpha = 0.05 # significance level
nm = 20 # members per ensemble
u_crit = 127 # nm = 20
t_crit = 2.024 # nm = 20
replace = False # to bootstrap or not to bootstrap
nbins = 100 # Kolmogorov-Smirnov
# Some arrays to make life easier
tests = ['mwu', 'ks', 't']
comparisons = ['c', 'cpu', 'sp', 'diff']
# Variable
variables = ['t_850hPa', 'fi_500hPa', 'u_10m', 't_2m', 'precip', 'asob_t',
'athb_t', 'ps']
path_gpu = '../data/10d_gpu_cpu_sp_diff/gpu_dycore/'
path_cpu = '../data/10d_gpu_cpu_sp_diff/cpu_nodycore/'
path_gpu_sp = '../data/10d_gpu_cpu_sp_diff/gpu_dycore_sp/'
path_gpu_diff = '../data/10d_gpu_cpu_sp_diff/gpu_dycore_diff/'
# Final rejection rates
rej_rates = {}
for comp in comparisons:
rej_rates[comp] = {}
for vname in variables:
rej_rates[comp][vname] = {}
runs_r = {}
runs_c = {}
runs_cpu = {}
runs_sp = {}
runs_diff = {}
# Load data for gpu (reference and control) and cpu
for i in range(n_runs):
i_str_r = str(i).zfill(4)
i_str_c = str(i+n_runs).zfill(4)
fname_r = path_gpu + rpert + '_' + i_str_r + '.nc'
fname_c = path_gpu + rpert + '_' + i_str_c + '.nc'
fname_cpu = path_cpu + rpert + '_' + i_str_r + '.nc'
fname_sp = path_gpu_sp + rpert + '_' + i_str_r + '.nc'
fname_diff = path_gpu_diff + rpert + '_' + i_str_r + '.nc'
runs_r[i] = {}
runs_c[i] = {}
runs_cpu[i] = {}
runs_sp[i] = {}
runs_diff[i] = {}
runs_r[i]['dset'] = xr.open_dataset(fname_r)
runs_c[i]['dset'] = xr.open_dataset(fname_c)
runs_cpu[i]['dset'] = xr.open_dataset(fname_cpu)
runs_sp[i]['dset'] = xr.open_dataset(fname_sp)
runs_diff[i]['dset'] = xr.open_dataset(fname_diff)
# Test for each variable
for vname in variables:
print("----------------------------")
print("Working on " + vname + " ...")
print("----------------------------")
# initialize arrays
nt, ny, nx = runs_r[0]['dset'][vname].shape
values_r = np.zeros((nt, ny, nx, nm))
values_c = np.zeros((nt, ny, nx, nm))
values_cpu = np.zeros((nt, ny, nx, nm))
values_sp = np.zeros((nt, ny, nx, nm))
values_diff = np.zeros((nt, ny, nx, nm))
# For the results
results = {}
for test in tests:
results[test] = {}
for comp in comparisons:
results[test][comp] = np.zeros((n_sel, nt))
# Do test multiple times with random selection of ensemble members
for s in range(n_sel):
if ((s+1) % 10 == 0):
print(str(s+1) + " / " + str(n_sel))
# Pick random samples for comparison
idxs_r = np.random.choice(np.arange(n_runs), nm, replace=replace)
idxs_c = np.random.choice(np.arange(n_runs), nm, replace=replace)
idxs_cpu = np.random.choice(np.arange(n_runs), nm, replace=replace)
idxs_sp = np.random.choice(np.arange(n_runs), nm, replace=replace)
idxs_diff = np.random.choice(np.arange(n_runs), nm, replace=replace)
# ============================================================
# Mann-Whitney U test
# ============================================================
test = 'mwu'
# Put together arrays
for i in range(nm):
values_r[:,:,:,i] = runs_r[idxs_r[i]]['dset'][vname].values
values_c[:,:,:,i] = runs_c[idxs_c[i]]['dset'][vname].values
values_cpu[:,:,:,i] = runs_cpu[idxs_cpu[i]]['dset'][vname].values
values_sp[:,:,:,i] = runs_sp[idxs_sp[i]]['dset'][vname].values
values_diff[:,:,:,i] = runs_diff[idxs_diff[i]]['dset'][vname].values
# Call test
reject_c = mwu.mwu(values_r, values_c, u_crit)
reject_cpu = mwu.mwu(values_r, values_cpu, u_crit)
reject_sp = mwu.mwu(values_r, values_sp, u_crit)
reject_diff = mwu.mwu(values_r, values_diff, u_crit)
results[test]['c'][s] = np.mean(reject_c, axis=(1,2))
results[test]['cpu'][s] = np.mean(reject_cpu, axis=(1,2))
results[test]['sp'][s] = np.mean(reject_sp, axis=(1,2))
results[test]['diff'][s] = np.mean(reject_diff, axis=(1,2))
# ============================================================
# Kolmogorov-Smirnov test
# ============================================================
test = 'ks'
# Call test
reject_c = ks.ks(values_r, values_c, nbins)
reject_cpu = ks.ks(values_r, values_cpu, nbins)
reject_sp = ks.ks(values_r, values_sp, nbins)
reject_diff = ks.ks(values_r, values_diff, nbins)
results[test]['c'][s] = np.mean(reject_c, axis=(1,2))
results[test]['cpu'][s] = np.mean(reject_cpu, axis=(1,2))
results[test]['sp'][s] = np.mean(reject_sp, axis=(1,2))
results[test]['diff'][s] = np.mean(reject_diff, axis=(1,2))
# ============================================================
# Student's t-test
# ============================================================
test = 't'
# Means
mean_r = np.mean(values_r, axis=-1)
mean_c = np.mean(values_c, axis=-1)
mean_cpu = np.mean(values_cpu, axis=-1)
mean_sp = np.mean(values_sp, axis=-1)
mean_diff = np.mean(values_diff, axis=-1)
# Variance
var_r = np.zeros((nt, ny, nx))
var_c = np.zeros((nt, ny, nx))
var_cpu = np.zeros((nt, ny, nx))
var_sp = np.zeros((nt, ny, nx))
var_diff = np.zeros((nt, ny, nx))
for i in range(nm):
var_r += (values_r[:,:,:,i] - mean_r)**2
var_c += (values_c[:,:,:,i] - mean_c)**2
var_cpu += (values_cpu[:,:,:,i] - mean_cpu)**2
var_sp += (values_sp[:,:,:,i] - mean_sp)**2
var_diff += (values_diff[:,:,:,i] - mean_diff)**2
# Unbiased estimator for standard deviation
var_r /= (nm-1)
var_c /= (nm-1)
var_cpu /= (nm-1)
var_sp /= (nm-1)
var_diff /= (nm-1)
stdev_c = np.sqrt(((nm-1) * var_r + (nm-1) * var_c) / (2*nm - 2))
stdev_cpu = np.sqrt(((nm-1) * var_r + (nm-1) * var_cpu) / (2*nm - 2))
stdev_sp = np.sqrt(((nm-1) * var_r + (nm-1) * var_sp) / (2*nm - 2))
stdev_diff = np.sqrt(((nm-1) * var_r + (nm-1) * var_diff) / (2*nm - 2))
# t-value
t_c = np.abs((mean_r - mean_c) / (stdev_c * np.sqrt(2/nm)))
t_cpu = np.abs((mean_r - mean_cpu) / (stdev_cpu * np.sqrt(2/nm)))
t_sp = np.abs((mean_r - mean_sp) / (stdev_sp * np.sqrt(2/nm)))
t_diff = np.abs((mean_r - mean_diff) / (stdev_diff * np.sqrt(2/nm)))
# Rejection arrays
reject_c = t_c > t_crit
reject_cpu = t_cpu > t_crit
reject_sp = t_sp > t_crit
reject_diff = t_diff > t_crit
results[test]['c'][s] = np.mean(reject_c, axis=(1,2))
results[test]['cpu'][s] = np.mean(reject_cpu, axis=(1,2))
results[test]['sp'][s] = np.mean(reject_sp, axis=(1,2))
results[test]['diff'][s] = np.mean(reject_diff, axis=(1,2))
# Store results
for comp in comparisons:
for test in tests:
res = results[test][comp]
rej_rates[comp][vname][test] = {}
rr = rej_rates[comp][vname][test]
rr['q_05'] = np.quantile(res, 0.5, axis=0)
rr['q_005'] = np.quantile(res, 0.05, axis=0)
rr['q_095'] = np.quantile(res, 0.95, axis=0)
rr['mean'] = np.mean(res, axis=0)
rr['min'] = np.min(res, axis=0)
rr['max'] = np.max(res, axis=0)
rr['reject'] = res
# Save rejection rates
with open('rr_mwu_ks_studt.pickle', 'wb') as handle:
pickle.dump(rej_rates, handle)
| [
"numpy.mean",
"pickle.dump",
"numpy.sqrt",
"kolmogorov_smirnov.ks",
"numpy.max",
"mannwhitneyu.mwu",
"numpy.zeros",
"numpy.quantile",
"numpy.min",
"xarray.open_dataset",
"numpy.arange"
] | [((2243, 2267), 'xarray.open_dataset', 'xr.open_dataset', (['fname_r'], {}), '(fname_r)\n', (2258, 2267), True, 'import xarray as xr\n'), ((2292, 2316), 'xarray.open_dataset', 'xr.open_dataset', (['fname_c'], {}), '(fname_c)\n', (2307, 2316), True, 'import xarray as xr\n'), ((2343, 2369), 'xarray.open_dataset', 'xr.open_dataset', (['fname_cpu'], {}), '(fname_cpu)\n', (2358, 2369), True, 'import xarray as xr\n'), ((2395, 2420), 'xarray.open_dataset', 'xr.open_dataset', (['fname_sp'], {}), '(fname_sp)\n', (2410, 2420), True, 'import xarray as xr\n'), ((2448, 2475), 'xarray.open_dataset', 'xr.open_dataset', (['fname_diff'], {}), '(fname_diff)\n', (2463, 2475), True, 'import xarray as xr\n'), ((2740, 2766), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx, nm)'], {}), '((nt, ny, nx, nm))\n', (2748, 2766), True, 'import numpy as np\n'), ((2782, 2808), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx, nm)'], {}), '((nt, ny, nx, nm))\n', (2790, 2808), True, 'import numpy as np\n'), ((2826, 2852), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx, nm)'], {}), '((nt, ny, nx, nm))\n', (2834, 2852), True, 'import numpy as np\n'), ((2869, 2895), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx, nm)'], {}), '((nt, ny, nx, nm))\n', (2877, 2895), True, 'import numpy as np\n'), ((2914, 2940), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx, nm)'], {}), '((nt, ny, nx, nm))\n', (2922, 2940), True, 'import numpy as np\n'), ((8375, 8405), 'pickle.dump', 'pickle.dump', (['rej_rates', 'handle'], {}), '(rej_rates, handle)\n', (8386, 8405), False, 'import pickle\n'), ((4390, 4425), 'mannwhitneyu.mwu', 'mwu.mwu', (['values_r', 'values_c', 'u_crit'], {}), '(values_r, values_c, u_crit)\n', (4397, 4425), True, 'import mannwhitneyu as mwu\n'), ((4447, 4484), 'mannwhitneyu.mwu', 'mwu.mwu', (['values_r', 'values_cpu', 'u_crit'], {}), '(values_r, values_cpu, u_crit)\n', (4454, 4484), True, 'import mannwhitneyu as mwu\n'), ((4505, 4541), 'mannwhitneyu.mwu', 'mwu.mwu', (['values_r', 'values_sp', 'u_crit'], {}), '(values_r, 
values_sp, u_crit)\n', (4512, 4541), True, 'import mannwhitneyu as mwu\n'), ((4564, 4602), 'mannwhitneyu.mwu', 'mwu.mwu', (['values_r', 'values_diff', 'u_crit'], {}), '(values_r, values_diff, u_crit)\n', (4571, 4602), True, 'import mannwhitneyu as mwu\n'), ((4635, 4665), 'numpy.mean', 'np.mean', (['reject_c'], {'axis': '(1, 2)'}), '(reject_c, axis=(1, 2))\n', (4642, 4665), True, 'import numpy as np\n'), ((4699, 4731), 'numpy.mean', 'np.mean', (['reject_cpu'], {'axis': '(1, 2)'}), '(reject_cpu, axis=(1, 2))\n', (4706, 4731), True, 'import numpy as np\n'), ((4764, 4795), 'numpy.mean', 'np.mean', (['reject_sp'], {'axis': '(1, 2)'}), '(reject_sp, axis=(1, 2))\n', (4771, 4795), True, 'import numpy as np\n'), ((4830, 4863), 'numpy.mean', 'np.mean', (['reject_diff'], {'axis': '(1, 2)'}), '(reject_diff, axis=(1, 2))\n', (4837, 4863), True, 'import numpy as np\n'), ((5099, 5131), 'kolmogorov_smirnov.ks', 'ks.ks', (['values_r', 'values_c', 'nbins'], {}), '(values_r, values_c, nbins)\n', (5104, 5131), True, 'import kolmogorov_smirnov as ks\n'), ((5153, 5187), 'kolmogorov_smirnov.ks', 'ks.ks', (['values_r', 'values_cpu', 'nbins'], {}), '(values_r, values_cpu, nbins)\n', (5158, 5187), True, 'import kolmogorov_smirnov as ks\n'), ((5208, 5241), 'kolmogorov_smirnov.ks', 'ks.ks', (['values_r', 'values_sp', 'nbins'], {}), '(values_r, values_sp, nbins)\n', (5213, 5241), True, 'import kolmogorov_smirnov as ks\n'), ((5264, 5299), 'kolmogorov_smirnov.ks', 'ks.ks', (['values_r', 'values_diff', 'nbins'], {}), '(values_r, values_diff, nbins)\n', (5269, 5299), True, 'import kolmogorov_smirnov as ks\n'), ((5332, 5362), 'numpy.mean', 'np.mean', (['reject_c'], {'axis': '(1, 2)'}), '(reject_c, axis=(1, 2))\n', (5339, 5362), True, 'import numpy as np\n'), ((5396, 5428), 'numpy.mean', 'np.mean', (['reject_cpu'], {'axis': '(1, 2)'}), '(reject_cpu, axis=(1, 2))\n', (5403, 5428), True, 'import numpy as np\n'), ((5461, 5492), 'numpy.mean', 'np.mean', (['reject_sp'], {'axis': '(1, 2)'}), '(reject_sp, 
axis=(1, 2))\n', (5468, 5492), True, 'import numpy as np\n'), ((5527, 5560), 'numpy.mean', 'np.mean', (['reject_diff'], {'axis': '(1, 2)'}), '(reject_diff, axis=(1, 2))\n', (5534, 5560), True, 'import numpy as np\n'), ((5782, 5808), 'numpy.mean', 'np.mean', (['values_r'], {'axis': '(-1)'}), '(values_r, axis=-1)\n', (5789, 5808), True, 'import numpy as np\n'), ((5826, 5852), 'numpy.mean', 'np.mean', (['values_c'], {'axis': '(-1)'}), '(values_c, axis=-1)\n', (5833, 5852), True, 'import numpy as np\n'), ((5872, 5900), 'numpy.mean', 'np.mean', (['values_cpu'], {'axis': '(-1)'}), '(values_cpu, axis=-1)\n', (5879, 5900), True, 'import numpy as np\n'), ((5919, 5946), 'numpy.mean', 'np.mean', (['values_sp'], {'axis': '(-1)'}), '(values_sp, axis=-1)\n', (5926, 5946), True, 'import numpy as np\n'), ((5967, 5996), 'numpy.mean', 'np.mean', (['values_diff'], {'axis': '(-1)'}), '(values_diff, axis=-1)\n', (5974, 5996), True, 'import numpy as np\n'), ((6033, 6055), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx)'], {}), '((nt, ny, nx))\n', (6041, 6055), True, 'import numpy as np\n'), ((6072, 6094), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx)'], {}), '((nt, ny, nx))\n', (6080, 6094), True, 'import numpy as np\n'), ((6113, 6135), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx)'], {}), '((nt, ny, nx))\n', (6121, 6135), True, 'import numpy as np\n'), ((6153, 6175), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx)'], {}), '((nt, ny, nx))\n', (6161, 6175), True, 'import numpy as np\n'), ((6195, 6217), 'numpy.zeros', 'np.zeros', (['(nt, ny, nx)'], {}), '((nt, ny, nx))\n', (6203, 6217), True, 'import numpy as np\n'), ((6726, 6787), 'numpy.sqrt', 'np.sqrt', (['(((nm - 1) * var_r + (nm - 1) * var_c) / (2 * nm - 2))'], {}), '(((nm - 1) * var_r + (nm - 1) * var_c) / (2 * nm - 2))\n', (6733, 6787), True, 'import numpy as np\n'), ((6802, 6865), 'numpy.sqrt', 'np.sqrt', (['(((nm - 1) * var_r + (nm - 1) * var_cpu) / (2 * nm - 2))'], {}), '(((nm - 1) * var_r + (nm - 1) * var_cpu) / (2 * nm - 2))\n', (6809, 6865), 
True, 'import numpy as np\n'), ((6879, 6941), 'numpy.sqrt', 'np.sqrt', (['(((nm - 1) * var_r + (nm - 1) * var_sp) / (2 * nm - 2))'], {}), '(((nm - 1) * var_r + (nm - 1) * var_sp) / (2 * nm - 2))\n', (6886, 6941), True, 'import numpy as np\n'), ((6957, 7021), 'numpy.sqrt', 'np.sqrt', (['(((nm - 1) * var_r + (nm - 1) * var_diff) / (2 * nm - 2))'], {}), '(((nm - 1) * var_r + (nm - 1) * var_diff) / (2 * nm - 2))\n', (6964, 7021), True, 'import numpy as np\n'), ((7525, 7555), 'numpy.mean', 'np.mean', (['reject_c'], {'axis': '(1, 2)'}), '(reject_c, axis=(1, 2))\n', (7532, 7555), True, 'import numpy as np\n'), ((7589, 7621), 'numpy.mean', 'np.mean', (['reject_cpu'], {'axis': '(1, 2)'}), '(reject_cpu, axis=(1, 2))\n', (7596, 7621), True, 'import numpy as np\n'), ((7654, 7685), 'numpy.mean', 'np.mean', (['reject_sp'], {'axis': '(1, 2)'}), '(reject_sp, axis=(1, 2))\n', (7661, 7685), True, 'import numpy as np\n'), ((7720, 7753), 'numpy.mean', 'np.mean', (['reject_diff'], {'axis': '(1, 2)'}), '(reject_diff, axis=(1, 2))\n', (7727, 7753), True, 'import numpy as np\n'), ((3098, 3119), 'numpy.zeros', 'np.zeros', (['(n_sel, nt)'], {}), '((n_sel, nt))\n', (3106, 3119), True, 'import numpy as np\n'), ((3378, 3395), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (3387, 3395), True, 'import numpy as np\n'), ((3452, 3469), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (3461, 3469), True, 'import numpy as np\n'), ((3528, 3545), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (3537, 3545), True, 'import numpy as np\n'), ((3603, 3620), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (3612, 3620), True, 'import numpy as np\n'), ((3680, 3697), 'numpy.arange', 'np.arange', (['n_runs'], {}), '(n_runs)\n', (3689, 3697), True, 'import numpy as np\n'), ((7985, 8014), 'numpy.quantile', 'np.quantile', (['res', '(0.5)'], {'axis': '(0)'}), '(res, 0.5, axis=0)\n', (7996, 8014), True, 'import numpy as np\n'), ((8041, 8071), 'numpy.quantile', 
'np.quantile', (['res', '(0.05)'], {'axis': '(0)'}), '(res, 0.05, axis=0)\n', (8052, 8071), True, 'import numpy as np\n'), ((8098, 8128), 'numpy.quantile', 'np.quantile', (['res', '(0.95)'], {'axis': '(0)'}), '(res, 0.95, axis=0)\n', (8109, 8128), True, 'import numpy as np\n'), ((8154, 8174), 'numpy.mean', 'np.mean', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (8161, 8174), True, 'import numpy as np\n'), ((8199, 8218), 'numpy.min', 'np.min', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (8205, 8218), True, 'import numpy as np\n'), ((8243, 8262), 'numpy.max', 'np.max', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (8249, 8262), True, 'import numpy as np\n'), ((7087, 7102), 'numpy.sqrt', 'np.sqrt', (['(2 / nm)'], {}), '(2 / nm)\n', (7094, 7102), True, 'import numpy as np\n'), ((7161, 7176), 'numpy.sqrt', 'np.sqrt', (['(2 / nm)'], {}), '(2 / nm)\n', (7168, 7176), True, 'import numpy as np\n'), ((7232, 7247), 'numpy.sqrt', 'np.sqrt', (['(2 / nm)'], {}), '(2 / nm)\n', (7239, 7247), True, 'import numpy as np\n'), ((7309, 7324), 'numpy.sqrt', 'np.sqrt', (['(2 / nm)'], {}), '(2 / nm)\n', (7316, 7324), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Tests for Yara flows."""
import functools
import psutil
import yara
import yara_procdump
from grr.client.client_actions import tempfiles
from grr.client.client_actions import yara_actions
from grr.lib import flags
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import rdf_yara
from grr.server import aff4
from grr.server import flow
from grr.server.aff4_objects import aff4_grr
from grr.server.flows.general import yara_flows
from grr.test_lib import action_mocks
from grr.test_lib import client_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
test_yara_signature = """
private rule test_rule {
meta:
desc = "Just for testing."
strings:
$s1 = { 31 32 33 34 }
condition:
$s1
}
"""
class FakeMatch(object):
strings = [(100, "$s1", "1234"), (200, "$s1", "1234")]
rule = "test_rule"
class FakeRules(object):
invocations = None
def __init__(self, matching_pids=None, timeout_pids=None):
# Creating a new FakeRule clears the invocation log.
FakeRules.invocations = []
self.matching_pids = matching_pids or []
self.timeout_pids = timeout_pids or []
def match(self, pid=None, timeout=None):
self.invocations.append((pid, timeout))
if pid and pid in self.timeout_pids:
raise yara.TimeoutError("Timeout")
if pid and pid in self.matching_pids:
return [FakeMatch()]
return []
class FakeMemoryBlock(bytearray):
def __init__(self, data="", base=0):
super(FakeMemoryBlock, self).__init__(data)
self._data = data
self.size = len(data)
self.base = base
def data(self):
return self._data
class TestYaraFlows(flow_test_lib.FlowTestsBaseclass):
"""Tests the Yara flows."""
def process(self, processes, pid=None):
if not pid:
return psutil.Process.old_target()
for p in processes:
if p.pid == pid:
return p
raise psutil.NoSuchProcess("No process with pid %d." % pid)
def _RunYaraProcessScan(self, procs, rules, ignore_grr_process=False, **kw):
client_mock = action_mocks.ActionMock(yara_actions.YaraProcessScan)
with utils.MultiStubber(
(psutil, "process_iter", lambda: procs),
(psutil, "Process", functools.partial(self.process, procs)),
(rdf_yara.YaraSignature, "GetRules", lambda self: rules)):
for s in flow_test_lib.TestFlowHelper(
yara_flows.YaraProcessScan.__name__,
client_mock,
yara_signature=test_yara_signature,
client_id=self.client_id,
ignore_grr_process=ignore_grr_process,
token=self.token,
**kw):
session_id = s
flow_obj = aff4.FACTORY.Open(session_id)
self.assertEqual(len(flow_obj.ResultCollection()), 1)
return flow_obj.ResultCollection()[0]
def setUp(self):
super(TestYaraFlows, self).setUp()
self.rules = FakeRules(matching_pids=[101, 102], timeout_pids=[103, 104])
self.procs = [
client_test_lib.MockWindowsProcess(pid=101, name="proc101.exe"),
client_test_lib.MockWindowsProcess(
pid=102, name="proc102.exe", ppid=101),
client_test_lib.MockWindowsProcess(pid=103, name="proc103.exe", ppid=1),
client_test_lib.MockWindowsProcess(
pid=104, name="proc104.exe", ppid=103),
client_test_lib.MockWindowsProcess(pid=105, name="proc105.exe", ppid=1),
client_test_lib.MockWindowsProcess(
pid=106, name="proc106.exe", ppid=104)
]
def testYaraProcessScan(self):
response = self._RunYaraProcessScan(self.procs, self.rules)
self.assertEqual(len(response.matches), 2)
self.assertEqual(len(response.misses), 2)
self.assertEqual(len(response.errors), 2)
for scan_match in response.matches:
for match in scan_match.match:
self.assertEqual(match.rule_name, "test_rule")
self.assertEqual(len(match.string_matches), 2)
for string_match in match.string_matches:
self.assertEqual(string_match.data, "1234")
self.assertEqual(string_match.string_id, "$s1")
self.assertIn(string_match.offset, [100, 200])
def testScanTimingInformation(self):
with test_lib.FakeTime(10000, increment=1):
response = self._RunYaraProcessScan(self.procs, self.rules, pids=[105])
self.assertEqual(len(response.misses), 1)
miss = response.misses[0]
self.assertEqual(miss.scan_time_us, 1 * 1e6)
with test_lib.FakeTime(10000, increment=1):
response = self._RunYaraProcessScan(self.procs, self.rules, pids=[101])
self.assertEqual(len(response.matches), 1)
match = response.matches[0]
self.assertEqual(match.scan_time_us, 1 * 1e6)
def testPIDsRestriction(self):
response = self._RunYaraProcessScan(
self.procs, self.rules, pids=[101, 103, 105])
self.assertEqual(len(response.matches), 1)
self.assertEqual(len(response.misses), 1)
self.assertEqual(len(response.errors), 1)
def testProcessRegex(self):
response = self._RunYaraProcessScan(
self.procs, self.rules, process_regex="10(3|5)")
self.assertEqual(len(response.matches), 0)
self.assertEqual(len(response.misses), 1)
self.assertEqual(len(response.errors), 1)
def testPerProcessTimeout(self):
self._RunYaraProcessScan(self.procs, self.rules, per_process_timeout=50)
self.assertEqual(len(FakeRules.invocations), 6)
for invocation in FakeRules.invocations:
pid, limit = invocation
self.assertLessEqual(101, pid)
self.assertLessEqual(pid, 106)
self.assertEqual(limit, 50)
def _RunProcessDump(self, pids=None, size_limit=None):
def FakeProcessMemoryIterator(pid=None): # pylint: disable=invalid-name
del pid
mem_blocks = [
FakeMemoryBlock("A" * 100, 1024),
FakeMemoryBlock("B" * 100, 2048),
]
for m in mem_blocks:
yield m
procs = self.procs
with utils.MultiStubber(
(psutil, "process_iter", lambda: procs),
(psutil, "Process", functools.partial(self.process, procs)),
(yara_procdump, "process_memory_iterator", FakeProcessMemoryIterator)):
client_mock = action_mocks.MultiGetFileClientMock(
yara_actions.YaraProcessDump, tempfiles.DeleteGRRTempFiles)
for s in flow_test_lib.TestFlowHelper(
yara_flows.YaraDumpProcessMemory.__name__,
client_mock,
pids=pids or [105],
size_limit=size_limit,
client_id=self.client_id,
ignore_grr_process=True,
token=self.token):
session_id = s
flow_obj = aff4.FACTORY.Open(session_id, flow.GRRFlow)
return flow_obj.ResultCollection()
def testYaraProcessDump(self):
results = self._RunProcessDump()
self.assertEqual(len(results), 3)
for result in results:
if isinstance(result, rdf_client.StatEntry):
self.assertIn("proc105.exe_105", result.pathspec.path)
image = aff4.FACTORY.Open(
result.pathspec.AFF4Path(self.client_id), aff4_grr.VFSBlobImage)
data = image.read(1000)
self.assertIn(data, ["A" * 100, "B" * 100])
elif isinstance(result, rdf_yara.YaraProcessDumpResponse):
self.assertEqual(len(result.dumped_processes), 1)
self.assertEqual(result.dumped_processes[0].process.pid, 105)
else:
self.fail("Unexpected result type %s" % type(result))
  def testYaraProcessDumpWithLimit(self):
    """With a 150-byte limit only the first region is dumped and an error is set."""
    results = self._RunProcessDump(size_limit=150)
    # Now we should only get one block (+ the YaraProcessDumpResponse), the
    # second is over the limit.
    self.assertEqual(len(results), 2)
    for result in results:
      if isinstance(result, rdf_client.StatEntry):
        self.assertIn("proc105.exe_105", result.pathspec.path)
        image = aff4.FACTORY.Open(
            result.pathspec.AFF4Path(self.client_id), aff4_grr.VFSBlobImage)
        data = image.read(1000)
        # Only the first (100-byte) region fits under the limit.
        self.assertEqual(data, "A" * 100)
      elif isinstance(result, rdf_yara.YaraProcessDumpResponse):
        self.assertEqual(len(result.dumped_processes), 1)
        self.assertEqual(result.dumped_processes[0].process.pid, 105)
        self.assertIn("limit exceeded", result.dumped_processes[0].error)
      else:
        self.fail("Unexpected result type %s" % type(result))
  def testYaraProcessDumpByDefaultErrors(self):
    """Dumping with no process restrictions must be rejected."""
    # This tests that not specifying any restrictions on the processes
    # to dump does not dump them all which would return tons of data.
    client_mock = action_mocks.MultiGetFileClientMock(
        yara_actions.YaraProcessDump, tempfiles.DeleteGRRTempFiles)
    with self.assertRaises(ValueError):
      for _ in flow_test_lib.TestFlowHelper(
          yara_flows.YaraDumpProcessMemory.__name__,
          client_mock,
          client_id=self.client_id,
          ignore_grr_process=True,
          token=self.token):
        pass
  def testDumpTimingInformation(self):
    """The dump response records the elapsed dump time in microseconds."""
    with test_lib.FakeTime(100000, 1):
      results = self._RunProcessDump()
    self.assertGreater(len(results), 1)
    self.assertIsInstance(results[0], rdf_yara.YaraProcessDumpResponse)
    self.assertEqual(len(results[0].dumped_processes), 1)
    # FakeTime advances 1s per time() call, so the dump took exactly 1e6 us.
    self.assertEqual(results[0].dumped_processes[0].dump_time_us, 1 * 1e6)
def main(argv):
  """Entry point: run the full test suite."""
  # Run the full test suite
  test_lib.main(argv)


if __name__ == "__main__":
  flags.StartMain(main)
| [
"psutil.NoSuchProcess",
"grr.lib.flags.StartMain",
"grr.server.aff4.FACTORY.Open",
"grr.test_lib.test_lib.main",
"grr.test_lib.test_lib.FakeTime",
"psutil.Process.old_target",
"yara.TimeoutError",
"functools.partial",
"grr.test_lib.flow_test_lib.TestFlowHelper",
"grr.test_lib.action_mocks.ActionMo... | [((9321, 9340), 'grr.test_lib.test_lib.main', 'test_lib.main', (['argv'], {}), '(argv)\n', (9334, 9340), False, 'from grr.test_lib import test_lib\n'), ((9372, 9393), 'grr.lib.flags.StartMain', 'flags.StartMain', (['main'], {}), '(main)\n', (9387, 9393), False, 'from grr.lib import flags\n'), ((1955, 2008), 'psutil.NoSuchProcess', 'psutil.NoSuchProcess', (["('No process with pid %d.' % pid)"], {}), "('No process with pid %d.' % pid)\n", (1975, 2008), False, 'import psutil\n'), ((2107, 2160), 'grr.test_lib.action_mocks.ActionMock', 'action_mocks.ActionMock', (['yara_actions.YaraProcessScan'], {}), '(yara_actions.YaraProcessScan)\n', (2130, 2160), False, 'from grr.test_lib import action_mocks\n'), ((2706, 2735), 'grr.server.aff4.FACTORY.Open', 'aff4.FACTORY.Open', (['session_id'], {}), '(session_id)\n', (2723, 2735), False, 'from grr.server import aff4\n'), ((6611, 6654), 'grr.server.aff4.FACTORY.Open', 'aff4.FACTORY.Open', (['session_id', 'flow.GRRFlow'], {}), '(session_id, flow.GRRFlow)\n', (6628, 6654), False, 'from grr.server import aff4\n'), ((8530, 8630), 'grr.test_lib.action_mocks.MultiGetFileClientMock', 'action_mocks.MultiGetFileClientMock', (['yara_actions.YaraProcessDump', 'tempfiles.DeleteGRRTempFiles'], {}), '(yara_actions.YaraProcessDump, tempfiles\n .DeleteGRRTempFiles)\n', (8565, 8630), False, 'from grr.test_lib import action_mocks\n'), ((1347, 1375), 'yara.TimeoutError', 'yara.TimeoutError', (['"""Timeout"""'], {}), "('Timeout')\n", (1364, 1375), False, 'import yara\n'), ((1853, 1880), 'psutil.Process.old_target', 'psutil.Process.old_target', ([], {}), '()\n', (1878, 1880), False, 'import psutil\n'), ((2391, 2603), 'grr.test_lib.flow_test_lib.TestFlowHelper', 'flow_test_lib.TestFlowHelper', (['yara_flows.YaraProcessScan.__name__', 'client_mock'], {'yara_signature': 'test_yara_signature', 'client_id': 'self.client_id', 'ignore_grr_process': 'ignore_grr_process', 'token': 'self.token'}), 
'(yara_flows.YaraProcessScan.__name__,\n client_mock, yara_signature=test_yara_signature, client_id=self.\n client_id, ignore_grr_process=ignore_grr_process, token=self.token, **kw)\n', (2419, 2603), False, 'from grr.test_lib import flow_test_lib\n'), ((3000, 3063), 'grr.test_lib.client_test_lib.MockWindowsProcess', 'client_test_lib.MockWindowsProcess', ([], {'pid': '(101)', 'name': '"""proc101.exe"""'}), "(pid=101, name='proc101.exe')\n", (3034, 3063), False, 'from grr.test_lib import client_test_lib\n'), ((3073, 3146), 'grr.test_lib.client_test_lib.MockWindowsProcess', 'client_test_lib.MockWindowsProcess', ([], {'pid': '(102)', 'name': '"""proc102.exe"""', 'ppid': '(101)'}), "(pid=102, name='proc102.exe', ppid=101)\n", (3107, 3146), False, 'from grr.test_lib import client_test_lib\n'), ((3169, 3240), 'grr.test_lib.client_test_lib.MockWindowsProcess', 'client_test_lib.MockWindowsProcess', ([], {'pid': '(103)', 'name': '"""proc103.exe"""', 'ppid': '(1)'}), "(pid=103, name='proc103.exe', ppid=1)\n", (3203, 3240), False, 'from grr.test_lib import client_test_lib\n'), ((3250, 3323), 'grr.test_lib.client_test_lib.MockWindowsProcess', 'client_test_lib.MockWindowsProcess', ([], {'pid': '(104)', 'name': '"""proc104.exe"""', 'ppid': '(103)'}), "(pid=104, name='proc104.exe', ppid=103)\n", (3284, 3323), False, 'from grr.test_lib import client_test_lib\n'), ((3346, 3417), 'grr.test_lib.client_test_lib.MockWindowsProcess', 'client_test_lib.MockWindowsProcess', ([], {'pid': '(105)', 'name': '"""proc105.exe"""', 'ppid': '(1)'}), "(pid=105, name='proc105.exe', ppid=1)\n", (3380, 3417), False, 'from grr.test_lib import client_test_lib\n'), ((3427, 3500), 'grr.test_lib.client_test_lib.MockWindowsProcess', 'client_test_lib.MockWindowsProcess', ([], {'pid': '(106)', 'name': '"""proc106.exe"""', 'ppid': '(104)'}), "(pid=106, name='proc106.exe', ppid=104)\n", (3461, 3500), False, 'from grr.test_lib import client_test_lib\n'), ((4214, 4251), 'grr.test_lib.test_lib.FakeTime', 
'test_lib.FakeTime', (['(10000)'], {'increment': '(1)'}), '(10000, increment=1)\n', (4231, 4251), False, 'from grr.test_lib import test_lib\n'), ((4467, 4504), 'grr.test_lib.test_lib.FakeTime', 'test_lib.FakeTime', (['(10000)'], {'increment': '(1)'}), '(10000, increment=1)\n', (4484, 4504), False, 'from grr.test_lib import test_lib\n'), ((6182, 6282), 'grr.test_lib.action_mocks.MultiGetFileClientMock', 'action_mocks.MultiGetFileClientMock', (['yara_actions.YaraProcessDump', 'tempfiles.DeleteGRRTempFiles'], {}), '(yara_actions.YaraProcessDump, tempfiles\n .DeleteGRRTempFiles)\n', (6217, 6282), False, 'from grr.test_lib import action_mocks\n'), ((6304, 6509), 'grr.test_lib.flow_test_lib.TestFlowHelper', 'flow_test_lib.TestFlowHelper', (['yara_flows.YaraDumpProcessMemory.__name__', 'client_mock'], {'pids': '(pids or [105])', 'size_limit': 'size_limit', 'client_id': 'self.client_id', 'ignore_grr_process': '(True)', 'token': 'self.token'}), '(yara_flows.YaraDumpProcessMemory.__name__,\n client_mock, pids=pids or [105], size_limit=size_limit, client_id=self.\n client_id, ignore_grr_process=True, token=self.token)\n', (6332, 6509), False, 'from grr.test_lib import flow_test_lib\n'), ((8690, 8852), 'grr.test_lib.flow_test_lib.TestFlowHelper', 'flow_test_lib.TestFlowHelper', (['yara_flows.YaraDumpProcessMemory.__name__', 'client_mock'], {'client_id': 'self.client_id', 'ignore_grr_process': '(True)', 'token': 'self.token'}), '(yara_flows.YaraDumpProcessMemory.__name__,\n client_mock, client_id=self.client_id, ignore_grr_process=True, token=\n self.token)\n', (8718, 8852), False, 'from grr.test_lib import flow_test_lib\n'), ((8958, 8986), 'grr.test_lib.test_lib.FakeTime', 'test_lib.FakeTime', (['(100000)', '(1)'], {}), '(100000, 1)\n', (8975, 8986), False, 'from grr.test_lib import test_lib\n'), ((2268, 2306), 'functools.partial', 'functools.partial', (['self.process', 'procs'], {}), '(self.process, procs)\n', (2285, 2306), False, 'import functools\n'), ((6041, 6079), 
'functools.partial', 'functools.partial', (['self.process', 'procs'], {}), '(self.process, procs)\n', (6058, 6079), False, 'import functools\n')] |
import os
import csv
import pandas as pd
import numpy as np
import test
#gets the path of current working directory
CWD = os.getcwd()
def Load_Data(csvpath):
    """Read a CSV file located relative to the current working directory.

    Parameters
    ----------
    csvpath : str
        Path fragment appended to the module-level CWD.

    Returns
    -------
    pandas.DataFrame or None
        The parsed data, or None when the file could not be read.
    """
    datapath = CWD + csvpath
    try:
        df = pd.read_csv(datapath)
    except (OSError, pd.errors.ParserError) as exc:
        # The original used a bare except and then referenced `df` after the
        # handler, which raised NameError on any read failure.  Report the
        # problem and return None instead.
        print("Error with reading csv: ", csvpath)
        print(exc)
        return None
    # Preserve the original behaviour of echoing the first rows.
    print(df.head(n=2))
    return df
def Readings_Advantix ():
    """Load the raw Advantix sensor readings CSV.

    Returns the DataFrame on success; on any failure prints a message and
    implicitly returns None.

    NOTE(review): the path is hard-coded with Windows-style backslashes
    relative to CWD -- confirm this module only runs on Windows.
    """
    try:
        #advantix_cleaned = CWD+ "\\Data\\Cleaned\\data-20190821-pt00.csv"
        advantix_raw_path = CWD + "\\Data\\Raw\\raw-20191127-pt01.csv"
        #data= pd.read_csv(advantix_cleaned)
        advantix_raw= pd.read_csv(advantix_raw_path)
        #print (advantix_raw.head())
        return (advantix_raw)
    except:
        print("Can't read raw advantix readings csv")
'''this function reads csv using numpy without pandas (not used in this instance)'''
def Load_Data_np (filename):
print ("starting")
data= np.genfromtxt(file_name, delimiter=',', skipskip_header=1, conconverters= {0: lambda s: str(s)})
return data
#with open(advantix_raw) as csv_file:
# csv_reader = csv.reader(csv_file, delimiter=',')
# for row in csv_reader:
| [
"pandas.read_csv",
"os.getcwd"
] | [((125, 136), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (134, 136), False, 'import os\n'), ((218, 239), 'pandas.read_csv', 'pd.read_csv', (['datapath'], {}), '(datapath)\n', (229, 239), True, 'import pandas as pd\n'), ((595, 625), 'pandas.read_csv', 'pd.read_csv', (['advantix_raw_path'], {}), '(advantix_raw_path)\n', (606, 625), True, 'import pandas as pd\n')] |
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
import math
from ._default_matplotlib_figure_dimensions import _default_matplotlib_figure_dimensions
def _calculate_nrows(nplots, ncols):
return math.ceil(nplots / ncols)
def _initialize_plot_with_dimensions(ncols, nrows, figsize_width, figsize_height):
    """Create an empty matplotlib figure sized for an ncols x nrows grid.

    Parameters
    ----------
    ncols : int
        Number of columns in the figure.
    nrows : int
        Number of rows in the figure.
    figsize_width : float
        Scalar adjustment of the figure width.
    figsize_height : float
        Scalar adjustment of the figure height.

    Returns
    -------
    matplotlib.figure.Figure
    """
    # Scale the library's default dimensions by the grid extent.
    scale = np.array([ncols * figsize_width, nrows * figsize_height])
    dimensions = _default_matplotlib_figure_dimensions() * scale
    return plt.figure(figsize=dimensions)
def _construct_plot_layout(
    nplots,
    ncols=4,
    figsize_width=1,
    figsize_height=1,
    grid_hspace=0.2,
    grid_wspace=0,
    width_ratios=False,
    height_ratios=False,
):
    """Create a figure and one Axes per requested plot, laid out on a grid.

    Parameters
    ----------
    nplots : int
        Number of plots (Axes) to create.
    ncols : int
        Number of grid columns. default: 4
    figsize_width, figsize_height : float
        Scalar adjustments of the overall figure size.
    grid_hspace, grid_wspace : float
        Vertical / horizontal spacing forwarded to GridSpec.
    width_ratios, height_ratios : array-like or False
        Relative column / row sizes; uniform ratios are used when omitted.

    Returns
    -------
    (matplotlib.figure.Figure, dict)
        The figure and a nested dict AxesDict[row][col] -> Axes.
    """
    nrows = _calculate_nrows(nplots, ncols)
    # Fall back to uniform ratios whenever none were supplied.  The original
    # only did this when nplots <= ncols, which let the sentinel `False`
    # reach GridSpec for multi-row layouts.
    if np.any(width_ratios) == False:  # noqa: E712 -- np.any may return np.bool_
        width_ratios = np.ones(ncols)
    if not height_ratios:
        height_ratios = np.ones(nrows)
    fig = _initialize_plot_with_dimensions(ncols, nrows, figsize_width, figsize_height)
    gridspec = GridSpec(nrows,
                        ncols,
                        width_ratios=width_ratios,
                        height_ratios=height_ratios,
                        hspace=grid_hspace,
                        wspace=grid_wspace)
    plot_count = 0
    AxesDict = {}
    for ax_i in range(nrows):
        AxesDict[ax_i] = {}
        for ax_j in range(ncols):
            plot_count += 1
            AxesDict[ax_i][ax_j] = fig.add_subplot(gridspec[ax_i, ax_j])
            # Stop once the (possibly partial) last row is filled.
            if plot_count >= nplots:
                break
    return fig, AxesDict
"math.ceil",
"numpy.ones",
"numpy.any",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec"
] | [((261, 286), 'math.ceil', 'math.ceil', (['(nplots / ncols)'], {}), '(nplots / ncols)\n', (270, 286), False, 'import math\n'), ((980, 1014), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'fig_dimensions'}), '(figsize=fig_dimensions)\n', (990, 1014), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1906), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['nrows', 'ncols'], {'width_ratios': 'width_ratios', 'height_ratios': 'height_ratios', 'hspace': 'grid_hspace', 'wspace': 'grid_wspace'}), '(nrows, ncols, width_ratios=width_ratios, height_ratios=\n height_ratios, hspace=grid_hspace, wspace=grid_wspace)\n', (1791, 1906), False, 'from matplotlib.gridspec import GridSpec\n'), ((912, 969), 'numpy.array', 'np.array', (['[ncols * figsize_width, nrows * figsize_height]'], {}), '([ncols * figsize_width, nrows * figsize_height])\n', (920, 969), True, 'import numpy as np\n'), ((1451, 1471), 'numpy.any', 'np.any', (['width_ratios'], {}), '(width_ratios)\n', (1457, 1471), True, 'import numpy as np\n'), ((1652, 1666), 'numpy.ones', 'np.ones', (['nrows'], {}), '(nrows)\n', (1659, 1666), True, 'import numpy as np\n'), ((1537, 1551), 'numpy.ones', 'np.ones', (['ncols'], {}), '(ncols)\n', (1544, 1551), True, 'import numpy as np\n')] |
from Section01_SOLID.OpenClosePrinciple.BetterFilter import BetterFilter
from Section01_SOLID.OpenClosePrinciple.Color import Color
from Section01_SOLID.OpenClosePrinciple.ColorSpecification import ColorSpecification
from Section01_SOLID.OpenClosePrinciple.Product import Product
from Section01_SOLID.OpenClosePrinciple.ProductFilter import ProductFilter
from Section01_SOLID.OpenClosePrinciple.Size import Size
from Section01_SOLID.OpenClosePrinciple.SizeSpecification import SizeSpecification
# class AndSpecification(Specification):
# def __init__(self, spec1, spec2):
# self.spec2 = spec2
# self.spec1 = spec1
#
# def is_satisfied(self, item):
# return self.spec1.is_satisfied(item) and \
# self.spec2.is_satisfied(item)
# Demo data: three products differing in colour and size.
apple = Product('Apple', Color.GREEN, Size.SMALL)
tree = Product('Tree', Color.GREEN, Size.LARGE)
house = Product('House', Color.BLUE, Size.LARGE)

products = [apple, tree, house]

# Old approach: one dedicated filter method per criterion.
pf = ProductFilter()
print('Green products (old):')
for p in pf.filter_by_color(products, Color.GREEN):
    print(f' - {p.name} is green')

# ^ BEFORE
# v AFTER

# New approach: one generic filter driven by composable specifications.
bf = BetterFilter()

print('Green products (new):')
green = ColorSpecification(Color.GREEN)
for p in bf.filter(products, green):
    print(f' - {p.name} is green')

print('Large products:')
large = SizeSpecification(Size.LARGE)
for p in bf.filter(products, large):
    print(f' - {p.name} is large')

print('Large blue items:')
# large_blue = AndSpecification(large, ColorSpecification(Color.BLUE))
# Specifications combine via the overloaded & operator.
large_blue = large & ColorSpecification(Color.BLUE)
for p in bf.filter(products, large_blue):
    print(f' - {p.name} is large and blue')
| [
"Section01_SOLID.OpenClosePrinciple.ColorSpecification.ColorSpecification",
"Section01_SOLID.OpenClosePrinciple.Product.Product",
"Section01_SOLID.OpenClosePrinciple.ProductFilter.ProductFilter",
"Section01_SOLID.OpenClosePrinciple.BetterFilter.BetterFilter",
"Section01_SOLID.OpenClosePrinciple.SizeSpecific... | [((783, 824), 'Section01_SOLID.OpenClosePrinciple.Product.Product', 'Product', (['"""Apple"""', 'Color.GREEN', 'Size.SMALL'], {}), "('Apple', Color.GREEN, Size.SMALL)\n", (790, 824), False, 'from Section01_SOLID.OpenClosePrinciple.Product import Product\n'), ((832, 872), 'Section01_SOLID.OpenClosePrinciple.Product.Product', 'Product', (['"""Tree"""', 'Color.GREEN', 'Size.LARGE'], {}), "('Tree', Color.GREEN, Size.LARGE)\n", (839, 872), False, 'from Section01_SOLID.OpenClosePrinciple.Product import Product\n'), ((881, 921), 'Section01_SOLID.OpenClosePrinciple.Product.Product', 'Product', (['"""House"""', 'Color.BLUE', 'Size.LARGE'], {}), "('House', Color.BLUE, Size.LARGE)\n", (888, 921), False, 'from Section01_SOLID.OpenClosePrinciple.Product import Product\n'), ((961, 976), 'Section01_SOLID.OpenClosePrinciple.ProductFilter.ProductFilter', 'ProductFilter', ([], {}), '()\n', (974, 976), False, 'from Section01_SOLID.OpenClosePrinciple.ProductFilter import ProductFilter\n'), ((1123, 1137), 'Section01_SOLID.OpenClosePrinciple.BetterFilter.BetterFilter', 'BetterFilter', ([], {}), '()\n', (1135, 1137), False, 'from Section01_SOLID.OpenClosePrinciple.BetterFilter import BetterFilter\n'), ((1178, 1209), 'Section01_SOLID.OpenClosePrinciple.ColorSpecification.ColorSpecification', 'ColorSpecification', (['Color.GREEN'], {}), '(Color.GREEN)\n', (1196, 1209), False, 'from Section01_SOLID.OpenClosePrinciple.ColorSpecification import ColorSpecification\n'), ((1316, 1345), 'Section01_SOLID.OpenClosePrinciple.SizeSpecification.SizeSpecification', 'SizeSpecification', (['Size.LARGE'], {}), '(Size.LARGE)\n', (1333, 1345), False, 'from Section01_SOLID.OpenClosePrinciple.SizeSpecification import SizeSpecification\n'), ((1538, 1568), 'Section01_SOLID.OpenClosePrinciple.ColorSpecification.ColorSpecification', 'ColorSpecification', (['Color.BLUE'], {}), '(Color.BLUE)\n', (1556, 1568), False, 'from 
Section01_SOLID.OpenClosePrinciple.ColorSpecification import ColorSpecification\n')] |
# open external links in a new window
from django.utils.html import escape
from wagtail.contrib.modeladmin.helpers import ButtonHelper
from wagtail.contrib.modeladmin.mixins import ThumbnailMixin
from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register
from wagtail.core import hooks
from wagtail.core.rich_text import LinkHandler
from wagtailorderable.modeladmin.mixins import OrderableMixin
from .models import HomePageImage
class NewWindowExternalLinkHandler(LinkHandler):
    """Rich-text link handler that opens external links in a new tab."""

    # This specifies to do this override for external links only.
    # Other identifiers are available for other types of links.
    identifier = 'external'

    @classmethod
    def expand_db_attributes(cls, attrs):
        """Render the opening <a> tag for a stored external link."""
        href = attrs["href"]
        # Let's add the target attr, and also rel="noopener" + noreferrer fallback.
        # See https://github.com/whatwg/html/issues/4078.
        return '<a href="%s" target="_blank" rel="noopener noreferrer">' % escape(href)
@hooks.register('register_rich_text_features')
def register_external_link(features):
    """Install the new-window handler for external links in rich text."""
    features.register_link_type(NewWindowExternalLinkHandler)
@hooks.register('construct_page_listing_buttons')
def replace_page_listing_button_item(buttons, page, page_perms, is_parent=False, context=None):
    """Relabel the 'Delete' page-listing button for HomePageImage pages.

    Mutates the button objects in place; returns nothing.
    """
    # The index from enumerate() was never used -- iterate directly.
    for button in buttons:
        # basic code only - recommend you find a more robust way to confirm this is the delete button
        if button.label == 'Delete' and page.__class__.__name__ == 'HomePageImage':
            button.label = 'Remove from home page'
class HomePageImagesButtonHelper(ButtonHelper):
    """ButtonHelper that excludes the 'edit' button by default."""

    def get_buttons_for_obj(self, obj, exclude=None, classnames_add=None, classnames_exclude=None):
        """Return the listing buttons for *obj*, hiding 'edit' unless told otherwise."""
        # Avoid the mutable default argument (['edit']) from the original,
        # which was a single list shared between all calls.
        if exclude is None:
            exclude = ['edit']
        btns = super().get_buttons_for_obj(obj, exclude, classnames_add, classnames_exclude)
        return btns
class HomePageImageAdmin(OrderableMixin, ThumbnailMixin, ModelAdmin):
    """Wagtail modeladmin configuration for orderable home-page images."""
    model = HomePageImage
    menu_label = 'Edit the Home Page' # ditch this to use verbose_name_plural from model
    menu_icon = 'edit' # change as required
    menu_order = 1 # will put in 3rd place (000 being 1st, 100 2nd)
    add_to_settings_menu = False # or True to add your model to the Settings sub-menu
    exclude_from_explorer = False # or True to exclude pages of this type from Wagtail's explorer view
    list_display = ('admin_thumb', 'home_page_image', 'installation_page',)
    list_per_page = 50
    thumb_image_field_name = 'home_page_image'
    thumb_image_width = 150
    ordering = ['sort_order']
    button_helper_class = HomePageImagesButtonHelper


# Expose the admin in the Wagtail menu.
modeladmin_register(HomePageImageAdmin)
| [
"wagtail.contrib.modeladmin.options.modeladmin_register",
"django.utils.html.escape",
"wagtail.core.hooks.register"
] | [((982, 1027), 'wagtail.core.hooks.register', 'hooks.register', (['"""register_rich_text_features"""'], {}), "('register_rich_text_features')\n", (996, 1027), False, 'from wagtail.core import hooks\n'), ((1130, 1178), 'wagtail.core.hooks.register', 'hooks.register', (['"""construct_page_listing_buttons"""'], {}), "('construct_page_listing_buttons')\n", (1144, 1178), False, 'from wagtail.core import hooks\n'), ((2588, 2627), 'wagtail.contrib.modeladmin.options.modeladmin_register', 'modeladmin_register', (['HomePageImageAdmin'], {}), '(HomePageImageAdmin)\n', (2607, 2627), False, 'from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register\n'), ((966, 978), 'django.utils.html.escape', 'escape', (['href'], {}), '(href)\n', (972, 978), False, 'from django.utils.html import escape\n')] |
import sublime, sublime_plugin
import os
try:
# ST3
from ..apis.core import Core
except (ImportError, ValueError):
# ST2
from apis.core import Core
# Completion
class ERBAutocompleteListener(sublime_plugin.EventListener):
    """Sublime Text listener: ERB completions and syntax switching for .erb files."""

    def on_query_completions(self, view, prefix, locations):
        """Return ERB completions when the cursor is inside the configured scope.

        When the current line already contains '<', the leading character of
        each completion is stripped so the tag opener is not duplicated.
        """
        core = Core()
        self.completions = []
        specialkey = False
        scope = core.words.get('scope')
        temp = core.get_line_text(view)
        lineText = temp[-1]
        # True when the line already has a '<' before the caret.
        specialkey = True if lineText.find("<") >= 0 else False
        if scope and view.match_selector(locations[0], scope):
            self.completions += core.words.get('completions')
            self.completions += core.get_custom_tag()
        if not self.completions:
            return []
        completions = list(self.completions)
        if specialkey:
            # NOTE(review): this mutates the completion lists obtained from
            # Core in place -- confirm Core returns fresh lists on each call.
            for idx, item in enumerate(self.completions):
                self.completions[idx][1] = item[1][1:]
            completions = [tuple(attr) for attr in self.completions]
        return completions

    def on_load(self, view):
        """Switch the syntax to ERB when an .erb file is opened."""
        filename = view.file_name()
        if not filename:
            return
        core = Core()
        name = os.path.basename(filename.lower())
        if name[-8:] == "html.erb" or name[-3:] == "erb":
            try:
                view.settings().set('syntax', core.get_grammar_path())
                print("Switched syntax to: ERB")
            except:
                # Best effort: keep the current syntax if switching fails.
                pass
| [
"apis.core.Core"
] | [((315, 321), 'apis.core.Core', 'Core', ([], {}), '()\n', (319, 321), False, 'from apis.core import Core\n'), ((1187, 1193), 'apis.core.Core', 'Core', ([], {}), '()\n', (1191, 1193), False, 'from apis.core import Core\n')] |
from tkinter import *
from tkinter import ttk
from Controller import controleBanco
# Tela de relatorios em xlsx
def TelaRelatorio(tela):
    """Open the report window: show stored readings and export them to Excel.

    Parameters
    ----------
    tela : tkinter widget
        Parent window the report Toplevel is attached to.
    """
    # Create the report window.
    telaRelatorio = Toplevel(tela)
    telaRelatorio.title('RELATORIO')
    telaRelatorio.geometry('500x500+620+120')
    telaRelatorio['bg'] = 'gray'
    telaRelatorio.resizable(False,False)
    telaRelatorio.focus_force()
    telaRelatorio.grab_set()
    # First and last recorded dates (None when the database is empty).
    listaI = controleBanco.BuscaPrimeiro(3)
    listaF = controleBanco.BuscaUltimo(3)
    if(listaI == None):
        listaDiasI = ' '
        listaDiasF = ' '
    else:
        listaDiasI = listaI
        listaDiasF = listaF
    # Date labels.
    lblDataI = Label(telaRelatorio,text='DATA DE INICIO '+listaDiasI,foreground='black',bg='gray',anchor=W,)
    lblDataI.place(x=20,y=30)
    lblDataF = Label(telaRelatorio,text='DATA DE FIM '+listaDiasF,foreground='black',bg='gray',anchor=W,)
    lblDataF.place(x=20,y=90)
    # Fetch all rows between the first and last stored ids.
    def PegaData():
        idInicio = controleBanco.BuscaPrimeiro(2)
        idFim = controleBanco.BuscaUltimo(2)
        dadosSeparados = controleBanco.BuscaDadosPeloId(idInicio, idFim)
        return dadosSeparados
    # Export the rows to Excel, then close the window.
    def Escreve():
        d = PegaData()
        if(d != None):
            controleBanco.EscreveNoExcel(d)
        telaRelatorio.destroy()
    # "Save to Excel" button.
    btn = Button(telaRelatorio,text='SALVAR EM EXCEL',command = Escreve,foreground='white',bg='black')
    btn.place(x=370,y=440)
    # Set up the results table.
    tabela = ttk.Treeview(telaRelatorio)
    tabela.place(x=10,y=165,width=465,height=230)
    # Fill the table with the fetched rows.
    def EscreveNaTabela():
        dadosFormat = PegaData()
        if(dadosFormat != None):
            for i in range(len(dadosFormat)):
                tabela.insert(parent='',index='end',id=i,text="",values=(dadosFormat[i]))
    # Vertical and horizontal scrollbars wired to the table.
    barraDeRoalgem = Scrollbar(telaRelatorio,orient='vertical',command='')
    barraDeRoalgem.place(x=465,y=165,width=15,height=230)
    tabela.configure(yscrollcommand=barraDeRoalgem.set)
    barraDeRoalgemH = Scrollbar(telaRelatorio,orient='horizontal',command='')
    barraDeRoalgemH.place(x=10,y=394,width=470,height=15)
    tabela.configure(xscrollcommand=barraDeRoalgemH.set)
    # Build the column list: three fixed columns plus one per stored variable.
    dados = controleBanco.ControleLerDados('Nome', 1)
    qtDados = len(dados)
    teste = ()
    teste = teste + ("col0","col1","col2",)
    for n in range(0,qtDados):
        aux = str(n+3)
        teste = teste + ("col"+aux,)
    tabela['column'] = teste
    tabela.column("#0",width=0,minwidth=0)
    tabela.column('col0',width=96,anchor=W,minwidth=96)
    tabela.column('col1',width=96,anchor=W,minwidth=96)
    tabela.column('col2',width=96,anchor=W,minwidth=96)
    for i in range(0,qtDados):
        aux = str(i+3)
        tabela.column('col'+aux,width=50,anchor=W,minwidth=50)
    tabela.heading("#0",text="")
    tabela.heading("col0",text="ID",anchor=W)
    tabela.heading("col1",text="Data",anchor=W)
    tabela.heading("col2",text="Hora",anchor=W)
    for i in range(0,qtDados):
        aux2 = str(i+3)
        tabela.heading('col'+aux2,text="VAR",anchor=W)
    EscreveNaTabela()
"Controller.controleBanco.ControleLerDados",
"Controller.controleBanco.BuscaPrimeiro",
"Controller.controleBanco.BuscaDadosPeloId",
"Controller.controleBanco.BuscaUltimo",
"Controller.controleBanco.EscreveNoExcel",
"tkinter.ttk.Treeview"
] | [((440, 470), 'Controller.controleBanco.BuscaPrimeiro', 'controleBanco.BuscaPrimeiro', (['(3)'], {}), '(3)\n', (467, 470), False, 'from Controller import controleBanco\n'), ((484, 512), 'Controller.controleBanco.BuscaUltimo', 'controleBanco.BuscaUltimo', (['(3)'], {}), '(3)\n', (509, 512), False, 'from Controller import controleBanco\n'), ((1650, 1677), 'tkinter.ttk.Treeview', 'ttk.Treeview', (['telaRelatorio'], {}), '(telaRelatorio)\n', (1662, 1677), False, 'from tkinter import ttk\n'), ((2401, 2442), 'Controller.controleBanco.ControleLerDados', 'controleBanco.ControleLerDados', (['"""Nome"""', '(1)'], {}), "('Nome', 1)\n", (2431, 2442), False, 'from Controller import controleBanco\n'), ((1116, 1146), 'Controller.controleBanco.BuscaPrimeiro', 'controleBanco.BuscaPrimeiro', (['(2)'], {}), '(2)\n', (1143, 1146), False, 'from Controller import controleBanco\n'), ((1163, 1191), 'Controller.controleBanco.BuscaUltimo', 'controleBanco.BuscaUltimo', (['(2)'], {}), '(2)\n', (1188, 1191), False, 'from Controller import controleBanco\n'), ((1217, 1264), 'Controller.controleBanco.BuscaDadosPeloId', 'controleBanco.BuscaDadosPeloId', (['idInicio', 'idFim'], {}), '(idInicio, idFim)\n', (1247, 1264), False, 'from Controller import controleBanco\n'), ((1389, 1420), 'Controller.controleBanco.EscreveNoExcel', 'controleBanco.EscreveNoExcel', (['d'], {}), '(d)\n', (1417, 1420), False, 'from Controller import controleBanco\n')] |
# -*- coding: utf-8 -*-
from runScraper import test, scrapeRecipeUrl
from parser import parseIngredients
# Smoke-test the scraper against a known recipe URL.
test('https://healthyfitnessmeals.com/artichoke-stuffed-chicken/')
# ingredient = parseIngredients(
# ["cans refrigerated crescent dinner rolls"])
# print(ingredient)
# print(scrapeRecipeUrl)
| [
"runScraper.test"
] | [((107, 173), 'runScraper.test', 'test', (['"""https://healthyfitnessmeals.com/artichoke-stuffed-chicken/"""'], {}), "('https://healthyfitnessmeals.com/artichoke-stuffed-chicken/')\n", (111, 173), False, 'from runScraper import test, scrapeRecipeUrl\n')] |
# Generated by Django 2.2.13 on 2020-10-21 14:39
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the House model's 'user_id' field to 'user'."""

    dependencies = [
        ('houses', '0002_house_user_id'),
    ]

    operations = [
        migrations.RenameField(
            model_name='house',
            old_name='user_id',
            new_name='user',
        ),
    ]
| [
"django.db.migrations.RenameField"
] | [((222, 301), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""house"""', 'old_name': '"""user_id"""', 'new_name': '"""user"""'}), "(model_name='house', old_name='user_id', new_name='user')\n", (244, 301), False, 'from django.db import migrations\n')] |
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from .models import User, Listing, Bid, Comment
def index(request):
    """Render the landing page with every auction listing."""
    context = {"listings": Listing.objects.all()}
    return render(request, "auctions/index.html", context)
@login_required
def watchlist(request):
    """Show the listings the logged-in user is watching."""
    context = {"watchlist": request.user.watchlist.all()}
    return render(request, "auctions/watchlist.html", context)
def listing(request, id):
    """Show one listing and handle bids, comments, watchlist and closing.

    GET renders the listing page; POST dispatches on which form field is
    present ("bidprice", "comment_text", "add_watchlist", "close_listing").
    """
    listings = Listing.objects.all().filter(id=id)
    if not listings:
        return render(request, "auctions/error.html", {
            "message": "is not a Listing."
        })
    mlisting = listings[0]
    is_watching = False
    if request.user.is_authenticated:
        is_watching = mlisting in request.user.watchlist.all()  # T or F
    message = ""
    # When we're posting.
    if request.method == "POST":
        if "bidprice" in request.POST.keys():
            bidprice = request.POST["bidprice"]
            if bidprice:
                # NOTE(review): int() will raise on non-numeric input --
                # confirm the form enforces a numeric field.
                bidprice = int(bidprice)
                # Reject bids that are not strictly higher than the current
                # bid (or lower than the starting price when no bid exists).
                if (mlisting.current_price == mlisting.starting_price and
                    bidprice < mlisting.starting_price) or \
                        (mlisting.current_price != mlisting.starting_price and
                            bidprice <= mlisting.current_price):
                    message = "Bidprice must be higher than current \
                        bid, if there is one. If there isn't one, it must \
                        be as large as the starting bid."
                else:
                    bid = Bid(bidder=request.user, listing=mlisting,
                        amount=bidprice)
                    bid.save()
                    mlisting.current_price = mlisting.max_bid()
                    mlisting.save()
        elif "comment_text" in request.POST.keys():
            comment_text = request.POST["comment_text"]
            if comment_text:
                comment = Comment(commenter=request.user, listing=mlisting,
                                  comment_text=comment_text)
                comment.save()
        elif "add_watchlist" in request.POST.keys():
            # Toggle watchlist membership.
            if not is_watching:
                request.user.watchlist.add(mlisting)
            else:
                request.user.watchlist.remove(mlisting)
        elif "close_listing" in request.POST.keys():
            mlisting.is_open = False
            mlisting.save()
            winner = mlisting.max_bidder()
            message = f"Listing is closed. Winner is {winner}"
    if request.user.is_authenticated:
        is_watching = mlisting in request.user.watchlist.all()  # T or F
    # Rendering the page.
    return render(request, "auctions/listing.html", {
        "winner": mlisting.max_bidder(),
        "message": message,
        "listing": mlisting,
        "is_watching": is_watching,
        "comments": mlisting.lcomments.all(),
        "is_creator": request.user == mlisting.seller  # T or F
    })
@login_required
def createlisting(request):
    """Show the listing-creation form and create a listing on POST."""
    message = ""
    if request.method == "POST":
        startingbid = request.POST["startingbid"]
        new_listing = Listing(
            title=request.POST["title"],
            description=request.POST["description"],
            current_price=startingbid,
            starting_price=startingbid,
            url=request.POST["url"],
            seller=request.user,
            is_open=True,
        )
        new_listing.save()
        message = "Listing was created"
    return render(request, "auctions/createlisting.html", {
        "message": message
    })
def login_view(request):
    """Authenticate the user on POST; otherwise render the login form."""
    if request.method == "POST":
        # Attempt to sign user in
        username = request.POST["username"]
        password = request.POST["password"]
        user = authenticate(request, username=username, password=password)
        # Check if authentication successful
        if user is not None:
            login(request, user)
            return HttpResponseRedirect(reverse("index"))
        else:
            return render(request, "auctions/login.html", {
                "message": "Invalid username and/or password."
            })
    else:
        return render(request, "auctions/login.html")
@login_required
def logout_view(request):
    """Log the current user out and redirect to the index page."""
    logout(request)
    return HttpResponseRedirect(reverse("index"))
def register(request):
    """Create a new account on POST; otherwise render the registration form."""
    if request.method == "POST":
        username = request.POST["username"]
        email = request.POST["email"]
        # Ensure password matches confirmation
        password = request.POST["password"]
        confirmation = request.POST["confirmation"]
        if password != confirmation:
            return render(request, "auctions/register.html", {
                "message": "Passwords must match."
            })
        # Attempt to create new user
        try:
            user = User.objects.create_user(username, email, password)
            user.save()
        except IntegrityError:
            # Duplicate username violates the unique constraint.
            return render(request, "auctions/register.html", {
                "message": "Username already taken."
            })
        login(request, user)
        return HttpResponseRedirect(reverse("index"))
    else:
        return render(request, "auctions/register.html")
| [
"django.shortcuts.render",
"django.contrib.auth.authenticate",
"django.contrib.auth.login",
"django.urls.reverse",
"django.contrib.auth.logout"
] | [((401, 463), 'django.shortcuts.render', 'render', (['request', '"""auctions/index.html"""', "{'listings': listings}"], {}), "(request, 'auctions/index.html', {'listings': listings})\n", (407, 463), False, 'from django.shortcuts import render\n'), ((577, 646), 'django.shortcuts.render', 'render', (['request', '"""auctions/watchlist.html"""', "{'watchlist': mwatchlist}"], {}), "(request, 'auctions/watchlist.html', {'watchlist': mwatchlist})\n", (583, 646), False, 'from django.shortcuts import render\n'), ((3792, 3860), 'django.shortcuts.render', 'render', (['request', '"""auctions/createlisting.html"""', "{'message': message}"], {}), "(request, 'auctions/createlisting.html', {'message': message})\n", (3798, 3860), False, 'from django.shortcuts import render\n'), ((4563, 4578), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (4569, 4578), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((776, 848), 'django.shortcuts.render', 'render', (['request', '"""auctions/error.html"""', "{'message': 'is not a Listing.'}"], {}), "(request, 'auctions/error.html', {'message': 'is not a Listing.'})\n", (782, 848), False, 'from django.shortcuts import render\n'), ((4073, 4132), 'django.contrib.auth.authenticate', 'authenticate', (['request'], {'username': 'username', 'password': 'password'}), '(request, username=username, password=password)\n', (4085, 4132), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((4476, 4514), 'django.shortcuts.render', 'render', (['request', '"""auctions/login.html"""'], {}), "(request, 'auctions/login.html')\n", (4482, 4514), False, 'from django.shortcuts import render\n'), ((4611, 4627), 'django.urls.reverse', 'reverse', (['"""index"""'], {}), "('index')\n", (4618, 4627), False, 'from django.urls import reverse\n'), ((5395, 5415), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (5400, 5415), False, 'from django.contrib.auth import 
authenticate, login, logout\n'), ((5495, 5536), 'django.shortcuts.render', 'render', (['request', '"""auctions/register.html"""'], {}), "(request, 'auctions/register.html')\n", (5501, 5536), False, 'from django.shortcuts import render\n'), ((4220, 4240), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (4225, 4240), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((4332, 4424), 'django.shortcuts.render', 'render', (['request', '"""auctions/login.html"""', "{'message': 'Invalid username and/or password.'}"], {}), "(request, 'auctions/login.html', {'message':\n 'Invalid username and/or password.'})\n", (4338, 4424), False, 'from django.shortcuts import render\n'), ((4969, 5048), 'django.shortcuts.render', 'render', (['request', '"""auctions/register.html"""', "{'message': 'Passwords must match.'}"], {}), "(request, 'auctions/register.html', {'message': 'Passwords must match.'})\n", (4975, 5048), False, 'from django.shortcuts import render\n'), ((5452, 5468), 'django.urls.reverse', 'reverse', (['"""index"""'], {}), "('index')\n", (5459, 5468), False, 'from django.urls import reverse\n'), ((4281, 4297), 'django.urls.reverse', 'reverse', (['"""index"""'], {}), "('index')\n", (4288, 4297), False, 'from django.urls import reverse\n'), ((5275, 5360), 'django.shortcuts.render', 'render', (['request', '"""auctions/register.html"""', "{'message': 'Username already taken.'}"], {}), "(request, 'auctions/register.html', {'message':\n 'Username already taken.'})\n", (5281, 5360), False, 'from django.shortcuts import render\n')] |
import sys
import logging
from face_client import face_client
from face_client.api_proxy import FaceApiProxy
from face_client.camera_controller import CameraController
from face_client.image_displayer import ImageDisplayer
def main(argv):
    """Wire up the camera, display and API proxy, then start the face client."""
    camera = CameraController()
    displayer = ImageDisplayer()
    proxy = FaceApiProxy()
    face_client.FaceClient(camera, displayer, proxy).start()
def run_main():  # pylint: disable=invalid-name
    """Console entry point: run main() and exit with its return value.

    Any unhandled exception is logged with its traceback and converted
    into a non-zero exit status.  SystemExit itself is not intercepted.
    """
    try:
        status = main(sys.argv)
    except Exception:
        logging.exception('face client crashed...')
        sys.exit(1)
    else:
        sys.exit(status)
| [
"face_client.api_proxy.FaceApiProxy",
"face_client.image_displayer.ImageDisplayer",
"logging.exception",
"face_client.camera_controller.CameraController",
"sys.exit"
] | [((278, 296), 'face_client.camera_controller.CameraController', 'CameraController', ([], {}), '()\n', (294, 296), False, 'from face_client.camera_controller import CameraController\n'), ((298, 314), 'face_client.image_displayer.ImageDisplayer', 'ImageDisplayer', ([], {}), '()\n', (312, 314), False, 'from face_client.image_displayer import ImageDisplayer\n'), ((316, 330), 'face_client.api_proxy.FaceApiProxy', 'FaceApiProxy', ([], {}), '()\n', (328, 330), False, 'from face_client.api_proxy import FaceApiProxy\n'), ((478, 521), 'logging.exception', 'logging.exception', (['"""face client crashed..."""'], {}), "('face client crashed...')\n", (495, 521), False, 'import logging\n'), ((530, 541), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (538, 541), False, 'import sys\n')] |
"""
Categorize mapping between Dutch and English into polysemy profiles
Usage:
categorize_lemma_to_lemma_polysemy.py --config_path=<config_path> --input_folder=<input_folder> --output_folder=<output_folder> --rbn_pos=<rbn_pos> --fn_pos=<fn_pos> --verbose=<verbose>
Options:
--config_path=<config_path>
--input_folder=<input_folder> input folder with output from combine_resources.py (graph.p and combined.p)
--rbn_pos=<rbn_pos> noun verb adjective adverb other
--fn_pos=<fn_pos> supported: N V A Adv
--output_folder=<output_folder>
--verbose=<verbose> 0 --> no stdout 1 --> general stdout 2 --> detailed stdout
Example:
python categorize_lemma_to_lemma_polysemy.py --config_path="config_files/v0.json" --input_folder="output" --output_folder="polysemy_profiles" --rbn_pos="verb" --fn_pos="V" --verbose=1
"""
from collections import defaultdict
import sys
import pickle
import os
import json
import pathlib
from docopt import docopt
from datetime import datetime
import networkx as nx
import graph_utils
import load_utils
odwn_classes = load_utils.load_python_module(module_path='../resources/ODWN_Reader',
module_name='odwn_classes',
verbose=1)
utils = load_utils.load_python_module(module_path='../resources/ODWN_Reader',
module_name='utils',
verbose=1)
def polysemy_profiles_to_category(polysemy_nl, polysemy_en):
    """Map a Dutch/English polysemy pair onto a polysemy-profile label.

    :param polysemy_nl: number of senses of the Dutch lemma (must be >= 1)
    :param polysemy_en: number of senses of the English lemma (must be >= 1)
    :return: one of 'm2m', 'm2p', 'p2m', 'p2p'
        ('m' = monosemous, i.e. exactly one sense; 'p' = polysemous)
    :raises ValueError: if either count is smaller than 1.  The previous
        implementation fell through all branches in that case and crashed
        with an UnboundLocalError; an explicit error is clearer.
    """
    if polysemy_nl < 1 or polysemy_en < 1:
        raise ValueError(
            f'polysemy counts must be >= 1, got nl={polysemy_nl}, en={polysemy_en}')
    side_nl = 'm' if polysemy_nl == 1 else 'p'
    side_en = 'm' if polysemy_en == 1 else 'p'
    return f'{side_nl}2{side_en}'


assert polysemy_profiles_to_category(1, 1) == 'm2m'
assert polysemy_profiles_to_category(1, 2) == 'm2p'
assert polysemy_profiles_to_category(2, 1) == 'p2m'
assert polysemy_profiles_to_category(2, 2) == 'p2p'
# --- Script body: parse CLI args, load resources, categorize mappings, save. ---
# load arguments
arguments = docopt(__doc__)
print()
print('PROVIDED ARGUMENTS')
print(arguments)
print()
config_path = arguments['--config_path']
assert os.path.exists(config_path), f'{config_path} does not exist'
configuration = json.load(open(config_path))
verbose = int(arguments['--verbose'])
out_dir = pathlib.Path(arguments['--output_folder'])
if not out_dir.exists():
    out_dir.mkdir(parents=True)
# load objects: RBN lexical entries, the combined FrameNet object, and the
# RBN<->FN mapping graph produced by combine_resources.py
rbn_objs = pickle.load(open(configuration["path_rbn_objs"], 'rb'))
fn_obj = pickle.load(open(f"{arguments['--input_folder']}/combined.p", "rb"))
g = nx.read_gpickle(f"{arguments['--input_folder']}/graph.p")
# load polysemy info
# English polysemy = number of frames evoked per (lemma, pos)
lemma_pos2frame_objs = defaultdict(list)
for frame_label, frame_obj in fn_obj.framelabel2frame_obj.items():
    for lu_id, lu_obj in frame_obj.lu_id2lu_obj.items():
        lemma_pos2frame_objs[(lu_obj.lexeme,
                              lu_obj.pos)].append(frame_obj)
# Dutch polysemy = number of RBN lexical-entry ids per (lemma, pos)
pol_info_df, \
pol_df, \
lemma_pos2le_ids = utils.load_polysemy_info(rbn_objs,
                                          pos={arguments['--rbn_pos']})
# query for paths (direct Dutch-lemma -> English-LU edges only)
print('starting querying for paths', datetime.now())
all_paths = graph_utils.get_paths_from_rbn_to_fn(g,
                                                 from_prefix=f'(Dutch)',
                                                 to_prefix=f'LU-',
                                                 from_suffix=f'.{arguments["--fn_pos"]}',
                                                 maximum_length_path=1,
                                                 verbose=verbose)
print('ended querying for paths', datetime.now())
print(f'number of paths found: {len(all_paths)}')
# NOTE(review): unused — the query above hard-codes maximum_length_path=1;
# confirm whether this constant was meant to be passed instead.
maximum_path_length = 2
statistics = defaultdict(set)
for path in all_paths:
    node_nl, node_en = path
    lemma_nl = g.node[node_nl]['attr']['lemma']
    pos_nl = g.node[node_nl]['attr']['pos']
    pol_nl = len(lemma_pos2le_ids[(lemma_nl, pos_nl)])
    lemma_en = g.node[node_en]['attr']['lemma']
    pos_en = g.node[node_en]['attr']['pos']
    pol_en = len(lemma_pos2frame_objs[(lemma_en, pos_en)])
    assert pos_nl == pos_en
    # bucket the pair by its (Dutch, English) polysemy profile
    polysemy_cat = polysemy_profiles_to_category(pol_nl,
                                                 pol_en)
    value = ((lemma_nl, pos_nl), (lemma_en, pos_en))
    key = polysemy_cat
    statistics[key].add(value)
# save to file: dict mapping category -> set of ((nl_lemma, pos), (en_lemma, pos))
output_path = out_dir / f'{arguments["--fn_pos"]}.p'
with open(output_path, 'wb') as outfile:
    pickle.dump(statistics, outfile)
if verbose:
    for cat, items in statistics.items():
        print(cat, len(items))
print(f'saved output to {output_path}')
print(datetime.now())
| [
"os.path.exists",
"pickle.dump",
"pathlib.Path",
"graph_utils.get_paths_from_rbn_to_fn",
"datetime.datetime.now",
"collections.defaultdict",
"load_utils.load_python_module",
"networkx.read_gpickle",
"docopt.docopt"
] | [((1071, 1183), 'load_utils.load_python_module', 'load_utils.load_python_module', ([], {'module_path': '"""../resources/ODWN_Reader"""', 'module_name': '"""odwn_classes"""', 'verbose': '(1)'}), "(module_path='../resources/ODWN_Reader',\n module_name='odwn_classes', verbose=1)\n", (1100, 1183), False, 'import load_utils\n'), ((1278, 1383), 'load_utils.load_python_module', 'load_utils.load_python_module', ([], {'module_path': '"""../resources/ODWN_Reader"""', 'module_name': '"""utils"""', 'verbose': '(1)'}), "(module_path='../resources/ODWN_Reader',\n module_name='utils', verbose=1)\n", (1307, 1383), False, 'import load_utils\n'), ((2089, 2104), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (2095, 2104), False, 'from docopt import docopt\n'), ((2215, 2242), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (2229, 2242), False, 'import os\n'), ((2370, 2412), 'pathlib.Path', 'pathlib.Path', (["arguments['--output_folder']"], {}), "(arguments['--output_folder'])\n", (2382, 2412), False, 'import pathlib\n'), ((2635, 2692), 'networkx.read_gpickle', 'nx.read_gpickle', (['f"""{arguments[\'--input_folder\']}/graph.p"""'], {}), '(f"{arguments[\'--input_folder\']}/graph.p")\n', (2650, 2692), True, 'import networkx as nx\n'), ((2738, 2755), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2749, 2755), False, 'from collections import defaultdict\n'), ((3225, 3396), 'graph_utils.get_paths_from_rbn_to_fn', 'graph_utils.get_paths_from_rbn_to_fn', (['g'], {'from_prefix': 'f"""(Dutch)"""', 'to_prefix': 'f"""LU-"""', 'from_suffix': 'f""".{arguments[\'--fn_pos\']}"""', 'maximum_length_path': '(1)', 'verbose': 'verbose'}), '(g, from_prefix=f\'(Dutch)\', to_prefix=\n f\'LU-\', from_suffix=f".{arguments[\'--fn_pos\']}", maximum_length_path=1,\n verbose=verbose)\n', (3261, 3396), False, 'import graph_utils\n'), ((3771, 3787), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (3782, 3787), False, 'from 
collections import defaultdict\n'), ((3197, 3211), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3209, 3211), False, 'from datetime import datetime\n'), ((3667, 3681), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3679, 3681), False, 'from datetime import datetime\n'), ((4507, 4539), 'pickle.dump', 'pickle.dump', (['statistics', 'outfile'], {}), '(statistics, outfile)\n', (4518, 4539), False, 'import pickle\n'), ((4680, 4694), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4692, 4694), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python3
"""
corrections.py: Script to apply corrections to the images.
"""
import os
from argparse import ArgumentParser
from datetime import date, datetime
from typing import Optional, Sequence
import numpy as np
from astropy.io import fits
from dresscode.utils import load_config
def main(argv: Optional[Sequence[str]] = None) -> int:
    """Apply coincidence loss, LSS and zero point corrections to the images.

    Reads the galaxy/path/years from the config file, then, per year and
    per filter (um2, uw2, uw1), runs the three correction stages in order;
    each stage consumes the previous stage's output file if it exists.

    :param argv: command-line arguments (defaults to sys.argv[1:])
    :return: process exit status (0 on success)
    """
    parser = ArgumentParser()
    parser.add_argument(
        "-c", "--config", help="path to config.txt", default="config.txt"
    )
    args = parser.parse_args(argv)

    config = load_config(args.config)
    galaxy = config["galaxy"]
    path = config["path"] + galaxy + "/working_dir/"
    years = config["years"]

    # Zero point correction coefficients (param1, param2) per filter.
    zp_params = {
        "um2": (-2.330e-3, -1.361e-3),
        "uw2": (1.108e-3, -1.960e-3),
        "uw1": (2.041e-3, -1.748e-3),
    }
    filters = ("um2", "uw2", "uw1")

    # Loop over the different years.
    for year in years:
        print("Year: " + year)
        yearpath = path + year + "/"

        # PART 1: Apply a coincidence loss correction.
        print("Applying coincidence loss corrections...")
        for filt in filters:
            fname = yearpath + "sum_" + filt + "_nm.img"
            if os.path.isfile(fname):
                coicorr(fname)

        # PART 2: Apply a large scale sensitivity correction.
        print("Applying large scale sensitivity corrections...")
        for filt in filters:
            fname = yearpath + "sum_" + filt + "_nm_coi.img"
            if os.path.isfile(fname):
                lsscorr(fname)

        # PART 3: Apply a zero point correction.
        print("Applying zero point corrections...")
        for filt in filters:
            fname = yearpath + "sum_" + filt + "_nm_coilss.img"
            if os.path.isfile(fname):
                zeropoint(fname, *zp_params[filt])

    return 0
# Functions for PART 1: Coincidence loss correction.
def coicorr(filename):
    """Apply a coincidence loss correction to a summed UVOT image.

    Writes a new "*_coi.img" file whose primary HDU is a 3-plane cube:
    plane 0 = corrected count rate, plane 1 = correction factor applied,
    plane 2 = relative uncertainty (fraction) on the corrected data.
    """
    # Open the image. Create NaN-filled arrays with the shape of the image
    # (border pixels that never get a full 9x9 box stay NaN).
    hdulist = fits.open(filename)
    data = hdulist[0].data
    header = hdulist[0].header
    total_flux = np.full_like(data, np.nan, dtype=np.float64)
    std = np.full_like(data, np.nan, dtype=np.float64)
    # Loop over all pixels and for each pixel: sum the flux densities (count rates) of
    # the 9x9 surrounding pixels: Craw (counts/s). Calculate the standard deviation in
    # the 9x9 pixels box.
    # NOTE(review): the 5-pixel margin leaves one more border row/column NaN
    # than the 9x9 box strictly requires (4 would suffice) — confirm intentional.
    for x in range(5, data.shape[1] - 5):
        for y in range(5, data.shape[0] - 5):
            total_flux[y, x] = np.sum(data[y - 4 : y + 5, x - 4 : x + 5])
            std[y, x] = np.std(data[y - 4 : y + 5, x - 4 : x + 5])
    # Obtain the dead time correction factor and the frame time (in s) from the header
    # of the image.
    alpha = header["DEADC"]
    ft = header["FRAMTIME"]
    # Calculate the total number of counts in the 9x9 pixels box: x = Craw*ft (counts).
    # Calculate the minimum and maximum possible number of counts in the 9x9 pixels box.
    total_counts = ft * total_flux
    total_counts_min = ft * (total_flux - 81 * std)
    total_counts_max = ft * (total_flux + 81 * std)
    # Calculate the polynomial correction factor and the minimum and maximum possible
    # polynomial correction factor.
    f = polynomial(total_counts)
    f_min = polynomial(total_counts_min)
    f_max = polynomial(total_counts_max)
    # If alpha*total_counts_max is larger than 1, replace this value by 0.99. Otherwise,
    # the maximum possible theoretical coincidence-loss-corrected count rate will be NaN
    # in these pixels (log of a non-positive number).
    if np.sum(alpha * total_counts_max >= 1.0) != 0:
        print(
            "Warning: The following pixels have very high fluxes. The uncertainty on "
            "the correction factor for these pixels is not to be trusted!",
            np.where(alpha * total_counts_max >= 1.0),
        )
    total_counts_max[alpha * total_counts_max >= 1.0] = 0.99 / alpha
    # Calculate the theoretical coincidence-loss-corrected count rate:
    # Ctheory = -ln(1 - alpha*Craw*ft) / (alpha*ft) (counts/s), via log1p for accuracy.
    # Calculate the minimum and maximum possible theoretical coincidence-loss-corrected
    # count rate.
    Ctheory = -np.log1p(-alpha * total_counts) / (alpha * ft)
    Ctheory_min = -np.log1p(-alpha * total_counts_min) / (alpha * ft)
    Ctheory_max = -np.log1p(-alpha * total_counts_max) / (alpha * ft)
    # Calculate the coincidence loss correction factor:
    # Ccorrfactor = Ctheory*f(x)/Craw.
    # Calculate the minimum and maximum possible coincidence loss correction factor.
    corrfactor = (Ctheory * f) / total_flux
    corrfactor_min = (Ctheory_min * f_min) / (total_flux - 81 * std)
    corrfactor_max = (Ctheory_max * f_max) / (total_flux + 81 * std)
    # Apply the coincidence loss correction to the data. Apply the minimum and maximum
    # coincidence loss correction to the data.
    new_data = corrfactor * data
    new_data_min = corrfactor_min * data
    new_data_max = corrfactor_max * data
    # Calculate the uncertainty and the relative uncertainty on the coincidence loss
    # correction. Put the relative uncertainty to 0 if the uncertainty is 0 (because in
    # those pixels the flux is also 0 and the relative uncertainty would be NaN).
    coicorr_unc = np.maximum(
        np.abs(new_data - new_data_min), np.abs(new_data_max - new_data)
    )
    coicorr_rel = coicorr_unc / new_data
    coicorr_rel[coicorr_unc == 0.0] = 0.0
    print(
        "The median coincidence loss correction factor for image "
        + os.path.basename(filename)
        + " is "
        + str(np.nanmedian(corrfactor))
        + " and the median relative uncertainty on the corrected data is "
        + str(np.nanmedian(coicorr_rel))
        + "."
    )
    # Adapt the header. Write the corrected data, the applied coincidence loss
    # correction and the relative uncertainty to a new image.
    header["PLANE0"] = "primary (counts/s)"
    header["PLANE1"] = "coincidence loss correction factor"
    header["PLANE2"] = "relative coincidence loss correction uncertainty (fraction)"
    datacube = [new_data, corrfactor, coicorr_rel]
    new_hdu = fits.PrimaryHDU(datacube, header)
    new_hdu.writeto(filename.replace(".img", "_coi.img"), overwrite=True)
    print(os.path.basename(filename) + " has been corrected for coincidence loss.")
# Function to calculate the empirical polynomial correction to account for the
# differences between the observed and theoretical coincidence loss correction:
# f(x) = 1 + a1x + a2x**2 + a3x**3 + a4x**4.
def polynomial(x):
    """Empirical polynomial correction accounting for the difference between
    the observed and theoretical coincidence loss correction:
    f(x) = 1 + a1*x + a2*x**2 + a3*x**3 + a4*x**4.

    Works element-wise on NumPy arrays as well as on scalars.
    """
    coefficients = (0.0658568, -0.0907142, 0.0285951, 0.0308063)
    result = 1
    # Accumulate terms in ascending power order (identical evaluation order
    # to the original explicit sum, so results are bit-for-bit the same).
    for power, coefficient in enumerate(coefficients, start=1):
        result = result + coefficient * x ** power
    return result
# Function for PART 2: Large scale sensitivity correction.
def lsscorr(filename):
    """Divide the primary plane of *filename* by the large scale sensitivity
    map and write the result (plus the unchanged coincidence-correction
    planes) to a new "*lss.img" file.
    """
    image = fits.open(filename)
    primary = image[0]
    counts = primary.data[0]
    corrfactor = primary.data[1]
    rel_unc = primary.data[2]
    # The LSS map lives in extension 1 of the companion "*_lss.img" file.
    lss_map = fits.open(filename.replace("nm_coi", "lss"))[1].data
    corrected = counts / lss_map
    fits.PrimaryHDU([corrected, corrfactor, rel_unc], primary.header).writeto(
        filename.replace(".img", "lss.img"), overwrite=True
    )
    print(
        os.path.basename(filename)
        + " has been corrected for large scale sensitivity variations."
    )
# Function for PART 3: Zero point correction.
def zeropoint(filename, param1, param2):
    """Correct *filename* for the detector's sensitivity loss over time.

    The correction factor is 1 + param1*t + param2*t**2, where t is the
    number of years between 2005-01-01 and the mid-point of the
    observation (taken from the DATE-OBS / DATE-END header keywords).
    The result is written to a new "*zp.img" file.
    """
    image = fits.open(filename)
    primary = image[0]
    header = primary.header
    counts = primary.data[0]
    corrfactor = primary.data[1]
    rel_unc = primary.data[2]
    # Mid-point of the observation (dates only; time of day is discarded).
    started = date.fromisoformat(header["DATE-OBS"].split("T")[0])
    ended = date.fromisoformat(header["DATE-END"].split("T")[0])
    mid_obs = started + (ended - started) / 2
    # Years elapsed since the 1st of January 2005.
    years_passed = (mid_obs - date(2005, 1, 1)).days / 365.25
    zerocorr = 1 + param1 * years_passed + param2 * years_passed ** 2
    # Record the applied factor in the header and write the corrected cube.
    header["ZPCORR"] = zerocorr
    fits.PrimaryHDU([counts / zerocorr, corrfactor, rel_unc], header).writeto(
        filename.replace(".img", "zp.img"), overwrite=True
    )
    print(
        os.path.basename(filename)
        + " has been corrected for sensitivity loss of the detector over time."
    )
# Script entry point: run all corrections and propagate the exit status.
if __name__ == "__main__":
    exit(main())
| [
"numpy.abs",
"numpy.full_like",
"astropy.io.fits.PrimaryHDU",
"argparse.ArgumentParser",
"dresscode.utils.load_config",
"numpy.where",
"numpy.std",
"numpy.nanmedian",
"os.path.isfile",
"numpy.sum",
"datetime.date",
"os.path.basename",
"astropy.io.fits.open",
"numpy.log1p"
] | [((371, 387), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (385, 387), False, 'from argparse import ArgumentParser\n'), ((542, 566), 'dresscode.utils.load_config', 'load_config', (['args.config'], {}), '(args.config)\n', (553, 566), False, 'from dresscode.utils import load_config\n'), ((2419, 2438), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (2428, 2438), False, 'from astropy.io import fits\n'), ((2514, 2558), 'numpy.full_like', 'np.full_like', (['data', 'np.nan'], {'dtype': 'np.float64'}), '(data, np.nan, dtype=np.float64)\n', (2526, 2558), True, 'import numpy as np\n'), ((2569, 2613), 'numpy.full_like', 'np.full_like', (['data', 'np.nan'], {'dtype': 'np.float64'}), '(data, np.nan, dtype=np.float64)\n', (2581, 2613), True, 'import numpy as np\n'), ((6540, 6573), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['datacube', 'header'], {}), '(datacube, header)\n', (6555, 6573), False, 'from astropy.io import fits\n'), ((7263, 7282), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (7272, 7282), False, 'from astropy.io import fits\n'), ((7723, 7760), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['new_datacube', 'header'], {}), '(new_datacube, header)\n', (7738, 7760), False, 'from astropy.io import fits\n'), ((8083, 8102), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (8092, 8102), False, 'from astropy.io import fits\n'), ((8615, 8631), 'datetime.date', 'date', (['(2005)', '(1)', '(1)'], {}), '(2005, 1, 1)\n', (8619, 8631), False, 'from datetime import date, datetime\n'), ((9065, 9098), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['datacube', 'header'], {}), '(datacube, header)\n', (9080, 9098), False, 'from astropy.io import fits\n'), ((935, 978), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_um2_nm.img')"], {}), "(yearpath + 'sum_um2_nm.img')\n", (949, 978), False, 'import os\n'), ((1040, 1083), 'os.path.isfile', 'os.path.isfile', 
(["(yearpath + 'sum_uw2_nm.img')"], {}), "(yearpath + 'sum_uw2_nm.img')\n", (1054, 1083), False, 'import os\n'), ((1145, 1188), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_uw1_nm.img')"], {}), "(yearpath + 'sum_uw1_nm.img')\n", (1159, 1188), False, 'import os\n'), ((1379, 1426), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_um2_nm_coi.img')"], {}), "(yearpath + 'sum_um2_nm_coi.img')\n", (1393, 1426), False, 'import os\n'), ((1492, 1539), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_uw2_nm_coi.img')"], {}), "(yearpath + 'sum_uw2_nm_coi.img')\n", (1506, 1539), False, 'import os\n'), ((1605, 1652), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_uw1_nm_coi.img')"], {}), "(yearpath + 'sum_uw1_nm_coi.img')\n", (1619, 1652), False, 'import os\n'), ((1821, 1871), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_um2_nm_coilss.img')"], {}), "(yearpath + 'sum_um2_nm_coilss.img')\n", (1835, 1871), False, 'import os\n'), ((1964, 2014), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_uw2_nm_coilss.img')"], {}), "(yearpath + 'sum_uw2_nm_coilss.img')\n", (1978, 2014), False, 'import os\n'), ((2106, 2156), 'os.path.isfile', 'os.path.isfile', (["(yearpath + 'sum_uw1_nm_coilss.img')"], {}), "(yearpath + 'sum_uw1_nm_coilss.img')\n", (2120, 2156), False, 'import os\n'), ((3972, 4011), 'numpy.sum', 'np.sum', (['(alpha * total_counts_max >= 1.0)'], {}), '(alpha * total_counts_max >= 1.0)\n', (3978, 4011), True, 'import numpy as np\n'), ((5681, 5712), 'numpy.abs', 'np.abs', (['(new_data - new_data_min)'], {}), '(new_data - new_data_min)\n', (5687, 5712), True, 'import numpy as np\n'), ((5714, 5745), 'numpy.abs', 'np.abs', (['(new_data_max - new_data)'], {}), '(new_data_max - new_data)\n', (5720, 5745), True, 'import numpy as np\n'), ((2934, 2972), 'numpy.sum', 'np.sum', (['data[y - 4:y + 5, x - 4:x + 5]'], {}), '(data[y - 4:y + 5, x - 4:x + 5])\n', (2940, 2972), True, 'import numpy as np\n'), ((3001, 3039), 'numpy.std', 'np.std', 
(['data[y - 4:y + 5, x - 4:x + 5]'], {}), '(data[y - 4:y + 5, x - 4:x + 5])\n', (3007, 3039), True, 'import numpy as np\n'), ((4208, 4249), 'numpy.where', 'np.where', (['(alpha * total_counts_max >= 1.0)'], {}), '(alpha * total_counts_max >= 1.0)\n', (4216, 4249), True, 'import numpy as np\n'), ((4587, 4618), 'numpy.log1p', 'np.log1p', (['(-alpha * total_counts)'], {}), '(-alpha * total_counts)\n', (4595, 4618), True, 'import numpy as np\n'), ((4653, 4688), 'numpy.log1p', 'np.log1p', (['(-alpha * total_counts_min)'], {}), '(-alpha * total_counts_min)\n', (4661, 4688), True, 'import numpy as np\n'), ((4723, 4758), 'numpy.log1p', 'np.log1p', (['(-alpha * total_counts_max)'], {}), '(-alpha * total_counts_max)\n', (4731, 4758), True, 'import numpy as np\n'), ((6659, 6685), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (6675, 6685), False, 'import os\n'), ((7854, 7880), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (7870, 7880), False, 'import os\n'), ((9191, 9217), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (9207, 9217), False, 'import os\n'), ((6097, 6122), 'numpy.nanmedian', 'np.nanmedian', (['coicorr_rel'], {}), '(coicorr_rel)\n', (6109, 6122), True, 'import numpy as np\n'), ((5982, 6006), 'numpy.nanmedian', 'np.nanmedian', (['corrfactor'], {}), '(corrfactor)\n', (5994, 6006), True, 'import numpy as np\n'), ((5924, 5950), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (5940, 5950), False, 'import os\n')] |
import pathlib
from setuptools import setup, find_packages
from pokemontcgsdkasync.config import __version__, __pypi_package_name__, __github_username__, __github_repo_name__
# Packaging metadata for the pokemontcgsdkasync distribution.
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file (used as the PyPI long description)
README = (HERE / "README.md").read_text(encoding='utf-8')
# Extra dependencies pulled in only for the "tests" extra (pip install .[tests])
tests_require = [
    'mock',
    'nose',
    'coverage',
    'yanc',
    'preggy',
    'tox',
    'ipdb',
    'coveralls',
    'sphinx',
]
# Project home and the tarball URL for this exact version tag on GitHub.
url = 'https://github.com/' + __github_username__ + '/' + __github_repo_name__
download_url = "{}/tarball/{}".format(url, __version__)
setup(
    name=__pypi_package_name__,
    version=__version__,
    description='Pokemon TCG SDK for pokemontcg.io using asyncio',
    long_description=README,
    long_description_content_type="text/markdown",
    url=url,
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Operating System :: OS Independent',
    ],
    keywords='pokemon tcg sdk trading card game api rest async',
    download_url=download_url,
    packages=find_packages(),
    include_package_data=False,
    python_requires=">=3.6",
    install_requires=[
        # add your dependencies here
        # remember to use 'package-name>=x.y.z,<x.y+1.0' notation (this way you get bugfixes)
        'dacite>=1.6.0,<2.0.0',
        'aiohttp>=3.8.1,<4.0.0',
    ],
    extras_require={
        'tests': tests_require,
    },
    # entry_points={
    #     'console_scripts': [
    #         # add cli scripts here in this form:
    #         # 'pokemontcgsdk=pokemontcgsdk.cli:main',
    #     ],
    # },
)
| [
"setuptools.find_packages",
"pathlib.Path"
] | [((222, 244), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (234, 244), False, 'import pathlib\n'), ((1325, 1340), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1338, 1340), False, 'from setuptools import setup, find_packages\n')] |
#!/usr/bin/env python3
import sys
def plot(x, y):
    """Emit one rasterised point; stands in for an actual pixel write."""
    print((x, y))


def plot_line_low(x0, y0, x1, y1):
    """Bresenham for gentle slopes (|dy| <= dx), stepping x from x0 to x1.

    Bug fix: the x-loop previously stopped at x1 - 1, so the endpoint
    (x1, y1) was never plotted — unlike plot_line_v2/v3, which include it
    (the reference pseudocode is endpoint-inclusive).
    """
    dx = x1 - x0
    dy = y1 - y0
    yi = 1
    if dy < 0:
        # Mirror negative slopes so a single error-update rule applies.
        yi = -1
        dy = -dy
    D = (2 * dy) - dx
    y = y0
    for x in range(x0, x1 + 1):  # inclusive of the endpoint
        plot(x, y)
        if D > 0:
            y = y + yi
            D = D + 2*(dy - dx)
        else:
            D = D + 2*dy


def plot_line_high(x0, y0, x1, y1):
    """Bresenham for steep slopes (|dy| > dx), stepping y from y0 to y1.

    Same endpoint fix as plot_line_low: the loop is now inclusive of y1.
    """
    dx = x1 - x0
    dy = y1 - y0
    xi = 1
    if dx < 0:
        # Mirror leftward lines so a single error-update rule applies.
        xi = -1
        dx = -dx
    D = (2 * dx) - dy
    x = x0
    for y in range(y0, y1 + 1):  # inclusive of the endpoint
        plot(x, y)
        if D > 0:
            x = x + xi
            D = D + 2*(dx - dy)
        else:
            D = D + 2*dx
def plot_line_v1(x0, y0, x1, y1):
    """Dispatch to the low/high-slope Bresenham helpers, swapping the
    endpoints when needed so the chosen helper always walks in increasing
    x (shallow case) or increasing y (steep case).
    """
    if abs(y1 - y0) < abs(x1 - x0):
        draw, reverse = plot_line_low, x0 > x1
    else:
        draw, reverse = plot_line_high, y0 > y1
    if reverse:
        draw(x1, y1, x0, y0)
    else:
        draw(x0, y0, x1, y1)
def plot_line_v2(x0, y0, x1, y1):
    """All-octant Bresenham (single error-accumulator form) from (x0, y0)
    to (x1, y1), endpoints included.
    """
    step_x = 1 if x0 < x1 else -1
    step_y = 1 if y0 < y1 else -1
    run = abs(x1 - x0)
    rise = -abs(y1 - y0)  # kept negative so one accumulator covers both axes
    error = run + rise
    cur_x, cur_y = x0, y0
    while True:
        plot(cur_x, cur_y)
        if (cur_x, cur_y) == (x1, y1):
            break
        doubled = error * 2
        if doubled >= rise:
            error += rise
            cur_x += step_x
        if doubled <= run:
            error += run
            cur_y += step_y
def plot_line_v3(x0, y0, x1, y1, debug=False):
    """Bresenham variant equivalent to plot_line_v2 but with explicit
    direction booleans instead of +/-1 step variables, and optional
    tracing of the intermediate error terms when *debug* is True.
    """
    dx = x1 - x0
    right = dx > 0
    if debug:
        print("right: {}".format(right))
    if not right:
        dx = -dx  # dx is now abs(x1 - x0)
    if debug:
        print("dx: {}".format(dx))
    dy = y1 - y0
    down = dy > 0
    if debug:
        print("down: {}".format(down))
    if down:
        dy = -dy  # dy is now -abs(y1 - y0), as in plot_line_v2
    if debug:
        print("dy: {}".format(dy))
    err = dx + dy
    x = x0
    y = y0
    while True:
        plot(x, y)
        if debug:
            print("err: {}".format(err))
        if x == x1 and y == y1:
            break
        e2 = err << 1  # e2 = 2 * err (shift form of the doubling)
        if debug:
            print("e2: {}".format(e2))
        if e2 >= dy:
            if debug:
                print("e2 >= dy")
            err += dy
            if right:
                x += 1
            else:
                x -= 1
        if e2 <= dx:
            if debug:
                print("e2 <= dx")
            err += dx
            if down:
                y += 1
            else:
                y -= 1
def main(x0, y0, x1, y1, debug):
    """Rasterise the same segment with all three implementations,
    printing a version label before each trace.
    """
    for label, draw in (("v1", plot_line_v1), ("v2", plot_line_v2)):
        print(label)
        draw(x0, y0, x1, y1)
    # v3 is the only variant taking the debug flag.
    print("v3")
    plot_line_v3(x0, y0, x1, y1, debug=debug)
if __name__ == '__main__':
    """
    It uses Bresenham's line algorithm to
    draw a line from (x0, y0) to (x1, y1)
    ref: https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm#All_cases
    """
    # Require the four coordinates; the optional fifth argument enables debug.
    if len(sys.argv) < 5:
        print("Usage: ./draw_line.py x0 y0 x1 y1 [-d]")
        sys.exit(0)
    x0 = int(sys.argv[1])
    y0 = int(sys.argv[2])
    x1 = int(sys.argv[3])
    y1 = int(sys.argv[4])
    # Debug tracing only when exactly one extra argument '-d' is given.
    debug = True if len(sys.argv) == 6 and sys.argv[5] == '-d' else False
    main(x0, y0, x1, y1, debug)
| [
"sys.exit"
] | [((3031, 3042), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3039, 3042), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by <NAME>
email : <EMAIL>
The content of this file is based on
- PG_Manager by <NAME> <<EMAIL>> (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import range
from functools import cmp_to_key
from qgis.PyQt.QtCore import QRegExp, QFile
from qgis.core import Qgis, QgsCredentials, QgsDataSourceUri
from ..connector import DBConnector
from ..plugin import ConnectionError, DbError, Table
import os
import psycopg2
import psycopg2.extensions
# use unicode!
# Register the UNICODE typecasters globally so psycopg2 returns str
# (not bytes) for text columns and for arrays of text.
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
def classFactory():
    """Return the connector class used by the DB Manager plugin framework."""
    return PostGisDBConnector
class PostGisDBConnector(DBConnector):
    def __init__(self, uri):
        """Open a psycopg2 connection for *uri*, prompting for credentials
        (up to 3 retries) when the first attempt fails, then probe the
        server for PostGIS/raster capabilities.

        :param uri: QgsDataSourceUri describing the PostgreSQL connection
        :raises ConnectionError: when no connection can be established
        """
        DBConnector.__init__(self, uri)
        # Fall back to the libpq environment variables for missing URI parts.
        self.host = uri.host() or os.environ.get('PGHOST')
        self.port = uri.port() or os.environ.get('PGPORT')
        username = uri.username() or os.environ.get('PGUSER')
        password = uri.password() or os.environ.get('PGPASSWORD')
        # Do not get db and user names from the env if service is used
        if not uri.service():
            if username is None:
                username = os.environ.get('USER')
            self.dbname = uri.database() or os.environ.get('PGDATABASE') or username
            uri.setDatabase(self.dbname)
        expandedConnInfo = self._connectionInfo()
        try:
            self.connection = psycopg2.connect(expandedConnInfo)
        except self.connection_error_types() as e:
            # get credentials if cached or asking to the user no more than 3 times
            err = str(e)
            uri = self.uri()
            conninfo = uri.connectionInfo(False)
            for i in range(3):
                (ok, username, password) = QgsCredentials.instance().get(conninfo, username, password, err)
                if not ok:
                    # user cancelled the credentials dialog
                    raise ConnectionError(e)
                if username:
                    uri.setUsername(username)
                if password:
                    uri.setPassword(password)
                newExpandedConnInfo = uri.connectionInfo(True)
                try:
                    self.connection = psycopg2.connect(newExpandedConnInfo)
                    # cache the working credentials for later sessions
                    QgsCredentials.instance().put(conninfo, username, password)
                except self.connection_error_types() as e:
                    if i == 2:
                        # last retry exhausted
                        raise ConnectionError(e)
                    err = str(e)
                finally:
                    # clear certs for each time trying to connect
                    self._clearSslTempCertsIfAny(newExpandedConnInfo)
        finally:
            # clear certs of the first connection try
            self._clearSslTempCertsIfAny(expandedConnInfo)
        # Autocommit: DB Manager issues standalone statements, not transactions.
        self.connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        c = self._execute(None, u"SELECT current_user,current_database()")
        self.user, self.dbname = self._fetchone(c)
        self._close_cursor(c)
        # Probe server capabilities once, up front.
        self._checkSpatial()
        self._checkRaster()
        self._checkGeometryColumnsTable()
        self._checkRasterColumnsTable()
self._checkRasterColumnsTable()
def _connectionInfo(self):
return str(self.uri().connectionInfo(True))
def _clearSslTempCertsIfAny(self, connectionInfo):
# remove certs (if any) of the connectionInfo
expandedUri = QgsDataSourceUri(connectionInfo)
def removeCert(certFile):
certFile = certFile.replace("'", "")
file = QFile(certFile)
# set permission to allow removing on Win.
# On linux and Mac if file is set with QFile::>ReadUser
# does not create problem removing certs
if not file.setPermissions(QFile.WriteOwner):
raise Exception('Cannot change permissions on {}: error code: {}'.format(file.fileName(), file.error()))
if not file.remove():
raise Exception('Cannot remove {}: error code: {}'.format(file.fileName(), file.error()))
sslCertFile = expandedUri.param("sslcert")
if sslCertFile:
removeCert(sslCertFile)
sslKeyFile = expandedUri.param("sslkey")
if sslKeyFile:
removeCert(sslKeyFile)
sslCAFile = expandedUri.param("sslrootcert")
if sslCAFile:
removeCert(sslCAFile)
def _checkSpatial(self):
""" check whether postgis_version is present in catalog """
c = self._execute(None, u"SELECT COUNT(*) FROM pg_proc WHERE proname = 'postgis_version'")
self.has_spatial = self._fetchone(c)[0] > 0
self._close_cursor(c)
return self.has_spatial
def _checkRaster(self):
""" check whether postgis_version is present in catalog """
c = self._execute(None, u"SELECT COUNT(*) FROM pg_proc WHERE proname = 'postgis_raster_lib_version'")
self.has_raster = self._fetchone(c)[0] > 0
self._close_cursor(c)
return self.has_raster
def _checkGeometryColumnsTable(self):
c = self._execute(None,
u"SELECT relkind = 'v' OR relkind = 'm' FROM pg_class WHERE relname = 'geometry_columns' AND relkind IN ('v', 'r', 'm', 'p')")
res = self._fetchone(c)
self._close_cursor(c)
self.has_geometry_columns = (res is not None and len(res) != 0)
if not self.has_geometry_columns:
self.has_geometry_columns_access = self.is_geometry_columns_view = False
else:
self.is_geometry_columns_view = res[0]
# find out whether has privileges to access geometry_columns table
priv = self.getTablePrivileges('geometry_columns')
self.has_geometry_columns_access = priv[0]
return self.has_geometry_columns
def _checkRasterColumnsTable(self):
c = self._execute(None,
u"SELECT relkind = 'v' OR relkind = 'm' FROM pg_class WHERE relname = 'raster_columns' AND relkind IN ('v', 'r', 'm', 'p')")
res = self._fetchone(c)
self._close_cursor(c)
self.has_raster_columns = (res is not None and len(res) != 0)
if not self.has_raster_columns:
self.has_raster_columns_access = self.is_raster_columns_view = False
else:
self.is_raster_columns_view = res[0]
# find out whether has privileges to access geometry_columns table
self.has_raster_columns_access = self.getTablePrivileges('raster_columns')[0]
return self.has_raster_columns
def cancel(self):
if self.connection:
self.connection.cancel()
def getInfo(self):
c = self._execute(None, u"SELECT version()")
res = self._fetchone(c)
self._close_cursor(c)
return res
def getSpatialInfo(self):
""" returns tuple about PostGIS support:
- lib version
- geos version
- proj version
- installed scripts version
- released scripts version
"""
if not self.has_spatial:
return
try:
c = self._execute(None,
u"SELECT postgis_lib_version(), postgis_geos_version(), postgis_proj_version(), postgis_scripts_installed(), postgis_scripts_released()")
except DbError:
return
res = self._fetchone(c)
self._close_cursor(c)
return res
    def hasSpatialSupport(self):
        """Whether PostGIS is available (value cached by _checkSpatial)."""
        return self.has_spatial
    def hasRasterSupport(self):
        """Whether PostGIS raster support is available (cached by _checkRaster)."""
        return self.has_raster
def hasCustomQuerySupport(self):
return Qgis.QGIS_VERSION[0:3] >= "1.5"
    def hasTableColumnEditingSupport(self):
        """This connector supports ALTER TABLE column editing."""
        return True
    def hasCreateSpatialViewSupport(self):
        """This connector supports creating spatial views."""
        return True
def fieldTypes(self):
return [
"integer", "bigint", "smallint", # integers
"serial", "bigserial", # auto-incrementing ints
"real", "double precision", "numeric", # floats
"varchar", "varchar(255)", "char(20)", "text", # strings
"date", "time", "timestamp" # date/time
]
def getDatabasePrivileges(self):
""" db privileges: (can create schemas, can create temp. tables) """
sql = u"SELECT has_database_privilege(current_database(), 'CREATE'), has_database_privilege(current_database(), 'TEMP')"
c = self._execute(None, sql)
res = self._fetchone(c)
self._close_cursor(c)
return res
def getSchemaPrivileges(self, schema):
""" schema privileges: (can create new objects, can access objects in schema) """
schema = 'current_schema()' if schema is None else self.quoteString(schema)
sql = u"SELECT has_schema_privilege(%(s)s, 'CREATE'), has_schema_privilege(%(s)s, 'USAGE')" % {'s': schema}
c = self._execute(None, sql)
res = self._fetchone(c)
self._close_cursor(c)
return res
def getTablePrivileges(self, table):
""" table privileges: (select, insert, update, delete) """
schema, tablename = self.getSchemaTableName(table)
schema_priv = self.getSchemaPrivileges(schema)
if not schema_priv[1]:
return
t = self.quoteId(table)
sql = u"""SELECT has_table_privilege(%(t)s, 'SELECT'), has_table_privilege(%(t)s, 'INSERT'),
has_table_privilege(%(t)s, 'UPDATE'), has_table_privilege(%(t)s, 'DELETE')""" % {
't': self.quoteString(t)}
c = self._execute(None, sql)
res = self._fetchone(c)
self._close_cursor(c)
return res
def getSchemas(self):
""" get list of schemas in tuples: (oid, name, owner, perms) """
sql = u"SELECT oid, nspname, pg_get_userbyid(nspowner), nspacl, pg_catalog.obj_description(oid) FROM pg_namespace WHERE nspname !~ '^pg_' AND nspname != 'information_schema' ORDER BY nspname"
c = self._execute(None, sql)
res = self._fetchall(c)
self._close_cursor(c)
return res
def getTables(self, schema=None, add_sys_tables=False):
""" get list of tables """
tablenames = []
items = []
sys_tables = ["spatial_ref_sys", "geography_columns", "geometry_columns",
"raster_columns", "raster_overviews"]
try:
vectors = self.getVectorTables(schema)
for tbl in vectors:
if not add_sys_tables and tbl[1] in sys_tables and tbl[2] in ['', 'public']:
continue
tablenames.append((tbl[2], tbl[1]))
items.append(tbl)
except DbError:
pass
try:
rasters = self.getRasterTables(schema)
for tbl in rasters:
if not add_sys_tables and tbl[1] in sys_tables and tbl[2] in ['', 'public']:
continue
tablenames.append((tbl[2], tbl[1]))
items.append(tbl)
except DbError:
pass
sys_tables = ["spatial_ref_sys", "geography_columns", "geometry_columns",
"raster_columns", "raster_overviews"]
if schema:
schema_where = u" AND nspname = %s " % self.quoteString(schema)
else:
schema_where = u" AND (nspname != 'information_schema' AND nspname !~ 'pg_') "
# get all tables and views
sql = u"""SELECT
cla.relname, nsp.nspname, cla.relkind,
pg_get_userbyid(relowner), reltuples, relpages,
pg_catalog.obj_description(cla.oid)
FROM pg_class AS cla
JOIN pg_namespace AS nsp ON nsp.oid = cla.relnamespace
WHERE cla.relkind IN ('v', 'r', 'm', 'p') """ + schema_where + """
ORDER BY nsp.nspname, cla.relname"""
c = self._execute(None, sql)
for tbl in self._fetchall(c):
if tablenames.count((tbl[1], tbl[0])) <= 0:
item = list(tbl)
item.insert(0, Table.TableType)
items.append(item)
self._close_cursor(c)
return sorted(items, key=cmp_to_key(lambda x, y: (x[1] > y[1]) - (x[1] < y[1])))
def getVectorTables(self, schema=None):
""" get list of table with a geometry column
it returns:
name (table name)
namespace (schema)
type = 'view' (is a view?)
owner
tuples
pages
geometry_column:
f_geometry_column (or pg_attribute.attname, the geometry column name)
type (or pg_attribute.atttypid::regtype, the geometry column type name)
coord_dimension
srid
"""
if not self.has_spatial:
return []
if schema:
schema_where = u" AND nspname = %s " % self.quoteString(schema)
else:
schema_where = u" AND (nspname != 'information_schema' AND nspname !~ 'pg_') "
geometry_column_from = u""
geometry_fields_select = u"""att.attname,
textin(regtypeout(att.atttypid::regtype)),
NULL, NULL"""
if self.has_geometry_columns and self.has_geometry_columns_access:
geometry_column_from = u"""LEFT OUTER JOIN geometry_columns AS geo ON
cla.relname = geo.f_table_name AND nsp.nspname = f_table_schema AND
lower(att.attname) = lower(f_geometry_column)"""
geometry_fields_select = u"""CASE WHEN geo.f_geometry_column IS NOT NULL THEN geo.f_geometry_column ELSE att.attname END,
CASE WHEN geo.type IS NOT NULL THEN geo.type ELSE textin(regtypeout(att.atttypid::regtype)) END,
geo.coord_dimension, geo.srid"""
# discovery of all tables and whether they contain a geometry column
sql = u"""SELECT
cla.relname, nsp.nspname, cla.relkind,
pg_get_userbyid(relowner), cla.reltuples, cla.relpages,
pg_catalog.obj_description(cla.oid),
""" + geometry_fields_select + """
FROM pg_class AS cla
JOIN pg_namespace AS nsp ON
nsp.oid = cla.relnamespace
JOIN pg_attribute AS att ON
att.attrelid = cla.oid AND
att.atttypid = 'geometry'::regtype OR
att.atttypid IN (SELECT oid FROM pg_type WHERE typbasetype='geometry'::regtype )
""" + geometry_column_from + """
WHERE cla.relkind IN ('v', 'r', 'm', 'p') """ + schema_where + """
ORDER BY nsp.nspname, cla.relname, att.attname"""
items = []
c = self._execute(None, sql)
for i, tbl in enumerate(self._fetchall(c)):
item = list(tbl)
item.insert(0, Table.VectorType)
items.append(item)
self._close_cursor(c)
return items
def getRasterTables(self, schema=None):
""" get list of table with a raster column
it returns:
name (table name)
namespace (schema)
type = 'view' (is a view?)
owner
tuples
pages
raster_column:
r_raster_column (or pg_attribute.attname, the raster column name)
pixel type
block size
internal or external
srid
"""
if not self.has_spatial:
return []
if not self.has_raster:
return []
if schema:
schema_where = u" AND nspname = %s " % self.quoteString(schema)
else:
schema_where = u" AND (nspname != 'information_schema' AND nspname !~ 'pg_') "
raster_column_from = u""
raster_fields_select = u"""att.attname, NULL, NULL, NULL, NULL, NULL"""
if self.has_raster_columns and self.has_raster_columns_access:
raster_column_from = u"""LEFT OUTER JOIN raster_columns AS rast ON
cla.relname = rast.r_table_name AND nsp.nspname = r_table_schema AND
lower(att.attname) = lower(r_raster_column)"""
raster_fields_select = u"""CASE WHEN rast.r_raster_column IS NOT NULL THEN rast.r_raster_column ELSE att.attname END,
rast.pixel_types,
rast.scale_x,
rast.scale_y,
rast.out_db,
rast.srid"""
# discovery of all tables and whether they contain a raster column
sql = u"""SELECT
cla.relname, nsp.nspname, cla.relkind,
pg_get_userbyid(relowner), cla.reltuples, cla.relpages,
pg_catalog.obj_description(cla.oid),
""" + raster_fields_select + """
FROM pg_class AS cla
JOIN pg_namespace AS nsp ON
nsp.oid = cla.relnamespace
JOIN pg_attribute AS att ON
att.attrelid = cla.oid AND
att.atttypid = 'raster'::regtype OR
att.atttypid IN (SELECT oid FROM pg_type WHERE typbasetype='raster'::regtype )
""" + raster_column_from + """
WHERE cla.relkind IN ('v', 'r', 'm', 'p') """ + schema_where + """
ORDER BY nsp.nspname, cla.relname, att.attname"""
items = []
c = self._execute(None, sql)
for i, tbl in enumerate(self._fetchall(c)):
item = list(tbl)
item.insert(0, Table.RasterType)
items.append(item)
self._close_cursor(c)
return items
def getTableRowCount(self, table):
c = self._execute(None, u"SELECT COUNT(*) FROM %s" % self.quoteId(table))
res = self._fetchone(c)[0]
self._close_cursor(c)
return res
    def getTableFields(self, table):
        """Return the column metadata rows of *table*: (ordinal position,
        name, type name, length, typmod, notnull, hasdefault, default
        expression, formatted type)."""
        # NOTE(review): pg_attrdef.adsrc was removed in PostgreSQL 12; this
        # query presumably targets older servers -- verify supported versions.
        schema, tablename = self.getSchemaTableName(table)
        schema_where = u" AND nspname=%s " % self.quoteString(schema) if schema is not None else ""
        sql = u"""SELECT a.attnum AS ordinal_position,
                a.attname AS column_name,
                t.typname AS data_type,
                a.attlen AS char_max_len,
                a.atttypmod AS modifier,
                a.attnotnull AS notnull,
                a.atthasdef AS hasdefault,
                adef.adsrc AS default_value,
                pg_catalog.format_type(a.atttypid,a.atttypmod) AS formatted_type
            FROM pg_class c
            JOIN pg_attribute a ON a.attrelid = c.oid
            JOIN pg_type t ON a.atttypid = t.oid
            JOIN pg_namespace nsp ON c.relnamespace = nsp.oid
            LEFT JOIN pg_attrdef adef ON adef.adrelid = a.attrelid AND adef.adnum = a.attnum
            WHERE
              a.attnum > 0 AND c.relname=%s %s
            ORDER BY a.attnum""" % (self.quoteString(tablename), schema_where)
        c = self._execute(None, sql)
        res = self._fetchall(c)
        self._close_cursor(c)
        return res
    def getTableIndexes(self, table):
        """ get info about table's indexes. ignore primary key constraint index, they get listed in constraints """
        schema, tablename = self.getSchemaTableName(table)
        schema_where = u" AND nspname=%s " % self.quoteString(schema) if schema is not None else ""
        # each row: (index name, indkey column-number list, is-unique flag)
        sql = u"""SELECT idxcls.relname, indkey, indisunique = 't'
                        FROM pg_index JOIN pg_class ON pg_index.indrelid=pg_class.oid
                        JOIN pg_class AS idxcls ON pg_index.indexrelid=idxcls.oid
                        JOIN pg_namespace nsp ON pg_class.relnamespace = nsp.oid
                            WHERE pg_class.relname=%s %s
                            AND indisprimary != 't' """ % (
            self.quoteString(tablename), schema_where)
        c = self._execute(None, sql)
        res = self._fetchall(c)
        self._close_cursor(c)
        return res
    def getTableConstraints(self, table):
        """Return pg_constraint rows for *table*: name, type, deferrable
        flags, key columns, source expression, referenced table and
        FK update/delete/match behaviour plus referenced key columns."""
        # NOTE(review): pg_constraint.consrc was removed in PostgreSQL 12;
        # presumably this targets older servers -- verify supported versions.
        schema, tablename = self.getSchemaTableName(table)
        schema_where = u" AND nspname=%s " % self.quoteString(schema) if schema is not None else ""
        sql = u"""SELECT c.conname, c.contype, c.condeferrable, c.condeferred, array_to_string(c.conkey, ' '), c.consrc,
                 t2.relname, c.confupdtype, c.confdeltype, c.confmatchtype, array_to_string(c.confkey, ' ') FROM pg_constraint c
                  LEFT JOIN pg_class t ON c.conrelid = t.oid
                    LEFT JOIN pg_class t2 ON c.confrelid = t2.oid
                    JOIN pg_namespace nsp ON t.relnamespace = nsp.oid
                    WHERE t.relname = %s %s """ % (self.quoteString(tablename), schema_where)
        c = self._execute(None, sql)
        res = self._fetchall(c)
        self._close_cursor(c)
        return res
    def getTableTriggers(self, table):
        """Return trigger rows for *table*: (trigger name, function name,
        tgtype bitmask, enabled flag)."""
        schema, tablename = self.getSchemaTableName(table)
        schema_where = u" AND nspname=%s " % self.quoteString(schema) if schema is not None else ""
        # tgenabled NOT IN ('f', 'D') reports the trigger as currently enabled
        sql = u"""SELECT tgname, proname, tgtype, tgenabled NOT IN ('f', 'D')  FROM pg_trigger trig
                  LEFT JOIN pg_class t ON trig.tgrelid = t.oid
                    LEFT JOIN pg_proc p ON trig.tgfoid = p.oid
                    JOIN pg_namespace nsp ON t.relnamespace = nsp.oid
                    WHERE t.relname = %s %s """ % (
            self.quoteString(tablename), schema_where)
        c = self._execute(None, sql)
        res = self._fetchall(c)
        self._close_cursor(c)
        return res
    def enableAllTableTriggers(self, enable, table):
        """ enable or disable all triggers on table """
        # trigger=None makes enableTableTrigger target "ALL"
        self.enableTableTrigger(None, enable, table)
def enableTableTrigger(self, trigger, enable, table):
""" enable or disable one trigger on table """
trigger = self.quoteId(trigger) if trigger is not None else "ALL"
sql = u"ALTER TABLE %s %s TRIGGER %s" % (self.quoteId(table), "ENABLE" if enable else "DISABLE", trigger)
self._execute_and_commit(sql)
def deleteTableTrigger(self, trigger, table):
""" delete trigger on table """
sql = u"DROP TRIGGER %s ON %s" % (self.quoteId(trigger), self.quoteId(table))
self._execute_and_commit(sql)
def getTableRules(self, table):
schema, tablename = self.getSchemaTableName(table)
schema_where = u" AND schemaname=%s " % self.quoteString(schema) if schema is not None else ""
sql = u"""SELECT rulename, definition FROM pg_rules
WHERE tablename=%s %s """ % (self.quoteString(tablename), schema_where)
c = self._execute(None, sql)
res = self._fetchall(c)
self._close_cursor(c)
return res
def deleteTableRule(self, rule, table):
""" delete rule on table """
sql = u"DROP RULE %s ON %s" % (self.quoteId(rule), self.quoteId(table))
self._execute_and_commit(sql)
def getTableExtent(self, table, geom):
""" find out table extent """
subquery = u"SELECT st_extent(%s) AS extent FROM %s" % (self.quoteId(geom), self.quoteId(table))
sql = u"SELECT st_xmin(extent), st_ymin(extent), st_xmax(extent), st_ymax(extent) FROM (%s) AS subquery" % subquery
c = self._execute(None, sql)
res = self._fetchone(c)
self._close_cursor(c)
return res
def getTableEstimatedExtent(self, table, geom):
""" find out estimated extent (from the statistics) """
if self.isRasterTable(table):
return
schema, tablename = self.getSchemaTableName(table)
schema_part = u"%s," % self.quoteString(schema) if schema is not None else ""
subquery = u"SELECT st_estimated_extent(%s%s,%s) AS extent" % (
schema_part, self.quoteString(tablename), self.quoteString(geom))
sql = u"""SELECT st_xmin(extent), st_ymin(extent), st_xmax(extent), st_ymax(extent) FROM (%s) AS subquery """ % subquery
try:
c = self._execute(None, sql)
except DbError: # No statistics for the current table
return
res = self._fetchone(c)
self._close_cursor(c)
return res
def getViewDefinition(self, view):
""" returns definition of the view """
schema, tablename = self.getSchemaTableName(view)
schema_where = u" AND nspname=%s " % self.quoteString(schema) if schema is not None else ""
sql = u"""SELECT pg_get_viewdef(c.oid) FROM pg_class c
JOIN pg_namespace nsp ON c.relnamespace = nsp.oid
WHERE relname=%s %s AND (relkind='v' OR relkind='m') """ % (
self.quoteString(tablename), schema_where)
c = self._execute(None, sql)
res = self._fetchone(c)
self._close_cursor(c)
return res[0] if res is not None else None
def getSpatialRefInfo(self, srid):
if not self.has_spatial:
return
try:
c = self._execute(None, "SELECT srtext FROM spatial_ref_sys WHERE srid = '%d'" % srid)
except DbError:
return
sr = self._fetchone(c)
self._close_cursor(c)
if sr is None:
return
srtext = sr[0]
# try to extract just SR name (should be quoted in double quotes)
regex = QRegExp('"([^"]+)"')
if regex.indexIn(srtext) > -1:
srtext = regex.cap(1)
return srtext
def isVectorTable(self, table):
if self.has_geometry_columns and self.has_geometry_columns_access:
schema, tablename = self.getSchemaTableName(table)
sql = u"SELECT count(*) FROM geometry_columns WHERE f_table_schema = %s AND f_table_name = %s" % (
self.quoteString(schema), self.quoteString(tablename))
c = self._execute(None, sql)
res = self._fetchone(c)
self._close_cursor(c)
return res is not None and res[0] > 0
return False
def isRasterTable(self, table):
if self.has_raster_columns and self.has_raster_columns_access:
schema, tablename = self.getSchemaTableName(table)
sql = u"SELECT count(*) FROM raster_columns WHERE r_table_schema = %s AND r_table_name = %s" % (
self.quoteString(schema), self.quoteString(tablename))
c = self._execute(None, sql)
res = self._fetchone(c)
self._close_cursor(c)
return res is not None and res[0] > 0
return False
def createTable(self, table, field_defs, pkey):
""" create ordinary table
'fields' is array containing field definitions
'pkey' is the primary key name
"""
if len(field_defs) == 0:
return False
sql = "CREATE TABLE %s (" % self.quoteId(table)
sql += u", ".join(field_defs)
if pkey is not None and pkey != "":
sql += u", PRIMARY KEY (%s)" % self.quoteId(pkey)
sql += ")"
self._execute_and_commit(sql)
return True
def deleteTable(self, table):
""" delete table and its reference in either geometry_columns or raster_columns """
schema, tablename = self.getSchemaTableName(table)
schema_part = u"%s, " % self.quoteString(schema) if schema is not None else ""
if self.isVectorTable(table):
sql = u"SELECT DropGeometryTable(%s%s)" % (schema_part, self.quoteString(tablename))
elif self.isRasterTable(table):
# Fix #8521: delete raster table and references from raster_columns table
sql = u"DROP TABLE %s" % self.quoteId(table)
else:
sql = u"DROP TABLE %s" % self.quoteId(table)
self._execute_and_commit(sql)
def emptyTable(self, table):
""" delete all rows from table """
sql = u"TRUNCATE %s" % self.quoteId(table)
self._execute_and_commit(sql)
    def renameTable(self, table, new_table):
        """ rename a table in database """
        schema, tablename = self.getSchemaTableName(table)
        if new_table == tablename:
            # nothing to do
            return
        c = self._get_cursor()
        sql = u"ALTER TABLE %s RENAME TO %s" % (self.quoteId(table), self.quoteId(new_table))
        self._execute(c, sql)
        # update geometry_columns if PostGIS is enabled; only needed when it
        # is a real table (a geometry_columns view tracks renames itself)
        if self.has_geometry_columns and not self.is_geometry_columns_view:
            schema_where = u" AND f_table_schema=%s " % self.quoteString(schema) if schema is not None else ""
            sql = u"UPDATE geometry_columns SET f_table_name=%s WHERE f_table_name=%s %s" % (
                self.quoteString(new_table), self.quoteString(tablename), schema_where)
            self._execute(c, sql)
        # both statements commit together
        self._commit()
def commentTable(self, schema, tablename, comment=None):
if comment is None:
self._execute(None, 'COMMENT ON TABLE "{0}"."{1}" IS NULL;'.format(schema, tablename))
else:
self._execute(None, 'COMMENT ON TABLE "{0}"."{1}" IS E\'{2}\';'.format(schema, tablename, comment))
def getComment(self, tablename, field):
"""Returns the comment for a field"""
# SQL Query checking if a comment exists for the field
sql_cpt = "Select count(*) from pg_description pd, pg_class pc, pg_attribute pa where relname = '%s' and attname = '%s' and pa.attrelid = pc.oid and pd.objoid = pc.oid and pd.objsubid = pa.attnum" % (tablename, field)
# SQL Query that return the comment of the field
sql = "Select pd.description from pg_description pd, pg_class pc, pg_attribute pa where relname = '%s' and attname = '%s' and pa.attrelid = pc.oid and pd.objoid = pc.oid and pd.objsubid = pa.attnum" % (tablename, field)
c = self._execute(None, sql_cpt) # Execute Check query
res = self._fetchone(c)[0] # Store result
if res == 1:
# When a comment exists
c = self._execute(None, sql) # Execute query
res = self._fetchone(c)[0] # Store result
self._close_cursor(c) # Close cursor
return res # Return comment
else:
return ''
    def moveTableToSchema(self, table, new_schema):
        """Move *table* into *new_schema*, updating geometry_columns if needed."""
        schema, tablename = self.getSchemaTableName(table)
        if new_schema == schema:
            # already there
            return
        c = self._get_cursor()
        sql = u"ALTER TABLE %s SET SCHEMA %s" % (self.quoteId(table), self.quoteId(new_schema))
        self._execute(c, sql)
        # update geometry_columns if PostGIS is enabled; only needed when it
        # is a real table (a geometry_columns view tracks moves itself)
        if self.has_geometry_columns and not self.is_geometry_columns_view:
            schema, tablename = self.getSchemaTableName(table)
            schema_where = u" AND f_table_schema=%s " % self.quoteString(schema) if schema is not None else ""
            sql = u"UPDATE geometry_columns SET f_table_schema=%s WHERE f_table_name=%s %s" % (
                self.quoteString(new_schema), self.quoteString(tablename), schema_where)
            self._execute(c, sql)
        # both statements commit together
        self._commit()
    def moveTable(self, table, new_table, new_schema=None):
        """Rename and/or move a table in one operation.

        Pure renames and pure schema moves are delegated; the combined case
        renames to a temporary name, moves schema, then renames back.
        """
        schema, tablename = self.getSchemaTableName(table)
        if new_schema == schema and new_table == tablename:
            return
        if new_schema == schema:
            return self.renameTable(table, new_table)
        # NOTE(review): this compares the (possibly schema-qualified) *table*
        # against the bare *new_table* name -- confirm the intended check.
        if new_table == table:
            return self.moveTableToSchema(table, new_schema)
        c = self._get_cursor()
        # step 1: rename to a temporary name within the old schema
        t = u"__new_table__"
        sql = u"ALTER TABLE %s RENAME TO %s" % (self.quoteId(table), self.quoteId(t))
        self._execute(c, sql)
        # step 2: move the temporary table into the new schema
        sql = u"ALTER TABLE %s SET SCHEMA %s" % (self.quoteId((schema, t)), self.quoteId(new_schema))
        self._execute(c, sql)
        # step 3: give it its final name
        # NOTE(review): RENAME TO expects an unqualified name; quoteId(table)
        # may still carry the old schema here -- verify this is correct.
        sql = u"ALTER TABLE %s RENAME TO %s" % (self.quoteId((new_schema, t)), self.quoteId(table))
        self._execute(c, sql)
        # update geometry_columns if PostGIS is enabled
        if self.has_geometry_columns and not self.is_geometry_columns_view:
            schema, tablename = self.getSchemaTableName(table)
            schema_where = u" f_table_schema=%s AND " % self.quoteString(schema) if schema is not None else ""
            schema_part = u" f_table_schema=%s, " % self.quoteString(new_schema) if schema is not None else ""
            sql = u"UPDATE geometry_columns SET %s f_table_name=%s WHERE %s f_table_name=%s" % (
                schema_part, self.quoteString(new_table), schema_where, self.quoteString(tablename))
            self._execute(c, sql)
        # all statements commit together
        self._commit()
def createView(self, view, query):
sql = u"CREATE VIEW %s AS %s" % (self.quoteId(view), query)
self._execute_and_commit(sql)
    def createSpatialView(self, view, query):
        """Spatial views need no special handling: delegate to createView."""
        self.createView(view, query)
def deleteView(self, view, isMaterialized=False):
sql = u"DROP %s VIEW %s" % ('MATERIALIZED' if isMaterialized else '', self.quoteId(view))
self._execute_and_commit(sql)
    def renameView(self, view, new_name):
        """ rename view in database (views rename exactly like tables) """
        self.renameTable(view, new_name)
def createSchema(self, schema):
""" create a new empty schema in database """
sql = u"CREATE SCHEMA %s" % self.quoteId(schema)
self._execute_and_commit(sql)
def deleteSchema(self, schema):
""" drop (empty) schema from database """
sql = u"DROP SCHEMA %s" % self.quoteId(schema)
self._execute_and_commit(sql)
def renameSchema(self, schema, new_schema):
""" rename a schema in database """
sql = u"ALTER SCHEMA %s RENAME TO %s" % (self.quoteId(schema), self.quoteId(new_schema))
self._execute_and_commit(sql)
    def runVacuum(self):
        """ run a database-wide VACUUM """
        self._execute_and_commit("VACUUM")
def runVacuumAnalyze(self, table):
""" run vacuum analyze on a table """
sql = u"VACUUM ANALYZE %s" % self.quoteId(table)
self._execute(None, sql)
self._commit()
def runRefreshMaterializedView(self, table):
""" run refresh materialized view on a table """
sql = u"REFRESH MATERIALIZED VIEW %s" % self.quoteId(table)
self._execute(None, sql)
self._commit()
def addTableColumn(self, table, field_def):
""" add a column to table """
sql = u"ALTER TABLE %s ADD %s" % (self.quoteId(table), field_def)
self._execute_and_commit(sql)
def deleteTableColumn(self, table, column):
""" delete column from a table """
if self.isGeometryColumn(table, column):
# use PostGIS function to delete geometry column correctly
schema, tablename = self.getSchemaTableName(table)
schema_part = u"%s, " % self.quoteString(schema) if schema else ""
sql = u"SELECT DropGeometryColumn(%s%s, %s)" % (
schema_part, self.quoteString(tablename), self.quoteString(column))
else:
sql = u"ALTER TABLE %s DROP %s" % (self.quoteId(table), self.quoteId(column))
self._execute_and_commit(sql)
    def updateTableColumn(self, table, column, new_name=None, data_type=None, not_null=None, default=None, comment=None, test=None):
        """Apply any combination of column changes: type, NOT NULL, default,
        rename, comment.  No-ops when every change argument is None.

        NOTE(review): the *test* parameter is accepted but never used --
        kept for interface compatibility; confirm whether callers rely on it.
        """
        if new_name is None and data_type is None and not_null is None and default is None and comment is None:
            return
        c = self._get_cursor()
        # update column definition
        col_actions = []
        if data_type is not None:
            col_actions.append(u"TYPE %s" % data_type)
        if not_null is not None:
            col_actions.append(u"SET NOT NULL" if not_null else u"DROP NOT NULL")
        if default is not None:
            # empty/falsy default means "drop the default"
            if default and default != '':
                col_actions.append(u"SET DEFAULT %s" % default)
            else:
                col_actions.append(u"DROP DEFAULT")
        if len(col_actions) > 0:
            sql = u"ALTER TABLE %s" % self.quoteId(table)
            alter_col_str = u"ALTER %s" % self.quoteId(column)
            for a in col_actions:
                sql += u" %s %s," % (alter_col_str, a)
            # sql[:-1] strips the trailing comma left by the loop above
            self._execute(c, sql[:-1])
        # rename the column
        if new_name is not None and new_name != column:
            sql = u"ALTER TABLE %s RENAME %s TO %s" % (
                self.quoteId(table), self.quoteId(column), self.quoteId(new_name))
            self._execute(c, sql)
            # update geometry_columns if PostGIS is enabled
            if self.has_geometry_columns and not self.is_geometry_columns_view:
                schema, tablename = self.getSchemaTableName(table)
                schema_where = u" f_table_schema=%s AND " % self.quoteString(schema) if schema is not None else ""
                sql = u"UPDATE geometry_columns SET f_geometry_column=%s WHERE %s f_table_name=%s AND f_geometry_column=%s" % (
                    self.quoteString(new_name), schema_where, self.quoteString(tablename), self.quoteString(column))
                self._execute(c, sql)
        # comment the column
        if comment is not None:
            schema, tablename = self.getSchemaTableName(table)
            sql = u"COMMENT ON COLUMN %s.%s.%s is '%s'" % (schema, tablename, column, comment)
            self._execute(c, sql)
        # all the statements above commit together
        self._commit()
    def renameTableColumn(self, table, column, new_name):
        """ rename column in a table (delegates to updateTableColumn) """
        return self.updateTableColumn(table, column, new_name)
    def setTableColumnType(self, table, column, data_type):
        """ change column type (delegates to updateTableColumn) """
        return self.updateTableColumn(table, column, None, data_type)
    def setTableColumnNull(self, table, column, is_null):
        """ change whether column can contain null values """
        # updateTableColumn takes not_null, the inverse of is_null
        return self.updateTableColumn(table, column, None, None, not is_null)
    def setTableColumnDefault(self, table, column, default):
        """ change column's default value.
        If default=None or an empty string drop default value """
        return self.updateTableColumn(table, column, None, None, None, default)
def isGeometryColumn(self, table, column):
schema, tablename = self.getSchemaTableName(table)
schema_where = u" f_table_schema=%s AND " % self.quoteString(schema) if schema is not None else ""
sql = u"SELECT count(*) > 0 FROM geometry_columns WHERE %s f_table_name=%s AND f_geometry_column=%s" % (
schema_where, self.quoteString(tablename), self.quoteString(column))
c = self._execute(None, sql)
res = self._fetchone(c)[0] == 't'
self._close_cursor(c)
return res
def addGeometryColumn(self, table, geom_column='geom', geom_type='POINT', srid=-1, dim=2):
schema, tablename = self.getSchemaTableName(table)
schema_part = u"%s, " % self.quoteString(schema) if schema else ""
sql = u"SELECT AddGeometryColumn(%s%s, %s, %d, %s, %d)" % (
schema_part, self.quoteString(tablename), self.quoteString(geom_column), srid, self.quoteString(geom_type), dim)
self._execute_and_commit(sql)
    def deleteGeometryColumn(self, table, geom_column):
        """Drop a geometry column (routed through deleteTableColumn)."""
        return self.deleteTableColumn(table, geom_column)
def addTableUniqueConstraint(self, table, column):
""" add a unique constraint to a table """
sql = u"ALTER TABLE %s ADD UNIQUE (%s)" % (self.quoteId(table), self.quoteId(column))
self._execute_and_commit(sql)
def deleteTableConstraint(self, table, constraint):
""" delete constraint in a table """
sql = u"ALTER TABLE %s DROP CONSTRAINT %s" % (self.quoteId(table), self.quoteId(constraint))
self._execute_and_commit(sql)
def addTablePrimaryKey(self, table, column):
""" add a primery key (with one column) to a table """
sql = u"ALTER TABLE %s ADD PRIMARY KEY (%s)" % (self.quoteId(table), self.quoteId(column))
self._execute_and_commit(sql)
def createTableIndex(self, table, name, column):
""" create index on one column using default options """
sql = u"CREATE INDEX %s ON %s (%s)" % (self.quoteId(name), self.quoteId(table), self.quoteId(column))
self._execute_and_commit(sql)
def deleteTableIndex(self, table, name):
schema, tablename = self.getSchemaTableName(table)
sql = u"DROP INDEX %s" % self.quoteId((schema, name))
self._execute_and_commit(sql)
def createSpatialIndex(self, table, geom_column='geom'):
schema, tablename = self.getSchemaTableName(table)
idx_name = self.quoteId(u"sidx_%s_%s" % (tablename, geom_column))
sql = u"CREATE INDEX %s ON %s USING GIST(%s)" % (idx_name, self.quoteId(table), self.quoteId(geom_column))
self._execute_and_commit(sql)
def deleteSpatialIndex(self, table, geom_column='geom'):
schema, tablename = self.getSchemaTableName(table)
idx_name = self.quoteId(u"sidx_%s_%s" % (tablename, geom_column))
return self.deleteTableIndex(table, idx_name)
    def execution_error_types(self):
        """Exception classes treated as query-execution errors."""
        return psycopg2.Error, psycopg2.ProgrammingError, psycopg2.Warning
    def connection_error_types(self):
        """Exception classes treated as connection-level errors."""
        return psycopg2.InterfaceError, psycopg2.OperationalError
# moved into the parent class: DbConnector._execute()
# def _execute(self, cursor, sql):
# pass
# moved into the parent class: DbConnector._execute_and_commit()
# def _execute_and_commit(self, sql):
# pass
# moved into the parent class: DbConnector._get_cursor()
# def _get_cursor(self, name=None):
# pass
# moved into the parent class: DbConnector._fetchall()
# def _fetchall(self, c):
# pass
# moved into the parent class: DbConnector._fetchone()
# def _fetchone(self, c):
# pass
# moved into the parent class: DbConnector._commit()
# def _commit(self):
# pass
# moved into the parent class: DbConnector._rollback()
# def _rollback(self):
# pass
# moved into the parent class: DbConnector._get_cursor_columns()
# def _get_cursor_columns(self, c):
# pass
def getSqlDictionary(self):
from .sql_dictionary import getSqlDictionary
sql_dict = getSqlDictionary()
# get schemas, tables and field names
items = []
sql = u"""SELECT nspname FROM pg_namespace WHERE nspname !~ '^pg_' AND nspname != 'information_schema'
UNION SELECT relname FROM pg_class WHERE relkind IN ('v', 'r', 'm', 'p')
UNION SELECT attname FROM pg_attribute WHERE attnum > 0"""
c = self._execute(None, sql)
for row in self._fetchall(c):
items.append(row[0])
self._close_cursor(c)
sql_dict["identifier"] = items
return sql_dict
    def getQueryBuilderDictionary(self):
        """Delegate to the shared sql_dictionary module."""
        from .sql_dictionary import getQueryBuilderDictionary
        return getQueryBuilderDictionary()
| [
"psycopg2.connect",
"functools.cmp_to_key",
"psycopg2.extensions.register_type",
"os.environ.get",
"qgis.PyQt.QtCore.QFile",
"builtins.str",
"builtins.range",
"qgis.core.QgsCredentials.instance",
"qgis.PyQt.QtCore.QRegExp",
"qgis.core.QgsDataSourceUri"
] | [((1443, 1505), 'psycopg2.extensions.register_type', 'psycopg2.extensions.register_type', (['psycopg2.extensions.UNICODE'], {}), '(psycopg2.extensions.UNICODE)\n', (1476, 1505), False, 'import psycopg2\n'), ((1506, 1573), 'psycopg2.extensions.register_type', 'psycopg2.extensions.register_type', (['psycopg2.extensions.UNICODEARRAY'], {}), '(psycopg2.extensions.UNICODEARRAY)\n', (1539, 1573), False, 'import psycopg2\n'), ((4336, 4368), 'qgis.core.QgsDataSourceUri', 'QgsDataSourceUri', (['connectionInfo'], {}), '(connectionInfo)\n', (4352, 4368), False, 'from qgis.core import Qgis, QgsCredentials, QgsDataSourceUri\n'), ((28559, 28579), 'qgis.PyQt.QtCore.QRegExp', 'QRegExp', (['""""([^"]+)\\""""'], {}), '(\'"([^"]+)"\')\n', (28566, 28579), False, 'from qgis.PyQt.QtCore import QRegExp, QFile\n'), ((1772, 1796), 'os.environ.get', 'os.environ.get', (['"""PGHOST"""'], {}), "('PGHOST')\n", (1786, 1796), False, 'import os\n'), ((1831, 1855), 'os.environ.get', 'os.environ.get', (['"""PGPORT"""'], {}), "('PGPORT')\n", (1845, 1855), False, 'import os\n'), ((1894, 1918), 'os.environ.get', 'os.environ.get', (['"""PGUSER"""'], {}), "('PGUSER')\n", (1908, 1918), False, 'import os\n'), ((1956, 1984), 'os.environ.get', 'os.environ.get', (['"""PGPASSWORD"""'], {}), "('PGPASSWORD')\n", (1970, 1984), False, 'import os\n'), ((2390, 2424), 'psycopg2.connect', 'psycopg2.connect', (['expandedConnInfo'], {}), '(expandedConnInfo)\n', (2406, 2424), False, 'import psycopg2\n'), ((4472, 4487), 'qgis.PyQt.QtCore.QFile', 'QFile', (['certFile'], {}), '(certFile)\n', (4477, 4487), False, 'from qgis.PyQt.QtCore import QRegExp, QFile\n'), ((2147, 2169), 'os.environ.get', 'os.environ.get', (['"""USER"""'], {}), "('USER')\n", (2161, 2169), False, 'import os\n'), ((2214, 2242), 'os.environ.get', 'os.environ.get', (['"""PGDATABASE"""'], {}), "('PGDATABASE')\n", (2228, 2242), False, 'import os\n'), ((2577, 2583), 'builtins.str', 'str', (['e'], {}), '(e)\n', (2580, 2583), False, 'from builtins import 
str\n'), ((2684, 2692), 'builtins.range', 'range', (['(3)'], {}), '(3)\n', (2689, 2692), False, 'from builtins import range\n'), ((13305, 13359), 'functools.cmp_to_key', 'cmp_to_key', (['(lambda x, y: (x[1] > y[1]) - (x[1] < y[1]))'], {}), '(lambda x, y: (x[1] > y[1]) - (x[1] < y[1]))\n', (13315, 13359), False, 'from functools import cmp_to_key\n'), ((3149, 3186), 'psycopg2.connect', 'psycopg2.connect', (['newExpandedConnInfo'], {}), '(newExpandedConnInfo)\n', (3165, 3186), False, 'import psycopg2\n'), ((2737, 2762), 'qgis.core.QgsCredentials.instance', 'QgsCredentials.instance', ([], {}), '()\n', (2760, 2762), False, 'from qgis.core import Qgis, QgsCredentials, QgsDataSourceUri\n'), ((3432, 3438), 'builtins.str', 'str', (['e'], {}), '(e)\n', (3435, 3438), False, 'from builtins import str\n'), ((3207, 3232), 'qgis.core.QgsCredentials.instance', 'QgsCredentials.instance', ([], {}), '()\n', (3230, 3232), False, 'from qgis.core import Qgis, QgsCredentials, QgsDataSourceUri\n')] |
#!/usr/bin/python
import os
from subprocess import call


def main():
    """Ask for a commit message, then run `git commit` and `git push`.

    Fixes: the old code interpolated the message into a shell string and
    ran it with shell=True, which broke on quotes and allowed command
    injection; it was also Python-2-only (raw_input) and defined an
    unused NCMDS constant.
    """
    try:
        read_line = raw_input  # Python 2
    except NameError:
        read_line = input  # Python 3
    message = read_line('type your message: ')
    # argument lists with shell=False keep the message out of the shell
    for cmd in (['git', 'commit', '-m', message],
                ['git', 'push', 'origin', 'master']):
        call(cmd)


if __name__ == '__main__':
    main()
| [
"subprocess.call"
] | [((250, 271), 'subprocess.call', 'call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (254, 271), False, 'from subprocess import call\n')] |
import smtplib
from contextlib import contextmanager
import pandas as pd, datetime as dt,numpy as np
import re, os, ssl
import credentials, glob
import base64
from templateReport import * # template html of all content of the email
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email import encoders
today = dt.date.today().strftime('%Y-%m-%d')
hoy = dt.date.today().strftime('%d/%m/%y')
excel = pd.read_csv(f'/home/lorenzo/Quanvas/Dollar Futures {today}.csv')
excel = excel.rename(columns={'Unnamed: 0':''})
excel['Close'] = ['$ {:,.2f}'.format(i).replace('.','p').replace(',','.').replace('p',',') for i in list((excel['Close'].astype(float)).values)]
excel['30 days'] = ['$ {:,.2f}'.format(i).replace('.','p').replace(',','.').replace('p',',') for i in list((excel['30 days'].astype(float)).values[:-2])] + [np.nan,np.nan]
excel['Percent'] = ['{:,.2f} %'.format(i).replace('.','p').replace(',','.').replace('p',',') for i in list((excel['Percent'].astype(float)).values[:-2])] + [np.nan,np.nan]
excel.iloc[:,-4:] = excel.iloc[:,-4:] * 100.0
excel['Impl. Rate'] = [np.nan] + ['{:,.2f} %'.format(i).replace('.','p').replace(',','.').replace('p',',') for i in list((excel['Impl. Rate'].astype(float)).values[1:])]
excel['Previous Impl. Rate'] = [np.nan] + ['{:,.2f} %'.format(i).replace('.','p').replace(',','.').replace('p',',') for i in list((excel['Previous Impl. Rate'].astype(float)).values[1:-2])] + [np.nan,np.nan]
excel['Effective Annual Rate'] = [np.nan] + ['{:,.2f} %'.format(i).replace('.','p').replace(',','.').replace('p',',') for i in list((excel['Effective Annual Rate'].astype(float)).values[1:])]
excel['Impl. Rate 30d'] = [np.nan] + ['{:,.2f} %'.format(i).replace('.','p').replace(',','.').replace('p',',') for i in list((excel['Impl. Rate 30d'].astype(float)).values[1:])]
html_file = excel.to_html(na_rep = "",index=False).replace('<th>','<th style="color:white">').replace('<table>','<table style="width:100%">')
# Stats to show
exchanges = pd.read_csv('/home/lorenzo/Quanvas/Exchanges.csv')
exchanges.index = exchanges.iloc[:,0].values
del exchanges['Date']
# Last Price
mep,adr,dollar = exchanges.iloc[-1,0],exchanges.iloc[-1,1],exchanges.iloc[-1,2]
# Price 3 months before
mep3,adr3,dollar3 = exchanges.iloc[-60,0],exchanges.iloc[-60,1],exchanges.iloc[-60,2]
# Return over 3 months time lapse
mep3R, adr3R, dollar3R = exchanges.iloc[-60:,0].pct_change().sum(),exchanges.iloc[-60:,1].pct_change().sum(),exchanges.iloc[-60:,2].pct_change().sum()
# We assume that the image file is in the same directory that you run your Python script from
fp = open('/home/lorenzo/Quanvas/Exchanges.png', 'rb')
image1 = MIMEImage(fp.read())
fp.close()
fp = open('/home/lorenzo/Quanvas/SpotnFutures.png', 'rb')
image2 = MIMEImage(fp.read())
fp.close()
fp = open('/home/lorenzo/Quanvas/futuresReturn.png', 'rb')
image3 = MIMEImage(fp.read())
fp.close()
# send especific email
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
# login account + password
server.login(credentials.account,credentials.password)
clients = pd.read_csv('/home/lorenzo/Quanvas/scanner.csv')
hoy = dt.date.today().strftime('%d-%m-%Y')
destinatarios = ['<EMAIL>']#, f'{clients.Email.values[i]}']
def sendEmails(message):
msg = MIMEMultipart('alternative',text='URGENT!')
msg['X-Priority'] = '1'
msg['Subject'] = f"Quanvas Estado Cambiario Argentino {clients.Name[i]} {hoy}"
msg['From'] = credentials.account
msg['To'] = ",".join(destinatarios)
# body of the email
part1 = message
#part1 = _fix_eols(part1).encode('utf-8')
part1 = MIMEText(part1, 'html')
msg.attach(part1)
## Specify the ID according to the img src in the HTML part
image1.add_header('Content-ID', '<Exchanges>')
msg.attach(image1)
image2.add_header('Content-ID', '<SpotnFutures>')
msg.attach(image2)
image3.add_header('Content-ID', '<futuresReturn>')
msg.attach(image3)
server.sendmail(credentials.account,
destinatarios,
msg.as_string())
for i in range(len(clients)):
morning = f'<h2>Buenos dias {clients.Name.values[i]}</h2>'
plotForex = f"""<img src="cid:Exchanges" style="width:80%; display: block; margin-left: auto; margin-right: auto;" loading="lazy">"""
preview = """<h2>El Estado del mercado cambiario argentino se describe bajo los siguientes indicadores.</h2>"""
dolares = f"""<h3><ul>
<li>Cambio Dolar MEP: {round(mep3R*100.0,2)}% [MEP HOY ${round(mep,2)}, 3 Meses Antes ${round(mep3,2)}].</li>
<li>Cambio Dolar ADR: {round(adr3R*100.0,2)}% [ADR HOY ${round(adr,2)}, 3 Meses Antes ${round(adr3,2)}].</li>
<li>Cambio Dolar País: {round(dollar3R*100.0,2)}% [Dolar HOY$ ${round(dollar,2)}, 3 Meses Antes ${round(dollar3,2)}].</li>
</ul></h3>"""
derivatives = """<h3>Información Respecto a la Curva de Dolares Futuros MATBA-ROFEX.</h3>"""
series = """<h3>Serie Spot y Futuros</h3>"""
plotFutures = """<img src="cid:SpotnFutures" style="width:80%; display: block; margin-left:auto; margin-right:auto;" loading="lazy">"""
retorno = """<h3>Retorno del Spot y Futuros</h3>"""
plotReturn = """<img src="cid:futuresReturn" style="width:80%; display: block; margin-left:auto; margin-right:auto;" loading="lazy">"""
firma = """<h2>Esperamos que esta minuta lo mantenga informado.</h2>
<h2>Sin más saludamos, equipo QUANVAS.</h2> """
message = style + highlight + morning + preview + dolares + plotForex + derivatives + html_file + series + plotFutures + retorno + plotReturn + firma + end_html
e = sendEmails(message)
print(f"{dt.datetime.now().strftime('%H:%M:%S:%f')} Quanvas Report {clients.Name.values[i]} at {clients.Email.values[i]} SENT!!!")
e
server.quit()
| [
"smtplib.SMTP",
"pandas.read_csv",
"datetime.datetime.now",
"email.mime.multipart.MIMEMultipart",
"datetime.date.today",
"email.mime.text.MIMEText"
] | [((521, 585), 'pandas.read_csv', 'pd.read_csv', (['f"""/home/lorenzo/Quanvas/Dollar Futures {today}.csv"""'], {}), "(f'/home/lorenzo/Quanvas/Dollar Futures {today}.csv')\n", (532, 585), True, 'import pandas as pd, datetime as dt, numpy as np\n'), ((2091, 2141), 'pandas.read_csv', 'pd.read_csv', (['"""/home/lorenzo/Quanvas/Exchanges.csv"""'], {}), "('/home/lorenzo/Quanvas/Exchanges.csv')\n", (2102, 2141), True, 'import pandas as pd, datetime as dt, numpy as np\n'), ((3020, 3055), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.gmail.com"""', '(587)'], {}), "('smtp.gmail.com', 587)\n", (3032, 3055), False, 'import smtplib\n'), ((3181, 3229), 'pandas.read_csv', 'pd.read_csv', (['"""/home/lorenzo/Quanvas/scanner.csv"""'], {}), "('/home/lorenzo/Quanvas/scanner.csv')\n", (3192, 3229), True, 'import pandas as pd, datetime as dt, numpy as np\n'), ((3369, 3413), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', (['"""alternative"""'], {'text': '"""URGENT!"""'}), "('alternative', text='URGENT!')\n", (3382, 3413), False, 'from email.mime.multipart import MIMEMultipart\n'), ((3705, 3728), 'email.mime.text.MIMEText', 'MIMEText', (['part1', '"""html"""'], {}), "(part1, 'html')\n", (3713, 3728), False, 'from email.mime.text import MIMEText\n'), ((433, 448), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (446, 448), True, 'import pandas as pd, datetime as dt, numpy as np\n'), ((476, 491), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (489, 491), True, 'import pandas as pd, datetime as dt, numpy as np\n'), ((3236, 3251), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (3249, 3251), True, 'import pandas as pd, datetime as dt, numpy as np\n'), ((5794, 5811), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (5809, 5811), True, 'import pandas as pd, datetime as dt, numpy as np\n')] |
"""Block.Io API backend.
Supports Bitcoin, Dogecoin and Litecoin on `block.io <https://block.io>`_ API.
The backend configuration takes following parameters.
:param class: Always ``cryptoassets.core.backend.blockio.BlockIo``
:param api_key: block.io API key
:param password: <PASSWORD>
:param network: one of ``btc``, ``btctest``, ``doge``, ``dogetest``, see `chain.so <https://chain.so>`_ for full list
:param walletnotify: Configuration of wallet notify service set up for incoming transactions. You must use :py:class:`cryptoassets.core.backend.blockiowebhook.BlockIoWebhookNotifyHandler` or :py:class:`cryptoassets.core.backend.blockiowebocket.BlockIoWebsocketNotifyHandler` as ``walletnotify`` for incoming transactions for now. See below for more details.
Example configuration for block.io backend using websockets.
.. code-block:: yaml
---
# Cryptoassets.core configuration for running block.io unit tests
database:
url: sqlite:////tmp/cryptoassts-unittest-blockio.sqlite
coins:
doge:
backend:
class: cryptoassets.core.backend.blockio.BlockIo
api_key: yyy
pin: xxxx
network: dogetest
walletnotify:
class: cryptoassets.core.backend.blockiowebsocket.BlockIoWebsocketNotifyHandler
"""
import logging
from decimal import Decimal
import datetime
import threading
import requests
from slugify import slugify
from block_io import BlockIo as _BlockIo
from . import base
from ..utils import iterutil
logger = logging.getLogger(__name__)
def _transform_txdata_to_bitcoind_format(inp):
"""Grab out data as mangle we expect it to be.
Input chain.so format txdata and output it as bitcoind format txdata. We probably miss half of the things ATM, so please keep updating this function.
"""
output = {}
assert inp["status"] == "success"
inp = inp["data"]
output["confirmations"] = inp["confirmations"]
output["txid"] = inp["txid"]
output["details"] = []
for op in inp["outputs"]:
output["details"].append(dict(category="receive", address=op["address"], amount=Decimal(op["value"])))
output["only_receive"] = True
return output
class BlockIo(base.CoinBackend):
"""Block.io API."""
def __init__(self, coin, api_key, pin, network=None, walletnotify=None):
"""
:param wallet_notify: Wallet notify configuration
"""
base.CoinBackend.__init__(self)
self.coin = coin
self.api_key = api_key
self.pin = pin
self.block_io = _BlockIo(api_key, pin, 2)
assert network, "Please give argument network as one of chain.so networks: btc, btctest, doge, dogetest"
self.network = network
self.walletnotify_config = walletnotify
def require_tracking_incoming_confirmations(self):
return True
def to_internal_amount(self, amount):
return Decimal(amount)
def to_external_amount(self, amount):
return str(amount)
def create_address(self, label):
"""Create a new address on block.io wallet.
Note that block.io blocks creating addresses with the same label.
"""
# # block.io does not allow arbitrary characters in labels
label = slugify(label)
result = self.block_io.get_new_address(label=label)
# {'data': {'address': '2N2Qqvj5rXv27rS6b7rMejUvapwvRQ1ahUq', 'user_id': 5, 'label': 'slange11', 'network': 'BTCTEST'}, 'status': 'success'}
address = result["data"]["address"]
return address
def get_balances(self, addresses):
""" Get balances on multiple addresses.
"""
result = self.block_io.get_address_balance(addresses=",".join(addresses))
# {'data': {'balances': [{'pending_received_balance': '0.00000000', 'address': '2MsgW3kCrRFtJuo9JNjkorWXaZSvLk4EWRr',
# 'available_balance': '0.42000000', 'user_id': 0, 'label': 'default'}], 'available_balance': '0.42000000',
# 'network': 'BTCTEST', 'pending_received_balance': '0.00000000'}, 'status': 'success'}
if "balances" not in result["data"]:
# Not yet address on this wallet
raise StopIteration
for balance in result["data"]["balances"]:
yield balance["address"], self.to_internal_amount(balance["available_balance"])
def get_backend_balance(self, confirmations=3):
"""Get full available hot wallet balance on the backend.
:return Decimal:
"""
resp = self.block_io.get_balance()
# {'status': 'success', 'data': {'pending_received_balance': '0.00000000', 'available_balance': '0.13553300', 'network': 'BTCTEST'}}
if confirmations == 0:
return self.to_internal_amount(resp["data"]["pending_received_balance"])
else:
return self.to_internal_amount(resp["data"]["available_balance"])
def send(self, recipients, label):
"""
BlockIo does not support labelling outgoing transactions.
:param recipients: Dict of (address, satoshi amount)
"""
assert recipients
amounts = []
addresses = []
for address, satoshis in recipients.items():
amounts.append(str(self.to_external_amount(satoshis)))
addresses.append(address)
resp = self.block_io.withdraw(amounts=",".join(amounts), to_addresses=",".join(addresses))
return resp["data"]["txid"], self.to_internal_amount(resp["data"]["network_fee"])
def get_transaction(self, txid):
""" """
resp = requests.get("https://chain.so/api/v2/get_tx/{}/{}".format(self.network, txid))
data = resp.json()
data = _transform_txdata_to_bitcoind_format(data)
return data
def list_received_transactions(self, extra={}):
""" """
return ListReceivedTransactionsIterator(self)
class ListReceivedTransactionsIterator(base.ListTransactionsIterator):
"""Receive a batch of receive transactiosn from block.io API.
https://block.io/api/simple/python
"""
def __init__(self, backend):
self.backend = backend
self.before_tx = None
self.last_timestamp = None
self.finished = False
def _format_bitcoind_like(self, result):
"""Grab data from block.io response and format received details bitcoind like.
https://block.io/api/v2/get_transactions/?api_key=923f-e3e9-a580-dfb2&type=received
"""
out = {}
out["confirmations"] = result["confirmations"]
out["txid"] = result["txid"]
details = []
for received in result["amounts_received"]:
details.append(dict(category="receive", address=received["recipient"], amount=Decimal(received["amount"])))
# See top comment
out["only_receive"] = True
out["details"] = details
return out
def fetch_next_txids(self):
"""
:return: List of next txids to iterate or empty list if iterating is done.
"""
if self.finished:
return []
logger.info("Asking block.io for new received transaction batch, before_tx %s (%s)", self.before_tx, datetime.datetime.fromtimestamp(self.last_timestamp) if self.last_timestamp else "-")
if self.before_tx:
result = self.backend.block_io.get_transactions(type="received", before_tx=self.before_tx)
else:
result = self.backend.block_io.get_transactions(type="received")
txs = result["data"]["txs"]
# Confirmed oldest timestamp is the last
for tx in txs:
logger.debug("Tx txid:%s timestamp %s", tx["txid"], datetime.datetime.fromtimestamp(tx["time"]))
if txs:
# workaround
# <moo-_-> kindoge: there is still subtle bug in the last bug fix
# <moo-_-> https://block.io/api/v2/get_transactions/?api_key=0266-c2b6-c2c8-ee07&type=received&before_tx=d30a7d054c11718a6ce9ca6c9a5a95575e8cc7fb27f38f4427a65a02df4ba427
if self.before_tx == txs[-1]["txid"]:
self.finished = True
# The last txid to keep us iterating
self.before_tx = txs[-1]["txid"]
self.last_timestamp = txs[-1]["time"]
return [(tx["txid"], self._format_bitcoind_like(tx)) for tx in txs]
def clean_blockio_test_wallet(backend, balance_threshold=Decimal(1)):
"""Go through unused test addresses and archives them on block.io.
block.io has limit of 2000 active addresses on free plan. If you exceed the limit and do not archive your addresses, block.io stops doing withdrawals.
This helper function walks through a testnet wallet we use for unit tests and figures out which addresses look like test addresses, then consolidates them together.
:param balance_threshold: How low the address balance must be before we consolidate it together to one big balance address.
"""
block_io = backend.block_io
needs_archive = []
# Move all test transfers under this address
consolidation_address = None
result = block_io.get_my_addresses()
addresses = result["data"]["addresses"]
network = result["data"]["network"]
# block.io has an issue that you cannot withdrawal under certain threshold from address
# 2015-02
network_withdrawal_limits = {
"DOGETEST": Decimal(2)
}
network_fees = {
"DOGETEST": Decimal(1),
"BTCTEST": Decimal(1000) / Decimal(10 ** 8)
}
withdrawal_limit = network_withdrawal_limits.get(network, 0)
network_fee = network_fees.get(network, 0)
result = block_io.get_my_archived_addresses()
archived_addresses = [entry["address"] for entry in result["data"]["addresses"]]
for addr in addresses:
if addr["label"] == "default":
# block.io: Exception: Failed: Cannot archive addresses with label=default.
continue
# {'available_balance': '0.00000000', 'address': '2MvB2nKMKcWakVJxB3ZhPnG9eqWsEoX4CBD', 'user_id': 1, 'label': 'test-address-1413839537-401918', 'pending_received_balance': '0.00000000'}
balance = Decimal(addr["available_balance"]) + Decimal(addr["pending_received_balance"])
if balance == 0:
needs_archive.append(addr["address"])
continue
if balance < balance_threshold:
if not consolidation_address:
# Use the first found low balance address as the consolidation destionation
consolidation_address = addr["address"]
else:
if balance - network_fee < withdrawal_limit:
logger.debug("Cannot consolidate %s from %s, too low balance for block.io API call", balance, addr["address"])
else:
# Move everyhing from this address to the consolidation address
logger.debug("Consolidating %s from %s to %s", balance, addr["address"], consolidation_address)
block_io.withdraw_from_addresses(amounts=str(balance - network_fee), from_addresses=addr["address"], to_addresses=consolidation_address)
needs_archive.append(addr["address"])
not_yet_archived = set(needs_archive) - set(archived_addresses)
logger.debug("Archiving %d addresses from total %s, already archived %d, not yet archived %d", len(needs_archive), len(addresses), len(archived_addresses), len(not_yet_archived))
# block.io seems to have an upper limit how many addresses you can arcihive at once
for chunk in iterutil.grouper(256, not_yet_archived):
logger.debug("Archiving chunk of %d addresses", len(chunk))
result = block_io.archive_addresses(addresses=",".join(chunk))
assert result["status"] == "success"
| [
"logging.getLogger",
"datetime.datetime.fromtimestamp",
"block_io.BlockIo",
"decimal.Decimal",
"slugify.slugify"
] | [((1573, 1600), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1590, 1600), False, 'import logging\n'), ((8431, 8441), 'decimal.Decimal', 'Decimal', (['(1)'], {}), '(1)\n', (8438, 8441), False, 'from decimal import Decimal\n'), ((2611, 2636), 'block_io.BlockIo', '_BlockIo', (['api_key', 'pin', '(2)'], {}), '(api_key, pin, 2)\n', (2619, 2636), True, 'from block_io import BlockIo as _BlockIo\n'), ((2966, 2981), 'decimal.Decimal', 'Decimal', (['amount'], {}), '(amount)\n', (2973, 2981), False, 'from decimal import Decimal\n'), ((3312, 3326), 'slugify.slugify', 'slugify', (['label'], {}), '(label)\n', (3319, 3326), False, 'from slugify import slugify\n'), ((9405, 9415), 'decimal.Decimal', 'Decimal', (['(2)'], {}), '(2)\n', (9412, 9415), False, 'from decimal import Decimal\n'), ((9464, 9474), 'decimal.Decimal', 'Decimal', (['(1)'], {}), '(1)\n', (9471, 9474), False, 'from decimal import Decimal\n'), ((9495, 9508), 'decimal.Decimal', 'Decimal', (['(1000)'], {}), '(1000)\n', (9502, 9508), False, 'from decimal import Decimal\n'), ((9511, 9527), 'decimal.Decimal', 'Decimal', (['(10 ** 8)'], {}), '(10 ** 8)\n', (9518, 9527), False, 'from decimal import Decimal\n'), ((10175, 10209), 'decimal.Decimal', 'Decimal', (["addr['available_balance']"], {}), "(addr['available_balance'])\n", (10182, 10209), False, 'from decimal import Decimal\n'), ((10212, 10253), 'decimal.Decimal', 'Decimal', (["addr['pending_received_balance']"], {}), "(addr['pending_received_balance'])\n", (10219, 10253), False, 'from decimal import Decimal\n'), ((7233, 7285), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['self.last_timestamp'], {}), '(self.last_timestamp)\n', (7264, 7285), False, 'import datetime\n'), ((7715, 7758), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["tx['time']"], {}), "(tx['time'])\n", (7746, 7758), False, 'import datetime\n'), ((2170, 2190), 'decimal.Decimal', 'Decimal', (["op['value']"], {}), 
"(op['value'])\n", (2177, 2190), False, 'from decimal import Decimal\n'), ((6789, 6816), 'decimal.Decimal', 'Decimal', (["received['amount']"], {}), "(received['amount'])\n", (6796, 6816), False, 'from decimal import Decimal\n')] |
from setuptools import setup
setup(
name="flex_version",
version='1.2.3',
url='https://github.com/caesar0301/FlexVersion',
author='<NAME>',
author_email='<EMAIL>',
description='A cute Python library to manipulate version stuff.',
license="Apache License, Version 2.0",
packages=['flex_version'],
keywords=['utility', 'versioning'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| [
"setuptools.setup"
] | [((30, 968), 'setuptools.setup', 'setup', ([], {'name': '"""flex_version"""', 'version': '"""1.2.3"""', 'url': '"""https://github.com/caesar0301/FlexVersion"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""A cute Python library to manipulate version stuff."""', 'license': '"""Apache License, Version 2.0"""', 'packages': "['flex_version']", 'keywords': "['utility', 'versioning']", 'classifiers': "['Development Status :: 4 - Beta', 'Environment :: Console',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python', 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries :: Python Modules']"}), "(name='flex_version', version='1.2.3', url=\n 'https://github.com/caesar0301/FlexVersion', author='<NAME>',\n author_email='<EMAIL>', description=\n 'A cute Python library to manipulate version stuff.', license=\n 'Apache License, Version 2.0', packages=['flex_version'], keywords=[\n 'utility', 'versioning'], classifiers=['Development Status :: 4 - Beta',\n 'Environment :: Console', 'Intended Audience :: Developers',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries :: Python Modules'])\n", (35, 968), False, 'from setuptools import setup\n')] |
#!/usr/bin/env python
# Copyright 2017-2018 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import SimpleITK as sitk
import radiomics
from radiomics import featureextractor
import collections
def convertconfig(parameters):
kwargs = dict()
kwargs['binWidth'] = 25
kwargs['resampledPixelSpacing'] = None # [3,3,3] is an example for defining resampling (voxels with size 3x3x3mm)
kwargs['interpolator'] = sitk.sitkBSpline
kwargs['verbose'] = True
# Specific MR Settings: see https://github.com/Radiomics/pyradiomics/blob/master/examples/exampleSettings/exampleMR_NoResampling.yaml
kwargs['normalize'] = True
kwargs['normalizeScale'] = 100
kwargs['preCrop'] = True
kwargs['force2D'] = True
kwargs['force2Ddimension'] = 0 # axial slices, for coronal slices, use dimension 1 and for sagittal, dimension 2.
kwargs['binWidth'] = 5
kwargs['voxelArrayShift'] = 300
kwargs['label'] = 1
# NOTE: A little more tolerance may be required on matching the dimensions
kwargs['geometryTolerance'] = 1E-3
return kwargs
def AllFeatures(image, mask, parameters=None):
if parameters is None:
# Default settings for signature calculation from PyRadiomics
kwargs = {}
# These are currently set equal to the respective default values
kwargs['binWidth'] = 25
kwargs['resampledPixelSpacing'] = None # [3,3,3] is an example for defining resampling (voxels with size 3x3x3mm)
kwargs['interpolator'] = sitk.sitkBSpline
kwargs['verbose'] = True
# Specific MR Settings: see https://github.com/Radiomics/pyradiomics/blob/master/examples/exampleSettings/exampleMR_NoResampling.yaml
kwargs['normalize'] = True
kwargs['normalizeScale'] = 100
kwargs['preCrop'] = True
kwargs['force2D'] = True
kwargs['force2Ddimension'] = 0 # axial slices, for coronal slices, use dimension 1 and for sagittal, dimension 2.
kwargs['binWidth'] = 5
kwargs['voxelArrayShift'] = 300
kwargs['label'] = 1
# NOTE: A little more tolerance may be required on matching the dimensions
kwargs['geometryTolerance'] = 1E-3
else:
# Extract fields of parameters dict to right kwargs arguments
kwargs = convertconfig(parameters)
# Initialize wrapperClass to generate signature
extractor = featureextractor.RadiomicsFeaturesExtractor(**kwargs)
# Disable all classes except firstorder
extractor.enableAllFeatures()
# Prevent radiomics logger from printing out log entries with level < WARNING to the console
logger = logging.getLogger('radiomics')
logger.handlers[0].setLevel(logging.WARNING)
logger.propagate = False # Do not pass log messages on to root logger
# Write out all log entries to a file
handler = logging.FileHandler(filename='testLog.txt', mode='w')
formatter = logging.Formatter("%(levelname)s:%(name)s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
print("Calculating features")
featureVector = extractor.execute(image, mask)
# Split center of mass features in the three dimensions
# Save as orientation features
COM_index = featureVector['diagnostics_Mask-original_CenterOfMassIndex']
featureVector['of_original_COM_Index_x'] = COM_index[0]
featureVector['of_original_COM_Index_y'] = COM_index[1]
featureVector['of_original_COM_Index_z'] = COM_index[2]
COM = featureVector['diagnostics_Mask-original_CenterOfMass']
featureVector['of_original_COM_x'] = COM[0]
featureVector['of_original_COM_y'] = COM[1]
featureVector['of_original_COM_z'] = COM[2]
# Delete all diagnostics features:
for k in featureVector.keys():
if 'diagnostics' in k:
del featureVector[k]
# Change label to be similar to PREDICT
new_featureVector = collections.OrderedDict()
texture_features = ['_glcm_', '_gldm_', '_glrlm_', '_glszm_', '_ngtdm']
for k in featureVector.keys():
if any(t in k for t in texture_features):
kn = 'tf_' + k
elif '_shape_' in k:
kn = 'sf_' + k
elif '_firstorder_' in k:
kn = 'hf_' + k
elif '_of_' in k:
# COM
kn = k
else:
message = ('Key {} is unknown!').format(k)
raise ValueError(message)
# Add PyRadiomics to the key
kn = 'PyRadiomics_' + kn
# Add to new feature Vector
new_featureVector[kn] = featureVector[k]
featureVector = new_featureVector
# Print the values and keys
nfeat = len(featureVector.keys())
print(('Total of {} feature computed:').format(str(nfeat)))
for featureName in featureVector.keys():
print("Computed %s: %s" % (featureName, featureVector[featureName]))
return featureVector, kwargs
| [
"logging.getLogger",
"collections.OrderedDict",
"radiomics.featureextractor.RadiomicsFeaturesExtractor",
"logging.Formatter",
"logging.FileHandler"
] | [((3018, 3071), 'radiomics.featureextractor.RadiomicsFeaturesExtractor', 'featureextractor.RadiomicsFeaturesExtractor', ([], {}), '(**kwargs)\n', (3061, 3071), False, 'from radiomics import featureextractor\n'), ((3262, 3292), 'logging.getLogger', 'logging.getLogger', (['"""radiomics"""'], {}), "('radiomics')\n", (3279, 3292), False, 'import logging\n'), ((3474, 3527), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': '"""testLog.txt"""', 'mode': '"""w"""'}), "(filename='testLog.txt', mode='w')\n", (3493, 3527), False, 'import logging\n'), ((3544, 3600), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s:%(name)s: %(message)s"""'], {}), "('%(levelname)s:%(name)s: %(message)s')\n", (3561, 3600), False, 'import logging\n'), ((4527, 4552), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4550, 4552), False, 'import collections\n')] |
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from .models import User
class RegistrationForm(UserCreationForm):
class Meta:
model = User
fields = ('email',)
class LoginForm(AuthenticationForm):
username = forms.EmailField()
| [
"django.forms.EmailField"
] | [((288, 306), 'django.forms.EmailField', 'forms.EmailField', ([], {}), '()\n', (304, 306), False, 'from django import forms\n')] |
# -*- coding: utf-8 -*-
import re
import json
import requests
import itertools
from settings import *
import tools
# retrieve prices from tarkov-market for each item
def crawl_prices_tarkov_market(data):
logger.info("Getting JSON from " + CONST_TARKOV_MARKET)
try:
# escape the 401 first, no SimpleCookie because for some reason the 'Set-Cookie' is all fucked up
r = requests.get(CONST_TARKOV_MARKET)
r.raise_for_status()
cookies = r.headers['Set-Cookie']
cfduid = tools.find_substring(cookies, "__cfduid=", ";")
uid = tools.find_substring(cookies, " uid=", ";")
token = tools.find_substring(cookies, "token=", ";")
cookie_string = cfduid + '; ' + uid + '; ' + token
fake_header = {"Cookie": cookie_string}
# get the prices json
r = requests.get(CONST_TARKOV_MARKET_ITEMS, headers=fake_header)
r.raise_for_status()
market_json = json.loads(r.text)
items_list = market_json['items']
for item in items_list:
item_name = re.sub(' \([0-9]+\/[0-9]+\)', '', item['enName']) if 'enName' in item else None #remove the (xx/xx) from the names
item_price_day = int(item['avgDayPrice']) if 'avgDayPrice' in item else None
item_price_week = int(item['avgWeekPrice']) if 'avgWeekPrice' in item else None
item_price_slot_day = int(item['avgDayPricePerSlot']) if 'avgDayPricePerSlot' in item else None
item_price_slot_week = int(item['avgWeekPricePerSlot']) if 'avgWeekPricePerSlot' in item else None
item_price_change_day = int(item['change24']) if 'change24' in item else None
item_price_change_week = int(item['change7d']) if 'change7d' in item else None
item_trader_price = int(item['traderPrice']) if 'traderPrice' in item else None
item_trader_name = item['traderName'] if 'traderName' in item else None
item_price_date = item['priceUpdated'] if 'priceUpdated' in item else None
item_is_worth_resell = True if (item_price_day and item_trader_price) and (item_price_day <= item_trader_price) else False
if item_name in data:
data[item_name]['price_day'] = item_price_day
data[item_name]['price_week'] = item_price_week
data[item_name]['price_slot_day'] = item_price_slot_day
data[item_name]['price_slot_week'] = item_price_slot_week
data[item_name]['price_change_day'] = item_price_change_day
data[item_name]['price_change_week'] = item_price_change_week
data[item_name]['resell_name'] = item_trader_name
data[item_name]['resell_price'] = item_trader_price
data[item_name]['price_date'] = item_price_date
data[item_name]['worth_resell'] = item_is_worth_resell
except Exception as e:
logger.info("Warning: Failed crawling prices, reason: " + str(e))
pass
return data
# retrieve prices from loot goblin for each item
def crawl_prices_loot_goblin(data):
    """Merge Loot Goblin average prices into *data* for items already present.

    Args:
        data (dict): Item dictionary keyed by item name; matching entries get
            'price', 'price_slot' and 'price_date' fields updated in place.

    Returns:
        dict: The same dictionary, updated.
    """
    logger.info("Getting JSON from " + CONST_LOOT_GOBLIN)
    response = requests.get(CONST_LOOT_GOBLIN)
    response.raise_for_status()
    goblin_json = json.loads(response.text)
    nodes = goblin_json['result']['data']['allDataJson']['nodes']
    for node in nodes:
        name = node['name']
        avg_price = node['price_avg']
        slot_price = node['price_per_slot']
        stamp = node['timestamp']
        if name in data:
            entry = data[name]
            entry['price'] = avg_price
            entry['price_slot'] = slot_price
            entry['price_date'] = stamp
    return data
| [
"tools.find_substring",
"re.sub",
"json.loads",
"requests.get"
] | [((3212, 3243), 'requests.get', 'requests.get', (['CONST_LOOT_GOBLIN'], {}), '(CONST_LOOT_GOBLIN)\n', (3224, 3243), False, 'import requests\n'), ((3287, 3305), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (3297, 3305), False, 'import json\n'), ((397, 430), 'requests.get', 'requests.get', (['CONST_TARKOV_MARKET'], {}), '(CONST_TARKOV_MARKET)\n', (409, 430), False, 'import requests\n'), ((522, 569), 'tools.find_substring', 'tools.find_substring', (['cookies', '"""__cfduid="""', '""";"""'], {}), "(cookies, '__cfduid=', ';')\n", (542, 569), False, 'import tools\n'), ((584, 627), 'tools.find_substring', 'tools.find_substring', (['cookies', '""" uid="""', '""";"""'], {}), "(cookies, ' uid=', ';')\n", (604, 627), False, 'import tools\n'), ((644, 688), 'tools.find_substring', 'tools.find_substring', (['cookies', '"""token="""', '""";"""'], {}), "(cookies, 'token=', ';')\n", (664, 688), False, 'import tools\n'), ((839, 899), 'requests.get', 'requests.get', (['CONST_TARKOV_MARKET_ITEMS'], {'headers': 'fake_header'}), '(CONST_TARKOV_MARKET_ITEMS, headers=fake_header)\n', (851, 899), False, 'import requests\n'), ((951, 969), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (961, 969), False, 'import json\n'), ((1069, 1121), 're.sub', 're.sub', (['""" \\\\([0-9]+\\\\/[0-9]+\\\\)"""', '""""""', "item['enName']"], {}), "(' \\\\([0-9]+\\\\/[0-9]+\\\\)', '', item['enName'])\n", (1075, 1121), False, 'import re\n')] |
import numpy as np
import os
from scipy.optimize import least_squares, minimize
from scipy.special import fresnel
def autophase(S):
    '''Rotate complex data so the signal lies predominantly on the real axis.

    The phase is estimated from the summed imaginary and real parts,
    phi = arctan(sum(Im(S)) / sum(Re(S))), and the data is rotated by
    exp(-1j * phi).

    Args:
        S (numpy.ndarray): Complex data

    Returns:
        numpy.ndarray: Automatically phased complex data
    '''
    total_imag = np.sum(np.imag(S))
    total_real = np.sum(np.real(S))
    phase = np.arctan(total_imag / total_real)
    return S * np.exp(-1j * phase)
def add_noise(S, sigma):
    '''Return *S* plus zero-mean Gaussian noise of standard deviation *sigma*.

    Args:
        S (numpy.ndarray): Array to add noise to
        sigma (float): Standard deviation of noise

    Returns:
        numpy.ndarray: Array with noise added
    '''
    noise = sigma * np.random.randn(*np.shape(S))
    return S + noise
def kernel(t, r, method = 'fresnel', angles = 5000):
    '''Return the Kernel Matrix.

    .. math::
        K(r,t) = \int_{0}^{\pi/2} \cos(\\theta) \cos[(3 \cos(\\theta)^2 - 1)\omega_{ee} t] d\\theta

        \omega_{ee} = \\frac{\gamma_e^2\hbar}{r^3}

    +-------------------+----------------------+
    |Method             |Description           |
    +===================+======================+
    |'fresnel'          |Fresnel Integral      |
    +-------------------+----------------------+
    |'brute force'      |Brute Force Method    |
    +-------------------+----------------------+

    Args:
        t (numpy.ndarray): Array of time values in seconds
        r (numpy.ndarray): Array of radius (distance) values in meters
        method (str): Method for calculating the kernel. By default, uses the fresnel integral
        angles (int): For brute-force kernel, number of angles to average over

    Returns:
        numpy.ndarray: Numpy array of kernel. The first dimension is the time dimension. The second dimension is the distance dimension.

    .. note::
        The distance array (r) must have all values greater than zero to generate a proper kernel.

    .. warning::
        The number of angles must be carefully selected to ensure the Kernel matrix properly averages the angles for short distances.

    Example::

        t = np.r_[-0.1e-6:10e-6:1000j]
        r = np.r_[1.5e-9:10e-9:100j]

        K = kernel(t,r,angles = 2000)
    '''
    # Broadcast time down the rows and distance across the columns
    t = t.reshape(-1, 1)
    r = r.reshape(1, -1)
    # Bug fix: forward the requested calculation method to deer_trace.
    # Previously the 'method' argument was accepted but silently ignored,
    # so the brute-force kernel could never be selected.
    K = deer_trace(t, r, method = method, angles = angles)
    return K
def load_kernel(filename = 'default_kernel.csv', directory = 'kernels'):
    '''Read a kernel matrix from a comma-separated text file.

    Args:
        filename (str): Kernel filename
        directory (str): Directory containing the kernel file

    Returns:
        numpy.ndarray: Kernel matrix loaded from disk
    '''
    path = os.path.join(directory, filename)
    return np.loadtxt(path, delimiter = ',')
def save_kernel(k, filename, directory = 'kernels'):
    '''Write a kernel matrix to a comma-separated text file.

    Args:
        k (numpy.ndarray): Kernel Matrix
        filename (str): Kernel filename
        directory (str): Path to Kernel filename
    '''
    path = os.path.join(directory, filename)
    np.savetxt(path, k, delimiter = ',')
def background_dist(t):
    '''Calculate the distance above which P(r) should be zero in background fit.

    The threshold is the distance whose dipolar frequency completes roughly
    two full oscillations over the length of the time trace.

    Args:
        t (numpy.ndarray): Time axes

    Returns:
        r (float): Distance value for background fit
    '''
    n_oscillations = 2.0
    omega_ee = 2.0 * np.pi * n_oscillations / np.max(t)
    return ((2.0 * np.pi * 5.204e-20) / omega_ee) ** (1.0 / 3.0)
def deer_trace(t, r, method = 'fresnel', angles=1000):
    '''Calculate the DEER trace corresponding to a given time axes and distance value

    +-------------------+----------------------+
    |Method             |Description           |
    +===================+======================+
    |'fresnel'          |Fresnel Integral      |
    +-------------------+----------------------+
    |'brute force'      |Brute Force Method    |
    +-------------------+----------------------+

    Args:
        t (numpy.ndarray): Time axes of DEER trace
        r (float, int, numpy.ndarray): Distances value or values in meters
        method (str): Method for calculating deer trace, by default uses fresnel integral
        angles (int): For brute force method, number of angles to average when generating DEER trace

    Returns:
        numpy.ndarray: DEER trace

    Raises:
        ValueError: If ``method`` is not 'fresnel' or 'brute force'.

    Example::

        import numpy as np
        from matplotlib.pylab import *

        r = 4e-9
        t = np.r_[0.:10e-6:1000j]

        trace = deer_trace(t,r)

        figure()
        plot(t,trace)
        show()
    '''
    # Dipolar coupling frequency (rad/s) for distance r
    omega_ee = 2.*np.pi*(5.204e-20)/(r**3.)
    if method == 'brute force':
        # Powder average: sum cos(omega(theta) * t) over a uniform grid of
        # angles, weighted by sin(theta)
        theta_array = np.r_[0.:np.pi/2.:1j*angles]
        trace = np.zeros_like(t)
        for theta in theta_array:
            omega = (omega_ee)*(3.*(np.cos(theta)**2.)-1.)
            trace = trace + np.sin(theta)*np.cos(omega*t)
        # Normalize by number of angles and Fresnel Integral
        trace = trace / (angles * (np.sqrt(np.pi/8.)))
    elif method == 'fresnel':
        x = np.sqrt(6.*omega_ee*np.abs(t))/ np.sqrt(np.pi)
        S, C = fresnel(x)
        # Bug fix: at t = 0 the argument x is zero and the closed-form
        # expression is 0/0 (previously returned NaN). Evaluate where
        # x != 0 and substitute the exact limit (trace -> 1 as t -> 0).
        with np.errstate(divide='ignore', invalid='ignore'):
            trace = np.cos(omega_ee*t)*(C/x) + np.sin(omega_ee*np.abs(t))*(S/x)
        trace = np.where(x == 0., 1., trace)
    else:
        # Previously an unknown method fell through to a NameError
        raise ValueError("method must be 'fresnel' or 'brute force'")
    return trace
def background(t, tau, A, B, d = 3.):
    '''DEER background function: an offset plus a stretched exponential decay,
    A + B * exp(-|t|**(d/3) / tau).

    Args:
        t (numpy.ndarray): Time axes for background function
        tau (float): Time constant
        A (float): Offset
        B (float): Scaling factor
        d (float): dimensionality of background function

    Returns:
        numpy.ndarray: Background signal
    '''
    decay = np.exp(-1 * (np.abs(t) ** (d / 3.)) / tau)
    return A + B * decay
def background_x0(t, data):
    '''Guess initial parameters for the background function fit.

    Args:
        t (numpy.ndarray): Array of axes
        data (numpy.ndarray): Array of data

    Returns:
        list: Initial guess [tau, A, B] where A is the final data value and
        B spans from A to the data maximum
    '''
    offset = data[-1]
    amplitude = np.max(data) - offset
    return [10e-6, offset, amplitude]
def tikhonov_background(t, r, K, data, background_function = background, r_background = None, lambda_ = 1., L = 'Identity', x0 = None):
    '''Fit DEER data to background function by forcing P(r) to be zero

    Args:
        t (numpy.ndarray): Array of time axis values
        r (numpy.ndarray): Array of distance values for Kernel
        K (numpy.ndarray): Kernel matrix
        data (numpy.ndarray): Array of data values
        background_function (func): Background function
        r_background (float): Distance above which P(r) is optimized to zero
        lambda_ (float): Regularization parameter
        L (str, numpy.ndarray): Regularization operator, by default Identity for background optimization
        x0 (list): Initial guess for background fit parameters

    Returns:
        numpy.ndarray: Background fit of data
    '''
    # If not given, determine r_background from the trace length.
    # Bug fix: use 'is None' -- '== None' breaks for numpy scalar input.
    if r_background is None:
        r_background = background_dist(t)

    # If not given, guess the initial background parameters from the data
    if x0 is None:
        x0 = background_x0(t, data)

    def res(x, data, t, r, K, r_background):
        # Residual: the part of the Tikhonov P(r) above r_background,
        # which the optimizer drives toward zero (entries below the
        # threshold are masked out).
        P_tik = tikhonov(K, (data / background_function(t, *x)) - 1., lambda_ = lambda_, L = L)
        P_tik[r < r_background] = 0.
        residual = P_tik
        return residual

    out = least_squares(res, x0, verbose = 2, args = (data, t, r, K, r_background), method = 'lm')
    x = out['x']
    fit = background_function(t, *x)
    return fit
def exp_background(t, data, background_function = background, t_min = 0., x0 = None):
    '''Fit DEER data to background function

    Args:
        t (numpy.ndarray): Array of time axis values
        data (numpy.ndarray): Array of data values
        background_function (func): Background function
        t_min (float): Start time for fit
        x0 (list): Initial guess for background fit parameters

    Returns:
        numpy.ndarray: Fit of data
    '''
    # Bug fix: use 'is None' -- comparing a user-supplied numpy array to
    # None with '==' is elementwise and made this branch raise ValueError.
    if x0 is None:
        x0 = background_x0(t, data)

    def res(x, t, data):
        # Least-squares residual against the background model
        residual = data - background_function(t, *x)
        return residual

    # select range of data for fit
    data_fit = data[t >= t_min]
    t_fit = t[t >= t_min]

    out = least_squares(res, x0, verbose = 2, args = (t_fit, data_fit), method = 'lm')
    x = out['x']
    fit = background_function(t,*x)
    return fit
def operator(n, L):
    '''Return operator for Regularization

    +-------------------+----------------------+
    |Operator (L)       |Description           |
    +===================+======================+
    |'Identity'         |Identity Matrix       |
    +-------------------+----------------------+
    |'1st Derivative'   |1st Derivative Matrix |
    +-------------------+----------------------+
    |'2nd Derivative'   |2nd Derivative Matrix |
    +-------------------+----------------------+

    Args:
        n (int): Number of points in Kernel distance dimension
        L (str, None, numpy.ndarray): Name of operator, None for the default
            2nd derivative, or a pre-built operator passed through unchanged

    Returns:
        numpy.ndarray: Regularization operator as numpy array

    Raises:
        ValueError: If L is a string that does not name a known operator
    '''
    if L is None:
        L = '2nd Derivative'
    elif not isinstance(L, str):
        # Bug fix: a user-supplied numpy operator previously hit
        # 'L == None', whose elementwise result has no truth value and
        # raised ValueError. Pass pre-built operators straight through.
        return L
    if L == 'Identity':
        return np.eye(n)
    if L == '1st Derivative':
        # Forward-difference matrix, shape (n-1, n)
        D = np.diag(-1. * np.ones(n), k = 0)
        D += np.diag(1. * np.ones(n - 1), k = 1)
        return D[:-1, :]
    if L == '2nd Derivative':
        # Second-difference matrix, shape (n-2, n)
        D = np.diag(1. * np.ones(n), k = 0)
        D += np.diag(-2. * np.ones(n - 1), k = 1)
        D += np.diag(1. * np.ones(n - 2), k = 2)
        return D[:-2, :]
    raise ValueError('Operator string not understood')
def tikhonov(K, S, lambda_ = 1.0, L = None):
    '''Perform Tikhonov Regularization

    .. math::
        P_\lambda = {(K^TK + \lambda^2 L^TL)}^{-1} K^TS

    Args:
        K (numpy.ndarray): Kernel Matrix
        S (numpy.ndarray): Experimental DEER trace
        lambda_ (float): Regularization parameter
        L (None, numpy.ndarray): Tikhonov regularization operator, uses 2nd derivative if argument is None

    Returns:
        numpy.ndarray: Distance distribution from Tikhonov regularization
    '''
    # Select Real Part
    S = np.real(S)

    # Number of distance points sets the operator size
    n = np.shape(K)[1]

    # Determine Operator for Regularization
    L = operator(n, L)

    # Solve the regularized normal equations. np.linalg.solve replaces the
    # previous explicit matrix inverse: it is faster and numerically better
    # conditioned, with identical results up to round-off.
    A = np.dot(K.T, K) + (lambda_**2.) * np.dot(L.T, L)
    P_lambda = np.linalg.solve(A, np.dot(K.T, S))
    return P_lambda
def L_curve(K, S, lambda_array, L = None):
    '''Generate Tikhonov L-curve

    For each regularization parameter the residual norm ||K P - S|| and the
    solution norm ||P|| are evaluated from the Tikhonov solution.

    Args:
        K (numpy.ndarray): Kernel Matrix
        S (numpy.ndarray): Experimental DEER trace
        lambda_ (numpy.ndarray): Array of Regularization parameters
        L (None, numpy.ndarray): Tikhonov regularization operator, uses 2nd derivative if argument is None

    Returns:
        tuple: tuple containing:

            rho_array (*numpy.ndarray*): Residual Norm

            eta_array (*numpy.ndarray*): Solution Norm
    '''
    rho = []
    eta = []
    for lam in lambda_array:
        P = tikhonov(K, S, lam, L = L)
        rho.append(np.linalg.norm(S - np.dot(K, P)))
        eta.append(np.linalg.norm(P))
    return np.array(rho), np.array(eta)
def maximum_entropy(K, S, lambda_):
    '''Maximum Entropy method for determining P(r)

    .. math::
        \Phi_{ME}[P] = \|K P(r) - S\|^2 + \lambda^2 \\times \int [P(r) \ln \\frac{P(r)}{P_0(r)} + \\frac{P_0(r)}{e}] dr \\Rightarrow \min

    Args:
        K (numpy.ndarray): Kernel Matrix
        S (numpy.ndarray): Data array
        lambda_ (float): Regularization parameter

    Returns:
        numpy.ndarray: Distance distribution minimized by maximum entropy method.
    '''
    # Objective: least-squares misfit plus the entropy penalty of P relative
    # to the reference distribution x0. NOTE: min_func closes over x0, which
    # is assigned *below* this def -- the name is only resolved when the
    # optimizer first calls min_func, by which point x0 exists. Do not
    # reorder these statements.
    def min_func(P, K, S, lambda_):
        res = np.linalg.norm(np.dot(K, P) - S)**2. + (lambda_**2.)*np.sum((P*np.log((P/x0)) + x0/np.exp(1)))
        return res
    # Reference distribution: Tikhonov solution, clipped to small positive
    # values so the logarithm in the objective stays finite.
    x0 = tikhonov(K, S, lambda_)
    x0[x0<=0.] = 1.e-5
    n = np.shape(K)[1]
    # Strictly positive lower bound keeps log(P/x0) defined during the fit
    bounds = tuple(zip(1e-15*np.ones(n),np.inf*np.ones(n)))
    output = minimize(min_func, x0, args = (K, S, lambda_), method = 'SLSQP', bounds = bounds, options = {'disp':True})
    P_lambda = output['x']
    return P_lambda
def model_free(K, S, lambda_ = 1., L = None):
    '''Model Free P(r) with non-negative constraints

    .. math::
        \Phi_{MF}[P(r)] = \|K P(r) - S\|^2 + \lambda^2 \| LP(r) \|^2 \\Rightarrow \min

    Args:
        K (numpy.ndarray): Kernel Matrix
        S (numpy.ndarray): Data array
        lambda_ (float): Regularization parameter
        L (str, numpy.ndarray): Operator for regularization

    Returns:
        numpy.ndarray: Distance distribution from model free fit
    '''
    def objective(P, K, S, lambda_, L):
        # Data misfit plus squared-norm regularization penalty
        misfit = np.linalg.norm(np.dot(K, P) - S) ** 2.
        penalty = np.linalg.norm(np.dot(L, P)) ** 2.
        return misfit + (lambda_ ** 2.) * penalty

    n_dist = np.shape(K)[1]
    # Determine Operator for Regularization
    L = operator(n_dist, L)

    # Start from the Tikhonov solution, clipped to small positive values
    guess = tikhonov(K, S, lambda_)
    guess[guess <= 0.] = 1.e-5

    # Non-negativity constraint on every point of P(r)
    bounds = tuple(zip(np.zeros(len(guess)), np.inf * np.ones(len(guess))))

    result = minimize(objective, guess, args = (K, S, lambda_, L), bounds = bounds, options = {'disp': True})
    return result['x']
def gaussian(r, sigma, mu, Normalize = False):
    '''Return a Gaussian distribution over *r* with standard deviation *sigma*
    and mean *mu*.

    When Normalize is True the curve is scaled by 1/sqrt(2*pi*sigma**2) so it
    integrates to one; otherwise its peak value is one.

    Args:
        r (numpy.ndarray): Numpy array of distance values
        sigma (float): Standard deviation
        mu (float): Mean distance
        Normalize (bool): If True, the integral of Gaussian is normalized to 1

    Returns:
        numpy.ndarray: Gaussian distribution
    '''
    shape = np.exp(-1 * (r - mu) ** 2. / (2. * (sigma ** 2.)))
    if Normalize:
        return shape / np.sqrt(2 * np.pi * (sigma ** 2.))
    return shape
def gaussians(r, x):
    '''Return the sum of several Gaussian distributions.

    Args:
        r (numpy.ndarray): Numpy array of distance values
        x (list): list of lists. Each gaussian is definied by a list of 3 parameters. The parameters are ordered: A - amplitude, sigma - standard deviation, mu - center of distribution.

    Returns:
        numpy.ndarray: Sum of the component Gaussian distributions
    '''
    total = np.zeros(len(r))
    for params in x:
        amplitude, width, center = params[0], params[1], params[2]
        total += amplitude * gaussian(r, width, center)
    return total
def model_gaussian(K, S, r, x0 = None):
    '''Gaussian based fit for distance distribution

    Args:
        K (numpy.ndarray): Kernel Matrix
        S (numpy.ndarray): Data array
        r (numpy.ndarray): Array of distance values
        x0 (None, List): Initial guess [A, sigma, mu]. If None, the initial
            guess is automatically chosen based on Tikhonov regularization P(r)

    Returns:
        tuple: tuple containing:

            P_gauss (*numpy.ndarray*): distance distribution

            x_fit (*dict*): Dictionary of fitting parameters
    '''
    def min_func(x, K, S, r):
        # Squared residual between the data and the DEER trace of a
        # single Gaussian distribution
        A = x[0]
        sigma = x[1]
        mu = x[2]
        P_fit = A * gaussian(r, sigma, mu)
        S_fit = np.dot(K, P_fit)
        res = sum((S_fit - S) ** 2.)
        return res

    # Amplitude, sigma and mu must all be non-negative
    bounds = tuple(zip(np.zeros(3), np.inf * np.ones(3)))

    # Bug fix: 'is None' rather than '== None' -- a user-supplied numpy
    # array of parameters would otherwise make this branch raise.
    if x0 is None:  # Find initial guess based on Tikhonov Regularization
        P_lambda = tikhonov(K, S, lambda_ = 1.0, L = None)

        A_0 = np.max(P_lambda)  # Amplitude is maximum value
        sigma_0 = 0.2e-9  # Fixed starting width
        mu_0 = r[np.argmax(P_lambda)]  # center is maximum

        def guess_min_func(x, P_lambda, r):
            # Squared residual between a Gaussian and the Tikhonov P(r)
            A = x[0]
            sigma = x[1]
            mu = x[2]
            res = sum((A * gaussian(r, sigma, mu) - P_lambda) ** 2.)
            return res

        x0 = [A_0, sigma_0, mu_0]
        guess_output = minimize(guess_min_func, x0, args = (P_lambda, r), method = 'Nelder-Mead', bounds = bounds, options = {'disp':True})
        A_0 = guess_output['x'][0]
        sigma_0 = guess_output['x'][1]
        mu_0 = guess_output['x'][2]
        x0 = [A_0, sigma_0, mu_0]

    output = minimize(min_func, x0, args = (K, S, r), method = 'Nelder-Mead', options = {'disp':True})
    A = output['x'][0]
    sigma = output['x'][1]
    mu = output['x'][2]

    P_gauss = A * gaussian(r, sigma, mu)

    x_fit = {'A': A, 'sigma': sigma, 'mu': mu}
    return P_gauss, x_fit
def svd(K, S, cutoff = None):
    '''Truncated-SVD solution for the distance distribution.

    Performs SVD on the Kernel Matrix, singular values above the cutoff index
    are set to zero, then calculates the distance distribution with the
    cutoff-applied pseudo inverse.

    .. math::
        S = K P

        S = U \Sigma V^T P

        P = V \Sigma^{-1} U^T S

    Args:
        K (numpy.ndarray): Kernel
        S (numpy.ndarray): Data array
        cutoff (int): Number of singular values to include. None correponds to including all singular values (no cutoff applied).

    Returns:
        P (*numpy.ndarray*): Distance distribution array
    '''
    # Perform SVD on Kernel (V here is the transposed right factor V^T)
    U, singular_values, V = np.linalg.svd(K)

    # Apply cutoff to singular values. Bug fix: only truncate when a cutoff
    # was requested -- previously 'singular_values[None:] = 0' zeroed the
    # *entire* array when cutoff was None, so P was always zero.
    if cutoff is not None:
        singular_values[int(cutoff):] = 0

    # Construct matrix of singular values
    m, n = np.shape(K)
    sigma = np.zeros((m, n))
    sigma[:int(min(m, n)), :int(min(m, n))] = np.diag(singular_values)

    # Inverse Matrix from SVD with cutoff applied
    A = np.dot(V.T, np.dot(np.linalg.pinv(sigma), U.T))

    # Calculate P(r)
    P = np.dot(A, S)
    return P
def zero_time(t, S, method = 'polyfit', **kwargs):
    '''Shift DEER data to correct zero time offset

    +-------------------+----------------------------------------------+
    |Method             |Description                                   |
    +===================+==============================================+
    |'max'              |Set zero time to maximum of data              |
    +-------------------+----------------------------------------------+
    |'polyfit'          |polynomial fit about time zero                |
    +-------------------+----------------------------------------------+

    Parameters for 'polyfit' Method:

    +-------------------+-------------------------------------------------------+------------+
    |Argument           |Description                                            |Default     |
    +===================+=======================================================+============+
    |'time_width'       |Time width about zero for polynomial fit (in seconds)  | 100e-9     |
    +-------------------+-------------------------------------------------------+------------+
    |'deg'              |degree of polynomial fit                               | 3          |
    +-------------------+-------------------------------------------------------+------------+

    Args:
        t (numpy.ndarray): Time axes
        S (numpy.ndarray): Data array
        method (str): Method to use for zero time correction

    Returns:
        tuple: tuple containing

            *numpy.ndarray*: Shifted time axes

            *numpy.ndarray*: Data array
    '''
    if method == 'max':
        # Put the data maximum at t = 0
        ix = np.argmax(S)
        t = t - t[ix]
    elif method == 'polyfit':
        if 'time_width' in kwargs:
            time_width = kwargs.pop('time_width')
        else:
            time_width = 100e-9
        # Bug fix: np.polyfit requires an integer degree; the previous
        # default was the float 3., which newer numpy versions reject.
        if 'deg' in kwargs:
            deg = int(kwargs.pop('deg'))
        else:
            deg = 3

        # Fit a polynomial over a window of width time_width centered on zero
        t_ix_min = np.argmin(np.abs(t + time_width/2.))
        t_ix_max = np.argmin(np.abs(t - time_width/2.))
        t_fit = t[t_ix_min:t_ix_max]
        S_fit = S[t_ix_min:t_ix_max]

        p = np.polyfit(t_fit, S_fit, deg)
        pder = np.polyder(p)

        roots = np.roots(pder)
        # Discard complex roots (numerical noise) before choosing
        real_roots = np.real(roots[np.abs(np.imag(roots)) < 1e-12])
        if real_roots.size == 0:
            real_roots = np.real(roots)
        # Bug fix: pick the root closest to zero *with its sign intact*.
        # np.min(np.abs(roots)) discarded the sign, so a negative zero-time
        # offset shifted the axis in the wrong direction.
        near_zero_root = real_roots[np.argmin(np.abs(real_roots))]

        t = t - near_zero_root
    return t, S
def truncate(t, S, t_truncate):
    '''Truncate time axes and data at given time

    Args:
        t (numpy.ndarray): Time axes
        S (numpy.ndarray): Data Axes
        t_truncate (float): time to trunctate data after

    Returns:
        tuple: tuple containing

            *numpy.ndarray*: Truncated time axes

            *numpy.ndarray*: Truncated data axes
    '''
    cut = np.argmin(np.abs(t - t_truncate))
    return t[:cut], S[:cut]
if __name__ == '__main__':
    # Module is intended to be imported; there is no command-line behavior.
    pass
| [
"numpy.sqrt",
"numpy.linalg.pinv",
"numpy.polyfit",
"numpy.log",
"numpy.roots",
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"numpy.imag",
"scipy.optimize.least_squares",
"numpy.max",
"numpy.exp",
"numpy.real",
"numpy.dot",
"scipy.special.fresnel",
"numpy.abs",
"numpy.eye",
"nu... | [((2633, 2666), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (2645, 2666), False, 'import os\n'), ((2687, 2723), 'numpy.loadtxt', 'np.loadtxt', (['full_path'], {'delimiter': '""","""'}), "(full_path, delimiter=',')\n", (2697, 2723), True, 'import numpy as np\n'), ((2995, 3028), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (3007, 3028), False, 'import os\n'), ((3033, 3072), 'numpy.savetxt', 'np.savetxt', (['full_path', 'k'], {'delimiter': '""","""'}), "(full_path, k, delimiter=',')\n", (3043, 3072), True, 'import numpy as np\n'), ((7374, 7460), 'scipy.optimize.least_squares', 'least_squares', (['res', 'x0'], {'verbose': '(2)', 'args': '(data, t, r, K, r_background)', 'method': '"""lm"""'}), "(res, x0, verbose=2, args=(data, t, r, K, r_background),\n method='lm')\n", (7387, 7460), False, 'from scipy.optimize import least_squares, minimize\n'), ((8263, 8333), 'scipy.optimize.least_squares', 'least_squares', (['res', 'x0'], {'verbose': '(2)', 'args': '(t_fit, data_fit)', 'method': '"""lm"""'}), "(res, x0, verbose=2, args=(t_fit, data_fit), method='lm')\n", (8276, 8333), False, 'from scipy.optimize import least_squares, minimize\n'), ((10226, 10236), 'numpy.real', 'np.real', (['S'], {}), '(S)\n', (10233, 10236), True, 'import numpy as np\n'), ((11283, 11301), 'numpy.array', 'np.array', (['rho_list'], {}), '(rho_list)\n', (11291, 11301), True, 'import numpy as np\n'), ((11318, 11336), 'numpy.array', 'np.array', (['eta_list'], {}), '(eta_list)\n', (11326, 11336), True, 'import numpy as np\n'), ((12176, 12279), 'scipy.optimize.minimize', 'minimize', (['min_func', 'x0'], {'args': '(K, S, lambda_)', 'method': '"""SLSQP"""', 'bounds': 'bounds', 'options': "{'disp': True}"}), "(min_func, x0, args=(K, S, lambda_), method='SLSQP', bounds=bounds,\n options={'disp': True})\n", (12184, 12279), False, 'from scipy.optimize import least_squares, minimize\n'), ((13216, 13307), 
'scipy.optimize.minimize', 'minimize', (['min_func', 'x0'], {'args': '(K, S, lambda_, L)', 'bounds': 'bounds', 'options': "{'disp': True}"}), "(min_func, x0, args=(K, S, lambda_, L), bounds=bounds, options={\n 'disp': True})\n", (13224, 13307), False, 'from scipy.optimize import least_squares, minimize\n'), ((16821, 16910), 'scipy.optimize.minimize', 'minimize', (['min_func', 'x0'], {'args': '(K, S, r)', 'method': '"""Nelder-Mead"""', 'options': "{'disp': True}"}), "(min_func, x0, args=(K, S, r), method='Nelder-Mead', options={\n 'disp': True})\n", (16829, 16910), False, 'from scipy.optimize import least_squares, minimize\n'), ((17831, 17847), 'numpy.linalg.svd', 'np.linalg.svd', (['K'], {}), '(K)\n', (17844, 17847), True, 'import numpy as np\n'), ((17974, 17985), 'numpy.shape', 'np.shape', (['K'], {}), '(K)\n', (17982, 17985), True, 'import numpy as np\n'), ((17998, 18014), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (18006, 18014), True, 'import numpy as np\n'), ((18060, 18084), 'numpy.diag', 'np.diag', (['singular_values'], {}), '(singular_values)\n', (18067, 18084), True, 'import numpy as np\n'), ((18222, 18234), 'numpy.dot', 'np.dot', (['A', 'S'], {}), '(A, S)\n', (18228, 18234), True, 'import numpy as np\n'), ((569, 590), 'numpy.exp', 'np.exp', (['(-1.0j * phase)'], {}), '(-1.0j * phase)\n', (575, 590), True, 'import numpy as np\n'), ((3367, 3376), 'numpy.max', 'np.max', (['t'], {}), '(t)\n', (3373, 3376), True, 'import numpy as np\n'), ((4681, 4697), 'numpy.zeros_like', 'np.zeros_like', (['t'], {}), '(t)\n', (4694, 4697), True, 'import numpy as np\n'), ((5963, 5975), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (5969, 5975), True, 'import numpy as np\n'), ((9230, 9239), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (9236, 9239), True, 'import numpy as np\n'), ((10293, 10304), 'numpy.shape', 'np.shape', (['K'], {}), '(K)\n', (10301, 10304), True, 'import numpy as np\n'), ((10458, 10472), 'numpy.dot', 'np.dot', (['K.T', 'S'], {}), '(K.T, 
S)\n', (10464, 10472), True, 'import numpy as np\n'), ((12086, 12097), 'numpy.shape', 'np.shape', (['K'], {}), '(K)\n', (12094, 12097), True, 'import numpy as np\n'), ((12993, 13004), 'numpy.shape', 'np.shape', (['K'], {}), '(K)\n', (13001, 13004), True, 'import numpy as np\n'), ((14147, 14198), 'numpy.exp', 'np.exp', (['(-1 * (r - mu) ** 2.0 / (2.0 * sigma ** 2.0))'], {}), '(-1 * (r - mu) ** 2.0 / (2.0 * sigma ** 2.0))\n', (14153, 14198), True, 'import numpy as np\n'), ((15765, 15781), 'numpy.dot', 'np.dot', (['K', 'P_fit'], {}), '(K, P_fit)\n', (15771, 15781), True, 'import numpy as np\n'), ((16041, 16057), 'numpy.max', 'np.max', (['P_lambda'], {}), '(P_lambda)\n', (16047, 16057), True, 'import numpy as np\n'), ((16450, 16563), 'scipy.optimize.minimize', 'minimize', (['guess_min_func', 'x0'], {'args': '(P_lambda, r)', 'method': '"""Nelder-Mead"""', 'bounds': 'bounds', 'options': "{'disp': True}"}), "(guess_min_func, x0, args=(P_lambda, r), method='Nelder-Mead',\n bounds=bounds, options={'disp': True})\n", (16458, 16563), False, 'from scipy.optimize import least_squares, minimize\n'), ((19912, 19924), 'numpy.argmax', 'np.argmax', (['S'], {}), '(S)\n', (19921, 19924), True, 'import numpy as np\n'), ((20976, 20998), 'numpy.abs', 'np.abs', (['(t - t_truncate)'], {}), '(t - t_truncate)\n', (20982, 20998), True, 'import numpy as np\n'), ((5071, 5081), 'scipy.special.fresnel', 'fresnel', (['x'], {}), '(x)\n', (5078, 5081), False, 'from scipy.special import fresnel\n'), ((11236, 11260), 'numpy.linalg.norm', 'np.linalg.norm', (['P_lambda'], {}), '(P_lambda)\n', (11250, 11260), True, 'import numpy as np\n'), ((14074, 14125), 'numpy.exp', 'np.exp', (['(-1 * (r - mu) ** 2.0 / (2.0 * sigma ** 2.0))'], {}), '(-1 * (r - mu) ** 2.0 / (2.0 * sigma ** 2.0))\n', (14080, 14125), True, 'import numpy as np\n'), ((15861, 15872), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (15869, 15872), True, 'import numpy as np\n'), ((16151, 16170), 'numpy.argmax', 'np.argmax', (['P_lambda'], 
{}), '(P_lambda)\n', (16160, 16170), True, 'import numpy as np\n'), ((18163, 18184), 'numpy.linalg.pinv', 'np.linalg.pinv', (['sigma'], {}), '(sigma)\n', (18177, 18184), True, 'import numpy as np\n'), ((20412, 20441), 'numpy.polyfit', 'np.polyfit', (['t_fit', 'S_fit', 'deg'], {}), '(t_fit, S_fit, deg)\n', (20422, 20441), True, 'import numpy as np\n'), ((20458, 20471), 'numpy.polyder', 'np.polyder', (['p'], {}), '(p)\n', (20468, 20471), True, 'import numpy as np\n'), ((521, 531), 'numpy.imag', 'np.imag', (['S'], {}), '(S)\n', (528, 531), True, 'import numpy as np\n'), ((540, 550), 'numpy.real', 'np.real', (['S'], {}), '(S)\n', (547, 550), True, 'import numpy as np\n'), ((4946, 4966), 'numpy.sqrt', 'np.sqrt', (['(np.pi / 8.0)'], {}), '(np.pi / 8.0)\n', (4953, 4966), True, 'import numpy as np\n'), ((5040, 5054), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (5047, 5054), True, 'import numpy as np\n'), ((10413, 10427), 'numpy.dot', 'np.dot', (['K.T', 'K'], {}), '(K.T, K)\n', (10419, 10427), True, 'import numpy as np\n'), ((12131, 12141), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (12138, 12141), True, 'import numpy as np\n'), ((12149, 12159), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (12156, 12159), True, 'import numpy as np\n'), ((14041, 14074), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * sigma ** 2.0)'], {}), '(2 * np.pi * sigma ** 2.0)\n', (14048, 14074), True, 'import numpy as np\n'), ((15881, 15891), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (15888, 15891), True, 'import numpy as np\n'), ((20241, 20269), 'numpy.abs', 'np.abs', (['(t + time_width / 2.0)'], {}), '(t + time_width / 2.0)\n', (20247, 20269), True, 'import numpy as np\n'), ((20297, 20325), 'numpy.abs', 'np.abs', (['(t - time_width / 2.0)'], {}), '(t - time_width / 2.0)\n', (20303, 20325), True, 'import numpy as np\n'), ((901, 912), 'numpy.shape', 'np.shape', (['S'], {}), '(S)\n', (909, 912), True, 'import numpy as np\n'), ((4819, 4832), 'numpy.sin', 'np.sin', (['theta'], {}), 
'(theta)\n', (4825, 4832), True, 'import numpy as np\n'), ((4833, 4850), 'numpy.cos', 'np.cos', (['(omega * t)'], {}), '(omega * t)\n', (4839, 4850), True, 'import numpy as np\n'), ((5099, 5119), 'numpy.cos', 'np.cos', (['(omega_ee * t)'], {}), '(omega_ee * t)\n', (5105, 5119), True, 'import numpy as np\n'), ((9296, 9306), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (9303, 9306), True, 'import numpy as np\n'), ((9338, 9352), 'numpy.ones', 'np.ones', (['(n - 1)'], {}), '(n - 1)\n', (9345, 9352), True, 'import numpy as np\n'), ((10442, 10456), 'numpy.dot', 'np.dot', (['L.T', 'L'], {}), '(L.T, L)\n', (10448, 10456), True, 'import numpy as np\n'), ((11190, 11209), 'numpy.dot', 'np.dot', (['K', 'P_lambda'], {}), '(K, P_lambda)\n', (11196, 11209), True, 'import numpy as np\n'), ((20512, 20526), 'numpy.roots', 'np.roots', (['pder'], {}), '(pder)\n', (20520, 20526), True, 'import numpy as np\n'), ((5028, 5037), 'numpy.abs', 'np.abs', (['t'], {}), '(t)\n', (5034, 5037), True, 'import numpy as np\n'), ((9451, 9461), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (9458, 9461), True, 'import numpy as np\n'), ((9494, 9508), 'numpy.ones', 'np.ones', (['(n - 1)'], {}), '(n - 1)\n', (9501, 9508), True, 'import numpy as np\n'), ((9538, 9552), 'numpy.ones', 'np.ones', (['(n - 2)'], {}), '(n - 2)\n', (9545, 9552), True, 'import numpy as np\n'), ((11921, 11933), 'numpy.dot', 'np.dot', (['K', 'P'], {}), '(K, P)\n', (11927, 11933), True, 'import numpy as np\n'), ((12890, 12902), 'numpy.dot', 'np.dot', (['K', 'P'], {}), '(K, P)\n', (12896, 12902), True, 'import numpy as np\n'), ((12946, 12958), 'numpy.dot', 'np.dot', (['L', 'P'], {}), '(L, P)\n', (12952, 12958), True, 'import numpy as np\n'), ((4768, 4781), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4774, 4781), True, 'import numpy as np\n'), ((5142, 5151), 'numpy.abs', 'np.abs', (['t'], {}), '(t)\n', (5148, 5151), True, 'import numpy as np\n'), ((5626, 5635), 'numpy.abs', 'np.abs', (['t'], {}), '(t)\n', (5632, 5635), True, 
'import numpy as np\n'), ((11969, 11983), 'numpy.log', 'np.log', (['(P / x0)'], {}), '(P / x0)\n', (11975, 11983), True, 'import numpy as np\n'), ((11989, 11998), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (11995, 11998), True, 'import numpy as np\n')] |
import sys, arrow, time, dateutil
from uuid import uuid4
from datetime import datetime
sys.path.insert(0, 'controller/')
#Import library essentials
from sumy.parsers.plaintext import PlaintextParser #We're choosing a plaintext parser here, other parsers available for HTML etc.
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer #We're choosing Lexrank, other algorithms are also built in
#from summa import summarizer
from textblob.classifiers import NaiveBayesClassifier
class Story:
    """A news story aggregated from one or more sources.

    Each instance receives a sequential integer id from a module-level
    counter. Title, story text and category are lazily derived from the
    attached sources when they have not been set explicitly.
    """

    # Module-level sequential id counter shared by all instances
    global unique_id
    unique_id = 0

    def __init__(self):
        global unique_id
        self.story_id = unique_id
        unique_id += 1
        self.title = ""
        # Creation timestamp, formatted as 'dd/mm/yy HH:MM:SS'
        self.date = time.strftime("%d/%m/%y %H:%M:%S")
        self.category = None
        self.story = ""
        self.sources = []

    def get_unique_id(self):
        return self.story_id

    def set_unique_id(self, story_id):
        self.story_id = story_id

    def get_title(self):
        """Return the explicit title, falling back to the first source's
        headline; if neither exists, the number of sources as a string."""
        try:
            # Bug fix: '==' rather than 'is' -- identity comparison with a
            # string literal only worked by CPython interning accident.
            if self.title == "":
                return self.sources[0].headline
            else:
                return self.title
        except Exception:
            # Bug fix: was 'len(sources)', a NameError inside this handler
            return str(len(self.sources))

    def set_title(self, title):
        self.title = title

    def get_date(self):
        return self.date

    def get_friendly_date(self):
        """Return the story date as a human-readable relative time."""
        tz = 'Europe/London'
        arrow_date = arrow.get(self.date, 'DD/MM/YY HH:mm:ss').replace(tzinfo=dateutil.tz.gettz(tz))
        return arrow_date.humanize()

    def set_date(self, date):
        self.date = date

    def get_category(self):
        """Classify and cache the story category with a Naive Bayes model."""
        if self.category is not None:
            return self.category
        with open('model/categories.json', 'r') as fp:
            cl = NaiveBayesClassifier(fp, format="json")
        self.category = cl.classify(self.story)
        return self.category

    def set_category(self, category):
        self.category = category

    def get_story(self):
        """Return the explicit story text, or a summary of all source stories."""
        if self.story == "":
            added_story = ""
            for source in self.sources:
                added_story += " " + source.get_story()
            return self.get_summary(added_story)
        else:
            return self.story

    def set_story(self, story):
        self.story = story

    def get_sources(self):
        return self.sources

    def add_source(self, source):
        self.sources.append(source)

    def is_breaking(self):
        """Return True if the story was created today within the last hour."""
        if datetime.strptime(self.get_date(), "%d/%m/%y %H:%M:%S").date() == datetime.today().date():
            if (int(datetime.today().hour) - int(datetime.strptime(self.get_date(), "%d/%m/%y %H:%M:%S").hour)) <= 1:
                return True
        return False

    def source_exists(self, url):
        """Return True if any attached source matches *url*."""
        for source in self.sources:
            if source.check_source(url):
                return True
        return False

    def get_summary(self, text):
        """Summarize *text* to three sentences using LexRank."""
        parser = PlaintextParser.from_string(text, Tokenizer("english"))
        summarizer = LexRankSummarizer()
        summary = summarizer(parser.document, 3)  # Summarize the document with 3 sentences
        result = ""
        for sentence in summary:
            result += " " + str(sentence)
        return result
| [
"sys.path.insert",
"textblob.classifiers.NaiveBayesClassifier",
"sumy.summarizers.lex_rank.LexRankSummarizer",
"dateutil.tz.gettz",
"time.strftime",
"arrow.get",
"sumy.nlp.tokenizers.Tokenizer",
"datetime.datetime.today"
] | [((88, 121), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""controller/"""'], {}), "(0, 'controller/')\n", (103, 121), False, 'import sys, arrow, time, dateutil\n'), ((692, 726), 'time.strftime', 'time.strftime', (['"""%d/%m/%y %H:%M:%S"""'], {}), "('%d/%m/%y %H:%M:%S')\n", (705, 726), False, 'import sys, arrow, time, dateutil\n'), ((2581, 2600), 'sumy.summarizers.lex_rank.LexRankSummarizer', 'LexRankSummarizer', ([], {}), '()\n', (2598, 2600), False, 'from sumy.summarizers.lex_rank import LexRankSummarizer\n'), ((1520, 1559), 'textblob.classifiers.NaiveBayesClassifier', 'NaiveBayesClassifier', (['fp'], {'format': '"""json"""'}), "(fp, format='json')\n", (1540, 1559), False, 'from textblob.classifiers import NaiveBayesClassifier\n'), ((2544, 2564), 'sumy.nlp.tokenizers.Tokenizer', 'Tokenizer', (['"""english"""'], {}), "('english')\n", (2553, 2564), False, 'from sumy.nlp.tokenizers import Tokenizer\n'), ((1222, 1263), 'arrow.get', 'arrow.get', (['self.date', '"""DD/MM/YY HH:mm:ss"""'], {}), "(self.date, 'DD/MM/YY HH:mm:ss')\n", (1231, 1263), False, 'import sys, arrow, time, dateutil\n'), ((1279, 1300), 'dateutil.tz.gettz', 'dateutil.tz.gettz', (['tz'], {}), '(tz)\n', (1296, 1300), False, 'import sys, arrow, time, dateutil\n'), ((2157, 2173), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2171, 2173), False, 'from datetime import datetime\n'), ((2193, 2209), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2207, 2209), False, 'from datetime import datetime\n')] |
'''
IoT Bootcamp
Trainer: <NAME>
Phase 4 - Part 2: Building Web App for Home Automation
Code Sample: Controlling LED using an HTML page
'''
from flask import Flask, render_template, request
from gpiozero import LED
from time import sleep
# Flask application serving the LED control pages.
app = Flask(__name__)
# Default: GPIO4
# LED attached to BCM pin 4 of the Raspberry Pi.
led = LED(4)
@app.route('/')
def home():
    """Render the landing page with the LED presented as off."""
    initial_state = dict(led_status=0, pin=4, action='off')
    return render_template('home.html', data=initial_state)
@app.route('/<action>')
def action(action):
    """Switch the LED on or off according to the URL segment, then re-render."""
    state = {
        'led_status': 0,
        'pin': 4,
        'action': action,
    }
    if action == 'on':
        led.on()
        print("led_data['pin']", state['pin'])
        state['led_status'] = 1
    elif action == 'off':
        led.off()
        state['led_status'] = 0
    return render_template('home.html', data=state)
if __name__ == "__main__":
    # Run the Flask development server with auto-reload enabled.
    app.run(debug=True)
| [
"flask.render_template",
"gpiozero.LED",
"flask.Flask"
] | [((245, 260), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (250, 260), False, 'from flask import Flask, render_template, request\n'), ((285, 291), 'gpiozero.LED', 'LED', (['(4)'], {}), '(4)\n', (288, 291), False, 'from gpiozero import LED\n'), ((425, 468), 'flask.render_template', 'render_template', (['"""home.html"""'], {'data': 'led_data'}), "('home.html', data=led_data)\n", (440, 468), False, 'from flask import Flask, render_template, request\n'), ((822, 865), 'flask.render_template', 'render_template', (['"""home.html"""'], {'data': 'led_data'}), "('home.html', data=led_data)\n", (837, 865), False, 'from flask import Flask, render_template, request\n')] |
import os
import cv2
import numpy as np
from PIL import Image
from IPython.display import Video
from IPython.display import display as ds
# Folder where all rendered results (images/videos) are written.
DESTINATION_FOLDER = "results"
def check_folder(folder):
    """Create *folder* (including parents) if it does not already exist.

    :param folder: path of the directory to ensure
    """
    # exist_ok avoids the check-then-create race of the original
    # ``if not os.path.exists: os.makedirs`` pattern.
    os.makedirs(folder, exist_ok=True)
def display(images_array, save=False):
    """Decode and display each encoded image blob in *images_array*.

    :param images_array: iterable of encoded (e.g. JPEG) image byte strings
    :param save: when True, also write each blob to DESTINATION_FOLDER
    """
    for im in images_array:
        # BUG FIX: np.fromstring is deprecated for binary input;
        # np.frombuffer is the supported equivalent.
        nparr = np.frombuffer(im, np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        ds(Image.fromarray(image))
    if save:
        check_folder(DESTINATION_FOLDER)
        for counter, im in enumerate(images_array):
            img_file = DESTINATION_FOLDER + '/res_' + str(counter) + '.jpg'
            # Context manager guarantees the handle is closed on error.
            with open(img_file, 'wb') as fd:
                fd.write(im)
def draw_bboxes(image, boxes=None, tags=None, save=False):
    """Draw labelled bounding boxes on an encoded image and display it.

    :param image: encoded image bytes (e.g. JPEG)
    :param boxes: list of dicts with keys ``x``, ``y``, ``width``, ``height``
    :param tags: list of label strings, parallel to *boxes*
    :param save: when True, also write the annotated image to disk
    """
    # BUG FIX: replace mutable default arguments with a None sentinel
    # (backward-compatible; same behaviour for callers).
    boxes = [] if boxes is None else boxes
    tags = [] if tags is None else tags
    # BUG FIX: np.fromstring is deprecated for binary input.
    nparr = np.frombuffer(image, np.uint8)
    cv_image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    # Draw a rectangle around the faces
    for counter, coords in enumerate(boxes):
        left = coords["x"]
        top = coords["y"]
        right = coords["x"] + coords["width"]
        bottom = coords["y"] + coords["height"]
        cv2.rectangle(cv_image, (left, top), (right, bottom), (0, 255, 0), 2)
        # Place the label just above the box unless it would clip the top edge.
        y = top - 15 if top - 15 > 15 else top + 15
        cv2.putText(cv_image, tags[counter], (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
    cv_image_rgb = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
    ds(Image.fromarray(cv_image_rgb))
    if save:
        check_folder(DESTINATION_FOLDER)
        img_file = DESTINATION_FOLDER + '/res_bboxes.jpg'
        cv2.imwrite(img_file, cv_image)
def display_video_mp4(blob):
    """Write *blob* to a temporary mp4 file and embed it in the notebook.

    :param blob: raw mp4 video bytes
    """
    check_folder(DESTINATION_FOLDER)
    name = DESTINATION_FOLDER + "/" + "video_tmp.mp4"
    # Context manager guarantees the handle is closed even on write errors.
    with open(name, 'wb') as fd:
        fd.write(blob)
    ds(Video(name, embed=True))
| [
"cv2.rectangle",
"os.path.exists",
"PIL.Image.fromarray",
"cv2.imwrite",
"os.makedirs",
"IPython.display.Video",
"cv2.putText",
"cv2.imdecode",
"cv2.cvtColor",
"numpy.fromstring"
] | [((888, 918), 'numpy.fromstring', 'np.fromstring', (['image', 'np.uint8'], {}), '(image, np.uint8)\n', (901, 918), True, 'import numpy as np\n'), ((934, 971), 'cv2.imdecode', 'cv2.imdecode', (['nparr', 'cv2.IMREAD_COLOR'], {}), '(nparr, cv2.IMREAD_COLOR)\n', (946, 971), False, 'import cv2\n'), ((1482, 1523), 'cv2.cvtColor', 'cv2.cvtColor', (['cv_image', 'cv2.COLOR_BGR2RGB'], {}), '(cv_image, cv2.COLOR_BGR2RGB)\n', (1494, 1523), False, 'import cv2\n'), ((209, 231), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (223, 231), False, 'import os\n'), ((245, 264), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (256, 264), False, 'import os\n'), ((350, 377), 'numpy.fromstring', 'np.fromstring', (['im', 'np.uint8'], {}), '(im, np.uint8)\n', (363, 377), True, 'import numpy as np\n'), ((394, 431), 'cv2.imdecode', 'cv2.imdecode', (['nparr', 'cv2.IMREAD_COLOR'], {}), '(nparr, cv2.IMREAD_COLOR)\n', (406, 431), False, 'import cv2\n'), ((448, 486), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (460, 486), False, 'import cv2\n'), ((1215, 1284), 'cv2.rectangle', 'cv2.rectangle', (['cv_image', '(left, top)', '(right, bottom)', '(0, 255, 0)', '(2)'], {}), '(cv_image, (left, top), (right, bottom), (0, 255, 0), 2)\n', (1228, 1284), False, 'import cv2\n'), ((1345, 1445), 'cv2.putText', 'cv2.putText', (['cv_image', 'tags[counter]', '(left, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.75)', '(0, 255, 0)', '(2)'], {}), '(cv_image, tags[counter], (left, y), cv2.FONT_HERSHEY_SIMPLEX, \n 0.75, (0, 255, 0), 2)\n', (1356, 1445), False, 'import cv2\n'), ((1531, 1560), 'PIL.Image.fromarray', 'Image.fromarray', (['cv_image_rgb'], {}), '(cv_image_rgb)\n', (1546, 1560), False, 'from PIL import Image\n'), ((1683, 1714), 'cv2.imwrite', 'cv2.imwrite', (['img_file', 'cv_image'], {}), '(img_file, cv_image)\n', (1694, 1714), False, 'import cv2\n'), ((1906, 1929), 'IPython.display.Video', 'Video', (['name'], {'embed': 
'(True)'}), '(name, embed=True)\n', (1911, 1929), False, 'from IPython.display import Video\n'), ((498, 520), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (513, 520), False, 'from PIL import Image\n')] |
#!/usr/bin/env python
# coding=utf-8
import unittest
from app.domain.model import User, AnonymousUser, Permission, Role
class UserModelTestCase(unittest.TestCase):
    """Unit tests for User password handling, roles and anonymous access."""

    def test_password_setter(self):
        user = User(password='<PASSWORD>')
        self.assertIsNotNone(user.password_hash)

    def test_no_password_getter(self):
        user = User(password='<PASSWORD>')
        with self.assertRaises(AttributeError):
            user.password

    def test_password_verifcation(self):
        user = User(password='<PASSWORD>')
        self.assertTrue(user.verify_password('<PASSWORD>'))
        self.assertFalse(user.verify_password('<PASSWORD>'))

    def test_password_salts_are_random(self):
        first = User(password='<PASSWORD>')
        second = User(password='<PASSWORD>')
        self.assertNotEqual(first.password_hash, second.password_hash)

    def test_roles_and_permissions(self):
        Role.insert_roles()
        user = User(email='<EMAIL>', password="<PASSWORD>")
        self.assertTrue(user.can(Permission.WRITE_ARTICLES))
        self.assertFalse(user.can(Permission.MODERATE_COMMENTS))

    def test_anonymous_user(self):
        visitor = AnonymousUser()
        self.assertFalse(visitor.can(Permission.FOLLOW))
| [
"app.domain.model.User",
"app.domain.model.Role.insert_roles",
"app.domain.model.AnonymousUser"
] | [((214, 241), 'app.domain.model.User', 'User', ([], {'password': '"""<PASSWORD>"""'}), "(password='<PASSWORD>')\n", (218, 241), False, 'from app.domain.model import User, AnonymousUser, Permission, Role\n'), ((349, 376), 'app.domain.model.User', 'User', ([], {'password': '"""<PASSWORD>"""'}), "(password='<PASSWORD>')\n", (353, 376), False, 'from app.domain.model import User, AnonymousUser, Permission, Role\n'), ((504, 531), 'app.domain.model.User', 'User', ([], {'password': '"""<PASSWORD>"""'}), "(password='<PASSWORD>')\n", (508, 531), False, 'from app.domain.model import User, AnonymousUser, Permission, Role\n'), ((708, 735), 'app.domain.model.User', 'User', ([], {'password': '"""<PASSWORD>"""'}), "(password='<PASSWORD>')\n", (712, 735), False, 'from app.domain.model import User, AnonymousUser, Permission, Role\n'), ((751, 778), 'app.domain.model.User', 'User', ([], {'password': '"""<PASSWORD>"""'}), "(password='<PASSWORD>')\n", (755, 778), False, 'from app.domain.model import User, AnonymousUser, Permission, Role\n'), ((905, 924), 'app.domain.model.Role.insert_roles', 'Role.insert_roles', ([], {}), '()\n', (922, 924), False, 'from app.domain.model import User, AnonymousUser, Permission, Role\n'), ((937, 981), 'app.domain.model.User', 'User', ([], {'email': '"""<EMAIL>"""', 'password': '"""<PASSWORD>"""'}), "(email='<EMAIL>', password='<PASSWORD>')\n", (941, 981), False, 'from app.domain.model import User, AnonymousUser, Permission, Role\n'), ((1150, 1165), 'app.domain.model.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (1163, 1165), False, 'from app.domain.model import User, AnonymousUser, Permission, Role\n')] |
import os
import sys
import logging
from flask import Flask # type: ignore
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
def create_app(test_config=None):
    """Application factory: build and configure the doppelkopf Flask app."""
    # create and configure the app
    app = Flask(__name__, instance_relative_config=True)

    if test_config is not None:
        # load the test config
        app.config.from_object("doppelkopf.config.TestingConfig")
        app.config.update(test_config)
    else:
        # load config, if it exists, when not testing
        profile = os.environ.get("APP_PROFILE", "doppelkopf.config.DevelopmentConfig")
        app.config.from_object(profile)

    # Mirror application logs to stdout at INFO level.
    app.logger.addHandler(logging.StreamHandler(sys.stdout))
    app.logger.setLevel(logging.INFO)

    sentry_sdk.init(
        dsn="https://103f1e1585fc47efb1b56a24db8b9dcc@sentry.io/1449084",
        environment=app.config["ENV_NAME"],
        integrations=[FlaskIntegration(), SqlalchemyIntegration()],
    )

    from doppelkopf import admin, api

    app.register_blueprint(admin.blueprint)
    app.register_blueprint(api.blueprint)

    from doppelkopf import db

    db.init_app(app)

    return app
| [
"sentry_sdk.integrations.flask.FlaskIntegration",
"logging.StreamHandler",
"flask.Flask",
"os.environ.get",
"sentry_sdk.integrations.sqlalchemy.SqlalchemyIntegration",
"doppelkopf.db.init_app"
] | [((304, 350), 'flask.Flask', 'Flask', (['__name__'], {'instance_relative_config': '(True)'}), '(__name__, instance_relative_config=True)\n', (309, 350), False, 'from flask import Flask\n'), ((1179, 1195), 'doppelkopf.db.init_app', 'db.init_app', (['app'], {}), '(app)\n', (1190, 1195), False, 'from doppelkopf import db\n'), ((730, 763), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (751, 763), False, 'import logging\n'), ((478, 546), 'os.environ.get', 'os.environ.get', (['"""APP_PROFILE"""', '"""doppelkopf.config.DevelopmentConfig"""'], {}), "('APP_PROFILE', 'doppelkopf.config.DevelopmentConfig')\n", (492, 546), False, 'import os\n'), ((965, 983), 'sentry_sdk.integrations.flask.FlaskIntegration', 'FlaskIntegration', ([], {}), '()\n', (981, 983), False, 'from sentry_sdk.integrations.flask import FlaskIntegration\n'), ((985, 1008), 'sentry_sdk.integrations.sqlalchemy.SqlalchemyIntegration', 'SqlalchemyIntegration', ([], {}), '()\n', (1006, 1008), False, 'from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration\n')] |